Example no. 1
def run(ceph_cluster, **kw):
    try:
        log.info(f"MetaData Information {log.metadata} in {__name__}")
        fs_util = FsUtils(ceph_cluster)

        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        fuse_mounting_dir = f"/mnt/cephfs_fuse{mounting_dir}/"
        fs_util.fuse_mount(clients, fuse_mounting_dir)

        mount_test_case(clients, fuse_mounting_dir)

        kernel_mounting_dir = f"/mnt/cephfs_kernel{mounting_dir}/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(clients, kernel_mounting_dir, ",".join(mon_node_ips))

        mount_test_case(clients, kernel_mounting_dir)

        log.info("Cleaning up!-----")
        rc = fs_util.client_clean_up(
            [],
            clients,
            kernel_mounting_dir,
            "umount",
        )
        if rc != 0:
            raise CommandFailed("fuse clients cleanup failed")
        log.info("Fuse clients cleaned up successfully")

        rc = fs_util.client_clean_up(
            clients,
            [],
            fuse_mounting_dir,
            "umount",
        )
        if rc != 0:
            raise CommandFailed("kernel clients cleanup failed")
        log.info("kernel clients cleaned up successfully")
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
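
The mount_test_case helper called above is not part of this listing. A minimal sketch of what such a helper could look like, reusing the client.exec_command interface seen throughout these examples, might be (the names and commands here are illustrative assumptions, not the original implementation):

def mount_test_case(clients, mounting_dir):
    # Hypothetical sketch only: the real helper is not shown in this listing.
    # It creates a small directory tree on the mount from each client and lists
    # it back, which is enough to confirm the mount point is usable.
    for client in clients:
        client.exec_command(sudo=True, cmd=f"mkdir -p {mounting_dir}mount_check_dir")
        client.exec_command(sudo=True, cmd=f"touch {mounting_dir}mount_check_dir/file_1")
        client.exec_command(sudo=True, cmd=f"ls -l {mounting_dir}mount_check_dir")
        log.info(f"Mount point {mounting_dir} verified from {client.node.hostname}")
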
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. create fs volume create cephfs and cephfs-ec

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name> --gid <num> --uid <num>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount subvolume on both fuse and kernel clients and run IO's

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create cephfs subvolumegroup with customized uid and gid ")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1",
                "uid": "20",
                "gid": "30",
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1",
                "uid": "40",
                "gid": "50",
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info("Create 2 Sub volumes on each of the subvolume group Size 5 GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernel and 1 subvloume on Fuse → Client1"
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
        )

        log.info(
            "On EC,Mount 1 subvolumegroup/subvolume on kernal and 1 subvloume on Fuse → Client2"
        )
        if build.startswith("5"):
            kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_EC_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_3 subvolgroup_ec1",
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_2,
                ",".join(mon_node_ips),
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_4 subvolgroup_ec1",
            )
            fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_2,
                extra_params=" --client_fs cephfs-ec",
            )

        log.info("Get the path of subvolume group")
        subvolgroup_default, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolumegroup getpath {default_fs} subvolgroup_1",
        )
        subvolgroup_default_path = subvolgroup_default.read().decode().strip()
        subvolgroup_ec, rc = clients[0].exec_command(
            sudo=True,
            cmd="ceph fs subvolumegroup getpath cephfs-ec subvolgroup_ec1",
        )
        subvolgroup_ec_path = subvolgroup_ec.read().decode().strip()

        def get_defined_uid(group_name, subvolumegroup_list):
            for subvolumegroup in subvolumegroup_list:
                if group_name == subvolumegroup["group_name"]:
                    return subvolumegroup.get("uid")

        log.info("Validate the uid of the subgroup")
        subgroup_1_uid = get_defined_uid(
            "subvolgroup_1", subvolumegroup_list=subvolumegroup_list
        )
        subgroup_2_uid = get_defined_uid(
            "subvolgroup_ec1", subvolumegroup_list=subvolumegroup_list
        )
        stat_of_uid_on_kernel_default_fs = fs_util.get_stats(
            client=clients[0],
            file_path=kernel_mounting_dir_1.rstrip("/") + subvolgroup_default_path,
            format="%u",
        )
        stat_of_uid_on_kernel_default_ec = fs_util.get_stats(
            client=clients[1],
            file_path=kernel_mounting_dir_2.rstrip("/") + subvolgroup_ec_path,
            format="%u",
        )
        stat_of_uid_on_fuse_default_fs = fs_util.get_stats(
            client=clients[0],
            file_path=fuse_mounting_dir_1.rstrip("/") + subvolgroup_default_path,
            format="%u",
        )
        stat_of_uid_on_fuse_default_ec = fs_util.get_stats(
            client=clients[1],
            file_path=fuse_mounting_dir_2.rstrip("/") + subvolgroup_ec_path,
            format="%u",
        )
        if int(subgroup_1_uid) != int(stat_of_uid_on_kernel_default_fs) and int(
            subgroup_1_uid
        ) != int(stat_of_uid_on_fuse_default_fs):
            log.error("UID is mismatching on sunvolgroup_1")
            return 1
        if int(subgroup_2_uid) != int(stat_of_uid_on_fuse_default_ec) and int(
            subgroup_2_uid
        ) != int(stat_of_uid_on_kernel_default_ec):
            log.error("UID is mismatching on subvolgroup_ec1")
            return 1

        def get_defined_gid(group_name, subvolumegroup_list):
            for subvolumegroup in subvolumegroup_list:
                if group_name == subvolumegroup["group_name"]:
                    return subvolumegroup.get("gid")

        log.info("Validate the gid of the subgroup")
        subgroup_1_gid = get_defined_gid(
            "subvolgroup_1", subvolumegroup_list=subvolumegroup_list
        )
        subgroup_2_gid = get_defined_gid(
            "subvolgroup_ec1", subvolumegroup_list=subvolumegroup_list
        )
        stat_of_gid_on_kernel_default_fs = fs_util.get_stats(
            client=clients[0],
            file_path=kernel_mounting_dir_1.rstrip("/") + subvolgroup_default_path,
            format="%g",
        )
        stat_of_gid_on_kernel_default_ec = fs_util.get_stats(
            client=clients[1],
            file_path=kernel_mounting_dir_2.rstrip("/") + subvolgroup_ec_path,
            format="%g",
        )
        stat_of_gid_on_fuse_default_fs = fs_util.get_stats(
            client=clients[0],
            file_path=fuse_mounting_dir_1.rstrip("/") + subvolgroup_default_path,
            format="%g",
        )
        stat_of_gid_on_fuse_default_ec = fs_util.get_stats(
            client=clients[1],
            file_path=fuse_mounting_dir_2.rstrip("/") + subvolgroup_ec_path,
            format="%g",
        )
        if int(subgroup_1_gid) != int(stat_of_gid_on_kernel_default_fs) and int(
            subgroup_1_gid
        ) != int(stat_of_gid_on_fuse_default_fs):
            log.error("GID is mismatching on sunvolgroup_1")
            return 1
        if int(subgroup_2_gid) != int(stat_of_gid_on_kernel_default_ec) and int(
            subgroup_2_gid
        ) != int(stat_of_gid_on_fuse_default_ec):
            log.error("GID is mismatching on subvolgroup_ec1")
            return 1

        run_ios(clients[0], kernel_mounting_dir_1)
        run_ios(clients[0], fuse_mounting_dir_1)
        run_ios(clients[1], kernel_mounting_dir_2)
        run_ios(clients[1], fuse_mounting_dir_2)

        log.info("Clean up the system")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)

        fs_util.client_clean_up(
            "umount", kernel_clients=[clients[0]], mounting_dir=kernel_mounting_dir_1
        )
        fs_util.client_clean_up(
            "umount", kernel_clients=[clients[1]], mounting_dir=kernel_mounting_dir_2
        )

        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir_1
        )
        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[1]], mounting_dir=fuse_mounting_dir_2
        )

        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true"
        )

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
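
The run_ios helper used in this example (and in later ones, where it is also called with file_name, bs and count keywords) is not shown in this listing. A minimal sketch, assuming a plain dd write is an acceptable IO generator, could be:

def run_ios(client, mounting_dir, file_name="dd_file", bs="10M", count=10):
    # Hypothetical sketch only: the real run_ios helper is not shown here.
    # Any IO generator works; a dd write keeps the sketch self-contained, and the
    # file_name/bs/count keywords mirror how later examples call the helper.
    client.exec_command(
        sudo=True,
        cmd=f"dd if=/dev/zero of={mounting_dir}{client.node.hostname}_{file_name} "
        f"bs={bs} count={count}",
        long_running=True,
    )
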
Example no. 3
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. create fs volume create cephfs and cephfs-ec

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount subvolume on both fuse and kernel clients and run IO's

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. Remove the pools added as part of pool_layout
    3. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    4. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create 2 pools, 1 - Replicated , 1 - EC Data Pool")
        create_pools = [
            "ceph osd pool create cephfs-data-pool-layout",
            "ceph osd pool create cephfs-data-pool-layout-ec 64 erasure",
            "ceph osd pool set cephfs-data-pool-layout-ec allow_ec_overwrites true",
        ]
        for cmd in create_pools:
            clients[0].exec_command(sudo=True, cmd=cmd)
        log.info("Add created data pools to each of the filesystem")
        add_pool_to_FS = [
            "ceph fs add_data_pool cephfs cephfs-data-pool-layout",
            "ceph fs add_data_pool cephfs-ec cephfs-data-pool-layout-ec",
        ]
        for cmd in add_pool_to_FS:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create cephfs subvolumegroup with desired data pool_layout")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1",
                "pool_layout": "cephfs-data-pool-layout",
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1",
                "pool_layout": "cephfs-data-pool-layout-ec",
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info(
            "Create 2 Sub volumes on each of the subvolume group Size 5 GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernel and 1 subvloume on Fuse → Client1"
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.strip()}",
        )

        log.info(
            "On EC,Mount 1 subvolumegroup/subvolume on kernal and 1 subvloume on Fuse → Client2"
        )
        if build.startswith("5"):
            kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_EC_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_3 subvolgroup_ec1",
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_2,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.strip()}",
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_4 subvolgroup_ec1",
            )
            fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_2,
                extra_params=f" -r {subvol_path.strip()} --client_fs cephfs-ec",
            )

        log.info(
            "Check the pool status before the IOs to confirm no IO has gone to the attached pool yet"
        )
        get_pool_status_before = fs_util.get_pool_df(
            client=clients[0],
            pool_name="cephfs-data-pool-layout",
            vol_name=default_fs)
        get_pool_status_before_EC = fs_util.get_pool_df(
            client=clients[1],
            pool_name="cephfs-data-pool-layout-ec",
            vol_name="cephfs-ec",
        )

        run_ios(clients[0], kernel_mounting_dir_1)
        run_ios(clients[0], fuse_mounting_dir_1)
        run_ios(clients[1], kernel_mounting_dir_2)
        run_ios(clients[1], fuse_mounting_dir_2)

        log.info(
            "Check the Pool status and verify the IO's are going only to the Pool attached"
        )
        get_pool_status_after = fs_util.get_pool_df(
            client=clients[0],
            pool_name="cephfs-data-pool-layout",
            vol_name=default_fs)
        get_pool_status_after_EC = fs_util.get_pool_df(
            client=clients[1],
            pool_name="cephfs-data-pool-layout-ec",
            vol_name="cephfs-ec",
        )

        if get_pool_status_after["used"] < get_pool_status_before["used"]:
            log.error("Pool attached is unused")
            return 1
        if get_pool_status_after_EC["used"] < get_pool_status_before_EC["used"]:
            log.info("EC Pool attached is unused")
            return 1

        log.info("Clean up the system")
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[0]],
                                mounting_dir=kernel_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[1]],
                                mounting_dir=kernel_mounting_dir_2)

        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[1]],
                                mounting_dir=fuse_mounting_dir_2)

        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true")

        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)
        log.info(
            "Remove the data pools from the filesystem and delete the created pools."
        )
        rm_pool_from_FS = [
            "ceph fs rm_data_pool cephfs cephfs-data-pool-layout",
            "ceph fs rm_data_pool cephfs-ec cephfs-data-pool-layout-ec",
            "ceph osd pool delete cephfs-data-pool-layout "
            "cephfs-data-pool-layout --yes-i-really-really-mean-it-not-faking",
            "ceph osd pool delete cephfs-data-pool-layout-ec "
            "cephfs-data-pool-layout-ec --yes-i-really-really-mean-it-not-faking",
        ]
        for cmd in rm_pool_from_FS:
            clients[0].exec_command(sudo=True, cmd=cmd)

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
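
The pool usage comparison above relies on fs_util.get_pool_df returning a dict with a "used" field. A rough sketch of how such a helper might be built, assuming json is imported and that 'ceph fs status <vol_name> --format json' exposes a 'pools' list with 'name', 'used' and 'avail' fields, could be:

def get_pool_df(client, pool_name, vol_name):
    # Hypothetical sketch only: assumes the 'ceph fs status' JSON layout described above.
    out, rc = client.exec_command(
        sudo=True, cmd=f"ceph fs status {vol_name} --format json"
    )
    raw = out.read().decode() if hasattr(out, "read") else out
    for pool in json.loads(raw)["pools"]:
        if pool["name"] == pool_name:
            return pool
    return None
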
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573870 - Create 2 Filesystem with default values on different MDS daemons
    Pre-requisites :
    1. We need at least one client node to execute this test case

    Test Case Flow:
    1. Create 2 file systems with placement arguments
    2. validate the mds came on the specified placements
    3. mount both the file systems and using fuse mount
    4. Run IOs on the FS
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))

        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        client1 = clients[0]
        out, rc = client1.exec_command(
            sudo=True, cmd="ceph orch ps --daemon_type mds -f json")
        daemon_ls_before = json.loads(out.read().decode())
        daemon_count_before = len(daemon_ls_before)
        host_list = [
            client1.node.hostname.replace("node7", "node2"),
            client1.node.hostname.replace("node7", "node3"),
        ]
        hosts = " ".join(host_list)
        client1.exec_command(
            sudo=True,
            cmd=f"ceph fs volume create cephfs_new --placement='2 {hosts}'",
            check_ec=False,
        )
        fs_util.wait_for_mds_process(client1, "cephfs_new")
        out, rc = client1.exec_command(
            sudo=True, cmd="ceph orch ps --daemon_type mds -f json")
        daemon_ls_after = json.loads(out.read().decode())
        daemon_count_after = len(daemon_ls_after)
        assert daemon_count_after > daemon_count_before, (
            "daemon count is reduced after creating FS. "
            "Expectation is MDS daemons whould be more")

        total_fs = fs_util.get_fs_details(client1)
        if len(total_fs) < 2:
            log.error(
                "We can't proceed with the test case as we are not able to create 2 filesystems"
            )
        fs_names = [fs["name"] for fs in total_fs]
        validate_mds_placements("cephfs_new", daemon_ls_after, hosts, 2)
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"

        fs_util.fuse_mount([clients[0]],
                           fuse_mounting_dir_1,
                           extra_params=f"--client_fs {fs_names[0]}")
        fs_util.fuse_mount([clients[0]],
                           fuse_mounting_dir_2,
                           extra_params=f"--client_fs {fs_names[1]}")
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{fuse_mounting_dir_1}",
            long_running=True,
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{fuse_mounting_dir_2}",
            long_running=True,
        )
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_2)
        commands = [
            "ceph config set mon mon_allow_pool_delete true",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        fs_util.remove_fs(client1, "cephfs_new")
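
The validate_mds_placements helper called above is not included in this listing. A minimal sketch, assuming the 'ceph orch ps --daemon_type mds -f json' output is a list of entries with 'daemon_id' (prefixed by the filesystem name) and 'hostname' fields, might be:

def validate_mds_placements(fs_name, daemon_ls, hosts, expected_count):
    # Hypothetical sketch only: checks that the expected number of MDS daemons for
    # fs_name came up and that each landed on one of the requested placement hosts.
    placed_hosts = [
        daemon["hostname"]
        for daemon in daemon_ls
        if daemon["daemon_id"].startswith(fs_name)
    ]
    assert (
        len(placed_hosts) == expected_count
    ), f"Expected {expected_count} MDS daemons for {fs_name}, found {len(placed_hosts)}"
    for host in placed_hosts:
        assert host in hosts, f"MDS for {fs_name} landed on unexpected host {host}"
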
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. create fs volume create cephfs and cephfs-ec

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name> --mode <octal_value>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount subvolume on both fuse and kernel clients and run IO's

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create cephfs subvolumegroup with different octal modes")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1",
                "mode": "777",
            },
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_2",
                "mode": "700",
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1",
                "mode": "755",
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info("Create 2 Sub volumes on each of the subvolume group of Size 5GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_2",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_2",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_5",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_6",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernel and 1 subvolume on Fuse → Client1"
        )
        if build.startswith("5"):
            kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_1,
                ",".join(mon_node_ips),
                extra_params=f",fs={default_fs}",
            )
            fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_1,
                extra_params=f" --client_fs {default_fs}",
            )

            kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_3 subvolgroup_2",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_2,
                ",".join(mon_node_ips),
                extra_params=f",fs={default_fs}",
            )
            fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_4 subvolgroup_2",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_2,
                extra_params=f" --client_fs {default_fs}",
            )

            kernel_mounting_dir_3 = f"/mnt/cephfs_kernel{mounting_dir}_EC_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")
            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_5 subvolgroup_ec1",
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_3,
                ",".join(mon_node_ips),
                extra_params=",fs=cephfs-ec",
            )
            fuse_mounting_dir_3 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_6 subvolgroup_ec1",
            )
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_3,
                extra_params=" --client_fs cephfs-ec",
            )
        else:
            kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_1,
                ",".join(mon_node_ips),
            )
            fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_1,
            )

            kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_3 subvolgroup_2",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_2,
                ",".join(mon_node_ips),
            )
            fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_4 subvolgroup_2",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_2,
            )

            kernel_mounting_dir_3 = f"/mnt/cephfs_kernel{mounting_dir}_EC_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")
            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_5 subvolgroup_ec1",
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_3,
                ",".join(mon_node_ips),
                extra_params=",fs=cephfs-ec",
            )
            fuse_mounting_dir_3 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_6 subvolgroup_ec1",
            )
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_3,
                extra_params=" --client_fs cephfs-ec",
            )

        log.info("Get the path of subvolume groups")
        subvolgroup1_getpath, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolumegroup getpath {default_fs} subvolgroup_1",
        )
        subvolgroup1_getpath = subvolgroup1_getpath.strip()

        subvolgroup2_getpath, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolumegroup getpath {default_fs} subvolgroup_2",
        )
        subvolgroup2_getpath = subvolgroup2_getpath.strip()

        subvolgroup_ec_getpath, rc = clients[0].exec_command(
            sudo=True,
            cmd="ceph fs subvolumegroup getpath cephfs-ec subvolgroup_ec1",
        )
        subvolgroup_ec_getpath = subvolgroup_ec_getpath.strip()

        def get_defined_mode(group_name, subvolumegroup_list):
            for subvolumegroup in subvolumegroup_list:
                if group_name == subvolumegroup["group_name"]:
                    return subvolumegroup.get("mode")

        log.info("Validate the octal mode set on the subgroup")
        subgroup_1_mode = get_defined_mode(
            "subvolgroup_1", subvolumegroup_list=subvolumegroup_list
        )
        subgroup_2_mode = get_defined_mode(
            "subvolgroup_2", subvolumegroup_list=subvolumegroup_list
        )
        subgroup_ec_mode = get_defined_mode(
            "subvolgroup_ec1", subvolumegroup_list=subvolumegroup_list
        )
        stat_of_octal_mode_on_kernel_dir1 = fs_util.get_stats(
            client=clients[0],
            file_path=kernel_mounting_dir_1.rstrip("/") + subvolgroup1_getpath,
            format="%a",
        )
        stat_of_octal_mode_on_kernel_dir2 = fs_util.get_stats(
            client=clients[0],
            file_path=kernel_mounting_dir_2.rstrip("/") + subvolgroup2_getpath,
            format="%a",
        )
        stat_of_octal_mode_on_kernel_dir3 = fs_util.get_stats(
            client=clients[1],
            file_path=kernel_mounting_dir_3.rstrip("/") + subvolgroup_ec_getpath,
            format="%a",
        )
        stat_of_octal_mode_on_fuse_dir1 = fs_util.get_stats(
            client=clients[0],
            file_path=fuse_mounting_dir_1.rstrip("/") + subvolgroup1_getpath,
            format="%a",
        )
        stat_of_octal_mode_on_fuse_dir2 = fs_util.get_stats(
            client=clients[0],
            file_path=fuse_mounting_dir_2.rstrip("/") + subvolgroup2_getpath,
            format="%a",
        )
        stat_of_octal_mode_on_fuse_dir3 = fs_util.get_stats(
            client=clients[1],
            file_path=fuse_mounting_dir_3.rstrip("/") + subvolgroup_ec_getpath,
            format="%a",
        )

        if int(subgroup_1_mode) != int(stat_of_octal_mode_on_kernel_dir1) and int(
            subgroup_1_mode
        ) != int(stat_of_octal_mode_on_fuse_dir1):
            log.error("Octal values are mismatching on subvolgroup_1")
            return 1
        if int(subgroup_2_mode) != int(stat_of_octal_mode_on_kernel_dir2) and int(
            subgroup_2_mode
        ) != int(stat_of_octal_mode_on_fuse_dir2):
            log.error("Octal values are mismatching on subvolgroup_2")
            return 1
        if int(subgroup_ec_mode) != int(stat_of_octal_mode_on_kernel_dir3) and int(
            subgroup_ec_mode
        ) != int(stat_of_octal_mode_on_fuse_dir3):
            log.error("Octal values are mismatching on subvolgroup_ec1")
            return 1

        log.info("Run IO's")
        with parallel() as p:
            for i in [
                kernel_mounting_dir_1,
                fuse_mounting_dir_1,
                kernel_mounting_dir_2,
                fuse_mounting_dir_2,
            ]:
                p.spawn(fs_util.run_ios, clients[0], i)

            for i in [kernel_mounting_dir_3, fuse_mounting_dir_3]:
                p.spawn(fs_util.run_ios, clients[1], i)

        log.info("Clean up the system")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)

        for i in [kernel_mounting_dir_1, kernel_mounting_dir_2]:
            fs_util.client_clean_up(
                "umount", kernel_clients=[clients[0]], mounting_dir=i
            )

        for i in [fuse_mounting_dir_1, fuse_mounting_dir_2]:
            fs_util.client_clean_up("umount", fuse_clients=[clients[0]], mounting_dir=i)

        fs_util.client_clean_up(
            "umount", kernel_clients=[clients[1]], mounting_dir=kernel_mounting_dir_3
        )
        fs_util.client_clean_up(
            "umount", kernel_clients=[clients[1]], mounting_dir=fuse_mounting_dir_3
        )

        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true"
        )

        return 0
    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
        return 1
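
The uid/gid and octal-mode checks in these examples go through fs_util.get_stats with a stat format string. A thin sketch of such a wrapper, assuming it simply shells out to stat(1) with --printf, could be:

def get_stats(client, file_path, format="%n"):
    # Hypothetical sketch only: a --printf format such as %u (uid), %g (gid) or
    # %a (octal permissions) returns just that single field for the given path.
    out, rc = client.exec_command(
        sudo=True, cmd=f"stat --printf '{format}' {file_path}"
    )
    return out.read().decode().strip() if hasattr(out, "read") else out.strip()
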
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. create fs volume create cephfs and cephfs-ec

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount subvolume on both fuse and kernel clients and run IO's
    4. ceph fs subvolume resize <vol_name> <subvolume_name> <new_size> [--group_name <subvol_group>]
    5. Mount subvolume on both fuse and kernel clients and run IO's
    6. Validate the resize.

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. Remove the pools added as part of pool_layout
    3. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    4. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create SubVolumeGroups on each filesystem")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1"
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info(
            "Create 2 Sub volumes on each of the subvolume group with Size 2GB"
        )
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "2147483648",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_1",
                "size": "2147483648",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_ec1",
                "size": "2147483648",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_ec1",
                "size": "2147483648",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        log.info("Validate the Subvolume Size")
        for subvolume in subvolume_list:
            subvolume_size_subvol = fs_util.get_subvolume_info(
                client=clients[0], **subvolume)
            if int(subvolume.get("size", "infinite")) != int(
                    subvolume_size_subvol["bytes_quota"]):
                log.error(
                    f"Size mismatchiing for {subvolume.get('subvol_name')} "
                    f"Expected size is : {subvolume.get('size', 'infinite')}"
                    f"Actual Size: {subvolume_size_subvol['bytes_quota']}")
                return 1

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernel and 1 subvloume on Fuse → Client1"
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.strip()}",
        )

        log.info(
            "On EC,Mount 1 subvolumegroup/subvolume on kernal and 1 subvloume on Fuse → Client2"
        )
        if build.startswith("5"):
            kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_EC_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_3 subvolgroup_ec1",
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_2,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.strip()}",
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_4 subvolgroup_ec1",
            )
            fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_2,
                extra_params=f" -r {subvol_path.strip()} --client_fs cephfs-ec",
            )

        run_ios(
            clients[0],
            kernel_mounting_dir_1,
            file_name="dd_before",
            bs="100M",
            count=20,
        )
        run_ios(clients[0],
                fuse_mounting_dir_1,
                file_name="dd_before",
                bs="100M",
                count=20)
        run_ios(
            clients[1],
            kernel_mounting_dir_2,
            file_name="dd_before",
            bs="100M",
            count=20,
        )
        run_ios(clients[1],
                fuse_mounting_dir_2,
                file_name="dd_before",
                bs="100M",
                count=20)

        log.info("Resize the subvolumes to 5GB and add data more than 2GB")
        resize_subvolumes = [
            f"ceph fs subvolume resize {default_fs} subvol_1 5368709120 --group_name subvolgroup_1",
            f"ceph fs subvolume resize {default_fs} subvol_2 5368709120 --group_name subvolgroup_1",
            "ceph fs subvolume resize cephfs-ec subvol_3 5368709120 --group_name subvolgroup_ec1",
            "ceph fs subvolume resize cephfs-ec subvol_4 5368709120 --group_name subvolgroup_ec1",
        ]
        for cmd in resize_subvolumes:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Validate the Subvolume after Resize")
        for subvolume in subvolume_list:
            subvolume_size_subvol = fs_util.get_subvolume_info(
                client=clients[0], **subvolume)
            if 5368709120 != int(subvolume_size_subvol["bytes_quota"]):
                log.error(
                    f"Size mismatchiing for {subvolume.get('subvol_name')} Expected size is : 5368709120"
                    f"Actual Size: {subvolume_size_subvol['bytes_quota']}")
                return 1

        run_ios(clients[0],
                kernel_mounting_dir_1,
                file_name="dd_after",
                bs="100M",
                count=30)
        run_ios(clients[0],
                fuse_mounting_dir_1,
                file_name="dd_after",
                bs="100M",
                count=30)
        run_ios(clients[1],
                kernel_mounting_dir_2,
                file_name="dd_after",
                bs="100M",
                count=30)
        run_ios(clients[1],
                fuse_mounting_dir_2,
                file_name="dd_after",
                bs="100M",
                count=30)

        log.info("Clean up the system")
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[0]],
                                mounting_dir=kernel_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[1]],
                                mounting_dir=kernel_mounting_dir_2)

        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[1]],
                                mounting_dir=fuse_mounting_dir_2)

        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true")

        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
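
The quota validation above depends on fs_util.get_subvolume_info exposing the 'bytes_quota' field. A rough sketch, assuming json is imported and that 'ceph fs subvolume info ... --format json' reports the quota, could be:

def get_subvolume_info(client, vol_name, subvol_name, group_name=None, **kwargs):
    # Hypothetical sketch only: extra keys such as 'size' from the subvolume dicts
    # are absorbed by **kwargs; 'bytes_quota' is the field the size checks compare.
    group = f" --group_name {group_name}" if group_name else ""
    out, rc = client.exec_command(
        sudo=True,
        cmd=f"ceph fs subvolume info {vol_name} {subvol_name}{group} --format json",
    )
    raw = out.read().decode() if hasattr(out, "read") else out
    return json.loads(raw)
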
Example no. 7
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Requires 2 client nodes in the setup

    Operations performed :
    1. Enable Multiple File systems 1. In Replicated 2. In EC
    2. Create 2 SubVolumeGroups on each file system
    3. Create 2 Sub volumes on each of the subvolume group Size 20 GB
    4. Create 2 sub volumes on default subvolume group
    5. Mount 1 subvolumegroup/subvolume on kernel and 1 subvolume on Fuse → Client1
    6. Mount 1 subvolume on kernel and 1 subvolumegroup/subvolume on Fuse → Client2
    7. On EC, mount 1 subvolumegroup/subvolume on kernel and 1 subvolume on Fuse → Client2
    8. On EC, mount 1 subvolume on kernel and 1 subvolumegroup/subvolume on Fuse → Client1
    9. Run IOs on the subvolumegroup/subvolume on the kernel client and on the subvolume on the Fuse client using the below commands
        git clone https://github.com/distributed-system-analysis/smallfile.git
        cd smallfile
        for i in create read append read delete create overwrite rename delete-renamed mkdir rmdir create \
            symlink stat chmod ls-l delete cleanup ; do
            python3 smallfile_cli.py --operation $i --threads 8 --file-size 10240 --files 100 \
                --top /mnt/kcephfs/vol5/ ;
        done
        IO Tool 2 :
        wget -O linux.tar.gz http://download.ceph.com/qa/linux-5.4.tar.gz
        tar -xzf linux.tar.gz tardir/ ; sleep 10 ; rm -rf  tardir/ ; sleep 10 ; done
        DD on Each volume:

        Wget :
       http://download.eng.bos.redhat.com/fedora/linux/releases/34/Server/x86_64/iso/Fedora-Server-dvd-x86_64-34-1.2.iso
       http://download.eng.bos.redhat.com/fedora/linux/releases/33/Server/x86_64/iso/Fedora-Server-dvd-x86_64-33-1.2.iso
       http://download.eng.bos.redhat.com/fedora/linux/releases/32/Server/x86_64/iso/Fedora-Server-dvd-x86_64-32-1.6.iso
        Note : Run 2 IO tools mentioned above on each volume mounted
    10. Create Snapshots. Verify with snap ls | grep to see the snap got created
    11. Create Clones on all 8 volumes. Verify the clones got created using clone ls (subvolume ls)
    12. Set File Level Quota on 2 directories under the subvolume and Size Level Quota on 2 other directories
        under the subvolume.
    13. Verify quota based on your configuration
    14. Clear Quotas
    15. Remove Clones
    16. Remove Snapshots
    17. Unmount
    18. Remove Volumes.

    Args:
        ceph_cluster:
        **kw:

    Returns:

    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)
        log.info("Create 2 SubVolumeGroups on each file system")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_1"},
            {"vol_name": default_fs, "group_name": "subvolgroup_2"},
            {"vol_name": "cephfs-ec", "group_name": "subvolgroup_1"},
            {"vol_name": "cephfs-ec", "group_name": "subvolgroup_2"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)
        log.info("Create 2 Sub volumes on each of the subvolume group Size 20 GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_2",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_2",
                "size": "5368706371",
            },
            {"vol_name": default_fs, "subvol_name": "subvol_5", "size": "5368706371"},
            {"vol_name": default_fs, "subvol_name": "subvol_6", "size": "5368706371"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_7", "size": "5368706371"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_8", "size": "5368706371"},
            {"vol_name": default_fs, "subvol_name": "subvol_9", "size": "5368706371"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_10", "size": "5368706371"},
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernal and 1 subvloume on Fuse → Client1"
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {default_fs} subvol_5"
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )

        log.info(
            "Mount 1 subvolumeon kernal and 1 subvloumegroup/subvolume on Fuse → Client2"
        )
        kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")

        subvol_path, rc = clients[1].exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {default_fs} subvol_6"
        )
        fs_util.kernel_mount(
            [clients[1]],
            kernel_mounting_dir_2,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )

        subvol_path, rc = clients[1].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_2",
        )
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount(
            [clients[1]],
            fuse_mounting_dir_2,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )

        log.info(
            "On EC,Mount 1 subvolumegroup/subvolume on kernal and 1 subvloume on Fuse → Client2"
        )
        if build.startswith("5"):
            kernel_mounting_dir_3 = f"/mnt/cephfs_kernel{mounting_dir}_EC_3/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_3 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_3,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.read().decode().strip()}",
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[0].exec_command(
                sudo=True, cmd="ceph fs subvolume getpath cephfs-ec subvol_7"
            )
            fuse_mounting_dir_3 = f"/mnt/cephfs_fuse{mounting_dir}_EC_3/"
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_3,
                extra_params=f" -r {subvol_path.read().decode().strip()} --client_fs cephfs-ec",
            )

        log.info(
            "On EC,Mount 1 subvolumeon kernal and 1 subvloumegroup/subvolume on Fuse → Client1"
        )
        if build.startswith("5"):
            kernel_mounting_dir_4 = f"/mnt/cephfs_kernel{mounting_dir}_EC_4/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[1].exec_command(
                sudo=True, cmd="ceph fs subvolume getpath cephfs-ec subvol_8"
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_4,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.read().decode().strip()}",
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_4 subvolgroup_2",
            )
            fuse_mounting_dir_4 = f"/mnt/cephfs_fuse{mounting_dir}_EC_4/"
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_4,
                extra_params=f" -r {subvol_path.read().decode().strip()} --client_fs cephfs-ec",
            )

        run_ios(clients[0], kernel_mounting_dir_1)
        run_ios(clients[0], fuse_mounting_dir_1)
        run_ios(clients[1], kernel_mounting_dir_2)
        run_ios(clients[1], fuse_mounting_dir_2)
        if build.startswith("5"):
            run_ios(clients[0], kernel_mounting_dir_3)
            run_ios(clients[1], kernel_mounting_dir_4)
            run_ios(clients[0], fuse_mounting_dir_3)
            run_ios(clients[1], fuse_mounting_dir_4)

        log.info("Create Snapshots.Verify the snap ls")
        snapshot_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "snap_name": "snap_1",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "snap_name": "snap_2",
                "group_name": "subvolgroup_2",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "snap_name": "snap_3",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "snap_name": "snap_4",
                "group_name": "subvolgroup_2",
            },
            {"vol_name": default_fs, "subvol_name": "subvol_5", "snap_name": "snap_5"},
            {"vol_name": default_fs, "subvol_name": "subvol_6", "snap_name": "snap_6"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_7", "snap_name": "snap_7"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_8", "snap_name": "snap_8"},
        ]
        for snapshot in snapshot_list:
            fs_util.create_snapshot(clients[0], **snapshot)

        clone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "snap_name": "snap_1",
                "target_subvol_name": "clone_1",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "snap_name": "snap_2",
                "target_subvol_name": "clone_2",
                "group_name": "subvolgroup_2",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "snap_name": "snap_3",
                "target_subvol_name": "clone_3",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "snap_name": "snap_4",
                "target_subvol_name": "clone_4",
                "group_name": "subvolgroup_2",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_5",
                "snap_name": "snap_5",
                "target_subvol_name": "clone_5",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_6",
                "snap_name": "snap_6",
                "target_subvol_name": "clone_6",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_7",
                "snap_name": "snap_7",
                "target_subvol_name": "clone_7",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_8",
                "snap_name": "snap_8",
                "target_subvol_name": "clone_8",
            },
        ]
        for clone in clone_list:
            fs_util.create_clone(clients[0], **clone)
        log.info(
            "Set File Level Quota on 2 directories under subvolume and Size Level Quota on "
            "under 2 directories under subvolume"
        )
        subvol_path, rc = clients[0].exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {default_fs} subvol_9"
        )
        fuse_mounting_dir_5 = f"/mnt/cephfs_fuse{mounting_dir}_5/"
        fs_util.fuse_mount(
            [clients[1]],
            fuse_mounting_dir_5,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )
        clients[1].exec_command(
            sudo=True,
            cmd=f"setfattr -n ceph.quota.max_files -v 10 {fuse_mounting_dir_5}",
        )
        clients[1].exec_command(
            sudo=True, cmd=f"getfattr -n ceph.quota.max_files {fuse_mounting_dir_5}"
        )
        out, rc = clients[1].exec_command(
            sudo=True,
            cmd=f"cd {fuse_mounting_dir_5};touch quota{{1..15}}.txt",
        )
        log.info(out)
        if clients[1].node.exit_status == 0:
            log.warning(
                "Quota set has been failed,Able to create more files."
                "This is known limitation"
            )
        if build.startswith("5"):
            subvol_path, rc = clients[0].exec_command(
                sudo=True, cmd="ceph fs subvolume getpath cephfs-ec subvol_10"
            )
            kernel_mounting_dir_5 = f"/mnt/cephfs_kernel{mounting_dir}_5/"
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_5,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.read().decode().strip()}",
                extra_params=",fs=cephfs-ec",
            )
            clients[1].exec_command(
                sudo=True,
                cmd=f"setfattr -n ceph.quota.max_files -v 10 {kernel_mounting_dir_5}",
            )
            clients[1].exec_command(
                sudo=True,
                cmd=f"getfattr -n ceph.quota.max_files {kernel_mounting_dir_5}",
            )

            out, rc = clients[1].exec_command(
                sudo=True,
                cmd=f"cd {kernel_mounting_dir_5};touch quota{{1..15}}.txt",
            )
            log.info(out)
            if clients[1].node.exit_status == 0:
                log.warning(
                    "Quota set has been failed,Able to create more files."
                    "This is known limitation"
                )
                # return 1

        log.info("Clean up the system")
        fs_util.client_clean_up(
            "umount", kernel_clients=[clients[0]], mounting_dir=kernel_mounting_dir_1
        )
        fs_util.client_clean_up(
            "umount", kernel_clients=[clients[1]], mounting_dir=kernel_mounting_dir_2
        )
        if build.startswith("5"):
            fs_util.client_clean_up(
                "umount",
                kernel_clients=[clients[0]],
                mounting_dir=kernel_mounting_dir_3,
            )

            fs_util.client_clean_up(
                "umount",
                kernel_clients=[clients[1]],
                mounting_dir=kernel_mounting_dir_4,
            )
            fs_util.client_clean_up(
                "umount",
                kernel_clients=[clients[1]],
                mounting_dir=kernel_mounting_dir_5,
            )
        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir_1
        )
        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[1]], mounting_dir=fuse_mounting_dir_2
        )
        if build.startswith("5"):
            fs_util.client_clean_up(
                "umount", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir_3
            )
            fs_util.client_clean_up(
                "umount", fuse_clients=[clients[1]], mounting_dir=fuse_mounting_dir_4
            )
        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[1]], mounting_dir=fuse_mounting_dir_5
        )
        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true"
        )
        rmsnapshot_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "snap_name": "snap_1",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "snap_name": "snap_2",
                "group_name": "subvolgroup_2",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "snap_name": "snap_3",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "snap_name": "snap_4",
                "group_name": "subvolgroup_2",
            },
            {"vol_name": default_fs, "subvol_name": "subvol_5", "snap_name": "snap_5"},
            {"vol_name": default_fs, "subvol_name": "subvol_6", "snap_name": "snap_6"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_7", "snap_name": "snap_7"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_8", "snap_name": "snap_8"},
        ]
        for snapshot in rmsnapshot_list:
            fs_util.remove_snapshot(clients[0], **snapshot)

        rmclone_list = [
            {"vol_name": default_fs, "subvol_name": "clone_1"},
            {"vol_name": default_fs, "subvol_name": "clone_2"},
            {"vol_name": "cephfs-ec", "subvol_name": "clone_3"},
            {"vol_name": "cephfs-ec", "subvol_name": "clone_4"},
            {"vol_name": default_fs, "subvol_name": "clone_5"},
            {"vol_name": default_fs, "subvol_name": "clone_6"},
            {"vol_name": "cephfs-ec", "subvol_name": "clone_7"},
            {"vol_name": "cephfs-ec", "subvol_name": "clone_8"},
        ]
        rmsubvolume_list = rmclone_list + subvolume_list

        for subvolume in rmsubvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
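
A minimal sketch, not part of the original test: steps 12-14 above set and verify CephFS quotas with setfattr/getfattr, and the same calls could be factored into a small helper like the one below. It assumes the client.exec_command interface and the log object already used throughout these examples; the helper name and the default limits (10 files, 1 GiB) are illustrative choices, while ceph.quota.max_files and ceph.quota.max_bytes are the standard CephFS quota extended attributes.

def set_and_verify_quota(client, mount_dir, max_files=10, max_bytes=1073741824):
    # Set a file-count quota and a byte quota on the mounted subvolume path.
    client.exec_command(
        sudo=True,
        cmd=f"setfattr -n ceph.quota.max_files -v {max_files} {mount_dir}",
    )
    client.exec_command(
        sudo=True,
        cmd=f"setfattr -n ceph.quota.max_bytes -v {max_bytes} {mount_dir}",
    )
    # Read both attributes back so the applied limits show up in the test log.
    for attr in ("ceph.quota.max_files", "ceph.quota.max_bytes"):
        out, rc = client.exec_command(sudo=True, cmd=f"getfattr -n {attr} {mount_dir}")
        log.info(out)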
Example n. 8
0
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573867 - Create 4-5 Filesystem randomly on different MDS daemons

    Pre-requisites :
    1. We need at least one client node to execute this test case

    Test Case Flow:
    1. Create 5 file systems with default values
    2. Validate the MDS counts and the file system counts
    3. Mount all the file systems using fuse mount
    4. Run IOs on the FSs
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))

        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        client1 = clients[0]
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        for i in range(1, 5):
            out, rc = client1.exec_command(
                sudo=True, cmd="ceph orch ps --daemon_type mds -f json")
            daemon_ls_before = json.loads(out)
            daemon_count_before = len(daemon_ls_before)
            client1.exec_command(
                sudo=True,
                cmd=f"ceph fs volume create cephfs_{i}",
                check_ec=False,
            )
            fs_util.wait_for_mds_process(client1, f"cephfs_{i}")
            out_after, rc = client1.exec_command(
                sudo=True, cmd="ceph orch ps --daemon_type mds -f json")
            daemon_ls_after = json.loads(out_after)
            daemon_count_after = len(daemon_ls_after)
            assert daemon_count_after > daemon_count_before, (
                f"daemon count is reduced after creating FS. Demons count before : {daemon_count_before} ;"
                f"after:{daemon_count_after}"
                "Expectation is MDS daemons whould be more")
            fuse_mounting_dir = f"/mnt/cephfs_fuse{mounting_dir}_{i}/"
            fs_util.fuse_mount([clients[0]],
                               fuse_mounting_dir,
                               extra_params=f"--client_fs cephfs_{i}")
            client1.exec_command(
                sudo=True,
                cmd=
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400"
                f" --files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
                f"{fuse_mounting_dir}",
                long_running=True,
            )
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        for i in range(1, 5):
            fs_util.client_clean_up(
                "umount",
                fuse_clients=[clients[0]],
                mounting_dir=f"/mnt/cephfs_fuse{mounting_dir}_{i}/",
            )
        client1.exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true")
        [fs_util.remove_fs(client1, f"cephfs_{i}") for i in range(1, 5)]
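
A minimal sketch, not part of the original test: the MDS daemon-count assertion above could be expressed as a small predicate that reuses the same "ceph orch ps --daemon_type mds -f json" call. The helper name is an assumption made for this sketch; the exec_command interface is the one these examples already use.

import json


def mds_count_increased(client, count_before):
    # cephadm returns one JSON entry per managed MDS daemon.
    out, rc = client.exec_command(
        sudo=True, cmd="ceph orch ps --daemon_type mds -f json"
    )
    return len(json.loads(out)) > count_before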
Example n. 9
0
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573871   Explore ceph-fuse mount of more than 2 Filesystem on same client.
                    Also verify persistent mounts upon reboots.
    Pre-requisites :
    1. We need at least one client node to execute this test case

    Test Case Flow:
    1. Create 2 file systems if not present
    2. Mount both the file systems using fuse mount and fstab entries
    3. Reboot the node
    4. Validate that the mount points are still present
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        client1 = clients[0]
        client1.exec_command(sudo=True,
                             cmd="ceph fs volume create cephfs_new",
                             check_ec=False)
        fs_util.wait_for_mds_process(client1, "cephfs_new")
        total_fs = fs_util.get_fs_details(client1)
        if len(total_fs) < 2:
            log.error(
                "We can't proceed with the test case as we are not able to create 2 filesystems"
            )

        fs_names = [fs["name"] for fs in total_fs]
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"

        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f"--client_fs {fs_names[0]}",
            fstab=True,
        )
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_2,
            extra_params=f"--client_fs {fs_names[1]}",
            fstab=True,
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{fuse_mounting_dir_1}",
            long_running=True,
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{fuse_mounting_dir_2}",
            long_running=True,
        )
        fs_util.reboot_node(client1)
        out, rc = client1.exec_command(cmd="mount")
        mount_output = out.split()
        log.info("validate fuse mount:")
        assert fuse_mounting_dir_1.rstrip(
            "/") in mount_output, "fuse mount failed"
        assert fuse_mounting_dir_2.rstrip(
            "/") in mount_output, "fuse mount failed"
        client1.exec_command(
            sudo=True, cmd=f"mkdir -p {fuse_mounting_dir_1}/io_after_reboot")
        client1.exec_command(
            sudo=True, cmd=f"mkdir -p {fuse_mounting_dir_2}/io_after_reboot")
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{fuse_mounting_dir_1}/io_after_reboot",
            long_running=True,
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{fuse_mounting_dir_2}/io_after_reboot",
            long_running=True,
        )
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_2)
        commands = [
            "ceph config set mon mon_allow_pool_delete true",
            "ceph fs volume rm cephfs_new --yes-i-really-mean-it",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        client1.exec_command(sudo=True,
                             cmd="mv /etc/fstab.backup /etc/fstab",
                             check_ec=False)
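
A minimal sketch, not part of the original test: instead of splitting the full "mount" output as above, each persistent fuse mount could be checked after the reboot with mountpoint(1), which exits non-zero (and therefore fails exec_command) when the path is not an active mount point. The helper name is an assumption; it could be called right after fs_util.reboot_node(client1) with the two fuse mounting directories.

def assert_mounts_survived_reboot(client, *mount_dirs):
    for mount_dir in mount_dirs:
        # mountpoint succeeds only if the path is currently a mount point.
        client.exec_command(sudo=True, cmd=f"mountpoint {mount_dir.rstrip('/')}")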
Example n. 10
0
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Create the cephfs and cephfs-ec file systems using ceph fs volume create

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount the cephfs subvolume on the kernel client and run IO's
    4. Mount the cephfs-ec subvolume on the fuse client and run IO's
    5. Move data b/w FS created on Replicated Pool and EC_Pool

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. Remove the pools added as part of pool_layout
    3. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    4. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create SubVolumeGroups on each filesystem")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1"
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info("Create 2 Sub volumes on each of the pool with Size 5GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368709120",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_ec1",
                "size": "5368709120",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        log.info(
            "Mount 1 subvolume on kernel and 1 subvloume on Fuse → Client1")
        if build.startswith("5"):
            kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of subvolume on default filesystem")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_1,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.strip()}",
                extra_params=f",fs={default_fs}",
            )
            log.info("Get the path of subvolume on EC filesystem")
            fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_2 subvolgroup_ec1",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_1,
                extra_params=f" -r {subvol_path.strip()} --client_fs cephfs-ec",
            )
        else:
            kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of subvolume on default filesystem")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_1,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.strip()}",
            )
            log.info("Get the path of subvolume on EC filesystem")
            fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_2 subvolgroup_ec1",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_1,
                extra_params=f" -r {subvol_path.strip()}",
            )

        run_ios(clients[0],
                kernel_mounting_dir_1,
                file_name="dd_file1",
                bs="100M",
                count=20)
        run_ios(clients[0],
                fuse_mounting_dir_1,
                file_name="dd_file2",
                bs="100M",
                count=20)

        log.info(
            "Migrate data b/w EC pool and Replicated Pool and vice versa.")
        filepath1 = f"{kernel_mounting_dir_1}{clients[0].node.hostname}dd_file1"
        filepath2 = f"{fuse_mounting_dir_1}{clients[0].node.hostname}dd_file2"

        mv_bw_pools = [
            f"mv {filepath1} {fuse_mounting_dir_1}",
            f"mv {filepath2} {kernel_mounting_dir_1}",
        ]
        for cmd in mv_bw_pools:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Confirm if data b/w pools are migrated")
        verify_data_movement = [
            f" ls -l {kernel_mounting_dir_1}{clients[0].node.hostname}dd_file2",
            f" ls -l {fuse_mounting_dir_1}{clients[0].node.hostname}dd_file1",
        ]
        for cmd in verify_data_movement:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Clean up the system")
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[0]],
                                mounting_dir=kernel_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true")

        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
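
A minimal sketch, not part of the original test: a stricter variant of the "ls -l" verification above would compare md5 checksums before and after moving a file between the replicated-pool and EC-pool mounts. The helper name is an assumption; the exec_command interface and string output handling follow the example above.

import os


def move_and_verify(client, src_file, dst_dir):
    # Checksum the file, move it to the other filesystem's mount, and checksum it again.
    out, rc = client.exec_command(sudo=True, cmd=f"md5sum {src_file}")
    checksum_before = out.split()[0]
    client.exec_command(sudo=True, cmd=f"mv {src_file} {dst_dir}")
    moved_path = os.path.join(dst_dir, os.path.basename(src_file))
    out, rc = client.exec_command(sudo=True, cmd=f"md5sum {moved_path}")
    assert out.split()[0] == checksum_before, "checksum changed while moving between pools"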
Example n. 11
0
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573872   Explore kernel mount of more than 2 Filesystem on same client.
                    Also verify persistent mounts upon reboots.
    Pre-requisites :
    1. We need at least one client node to execute this test case

    Test Case Flow:
    1. Create 2 file systems if not present
    2. Mount both the file systems using kernel mount and fstab entries
    3. Reboot the node
    4. Validate that the mount points are still present
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        client1 = clients[0]
        client1.exec_command(sudo=True,
                             cmd="ceph fs volume create cephfs_new",
                             check_ec=False)
        total_fs = fs_util.get_fs_details(client1)
        if len(total_fs) < 2:
            log.error(
                "We can't proceed with the test case as we are not able to create 2 filesystems"
            )

        fs_names = [fs["name"] for fs in total_fs]
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            extra_params=f",fs={fs_names[0]}",
            fstab=True,
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_2,
            ",".join(mon_node_ips),
            extra_params=f",fs={fs_names[1]}",
            fstab=True,
        )
        fs_util.reboot_node(client1)
        out, rc = client1.exec_command(cmd="mount")
        mount_output = out.read().decode()
        mount_output = mount_output.split()
        log.info("validate kernel mount:")
        assert kernel_mounting_dir_1.rstrip(
            "/") in mount_output, "Kernel mount failed"
        assert kernel_mounting_dir_2.rstrip(
            "/") in mount_output, "Kernel mount failed"
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        fs_util.client_clean_up(
            "umount",
            kernel_clients=[clients[0]],
            mounting_dir=kernel_mounting_dir_1,
        )
        fs_util.client_clean_up(
            "umount",
            kernel_clients=[clients[0]],
            mounting_dir=kernel_mounting_dir_2,
        )

        commands = [
            "ceph config set mon mon_allow_pool_delete true",
            "ceph fs volume rm cephfs_new --yes-i-really-mean-it",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        client1.exec_command(sudo=True,
                             cmd="cp /etc/fstab.backup /etc/fstab",
                             check_ec=False)