Example #1
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Create cephfs and cephfs-ec filesystems (ceph fs volume create)

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount the subvolumes on both fuse and kernel clients and run IO's
    4. Move data between the FS created on the Replicated pool and the EC pool

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. Remove the pools added as part of pool_layout
    3. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    4. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires a minimum of 2 client nodes; only {len(clients)} found"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create SubVolumeGroups on each filesystem")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1"
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info("Create 2 Sub volumes on each of the pool with Size 5GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368709120",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_ec1",
                "size": "5368709120",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        log.info(
            "Mount 1 subvolume on kernel and 1 subvolume on Fuse → Client1")
        if build.startswith("5"):
            kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of subvolume on default filesystem")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_1,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.strip()}",
                extra_params=f",fs={default_fs}",
            )
            log.info("Get the path of subvolume on EC filesystem")
            fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_2 subvolgroup_ec1",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_1,
                extra_params=f" -r {subvol_path.strip()} --client_fs cephfs-ec",
            )
        else:
            kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of subvolume on default filesystem")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_1,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.strip()}",
            )
            log.info("Get the path of subvolume on EC filesystem")
            fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_2 subvolgroup_ec1",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_1,
                extra_params=f" -r {subvol_path.strip()}",
            )

        run_ios(clients[0],
                kernel_mounting_dir_1,
                file_name="dd_file1",
                bs="100M",
                count=20)
        run_ios(clients[0],
                fuse_mounting_dir_1,
                file_name="dd_file2",
                bs="100M",
                count=20)

        log.info(
            "Migrate data between the EC pool and the Replicated pool and vice versa.")
        filepath1 = f"{kernel_mounting_dir_1}{clients[0].node.hostname}dd_file1"
        filepath2 = f"{fuse_mounting_dir_1}{clients[0].node.hostname}dd_file2"

        mv_bw_pools = [
            f"mv {filepath1} {fuse_mounting_dir_1}",
            f"mv {filepath2} {kernel_mounting_dir_1}",
        ]
        for cmd in mv_bw_pools:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Confirm if data b/w pools are migrated")
        verify_data_movement = [
            f" ls -l {kernel_mounting_dir_1}{clients[0].node.hostname}dd_file2",
            f" ls -l {fuse_mounting_dir_1}{clients[0].node.hostname}dd_file1",
        ]
        for cmd in verify_data_movement:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Clean up the system")
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[0]],
                                mounting_dir=kernel_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true")

        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
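Example #1 drives IO through a module-level run_ios() helper that is not part of this excerpt. A minimal sketch of what it is assumed to do, based on how the test later references the resulting path ({mount_dir}{hostname}dd_file1): write a single dd file named <hostname><file_name> inside the given mount point. The helper name and signature are taken from the call sites above; the dd invocation itself is an assumption.

def run_ios(client, mounting_dir, file_name, bs, count):
    # Hypothetical sketch: create one file per call, named after the client
    # hostname plus file_name, of size bs * count, inside mounting_dir.
    client.exec_command(
        sudo=True,
        cmd=f"dd if=/dev/zero of={mounting_dir}{client.node.hostname}{file_name} "
        f"bs={bs} count={count}",
        long_running=True,
    )

Example #2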
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Create cephfs and cephfs-ec filesystems (ceph fs volume create)

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount subvolume on both fuse and kernel clients and run IO's
    4. ceph fs subvolume resize <vol_name> <subvolume_name> <new_size> [--group_name <subvol_group>]
    5. Mount subvolume on both fuse and kernel clients and run IO's
    6. Validate the resize.

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. Remove the pools added as part of pool_layout
    3. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    4. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires a minimum of 2 client nodes; only {len(clients)} found"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create SubVolumeGroups on each filesystem")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1"
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info(
            "Create 2 subvolumes of size 2GB in each subvolume group"
        )
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "2147483648",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_1",
                "size": "2147483648",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_ec1",
                "size": "2147483648",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_ec1",
                "size": "2147483648",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        log.info("Validate the Subvolume Size")
        for subvolume in subvolume_list:
            subvolume_size_subvol = fs_util.get_subvolume_info(
                client=clients[0], **subvolume)
            if int(subvolume.get("size", "infinite")) != int(
                    subvolume_size_subvol["bytes_quota"]):
                log.error(
                    f"Size mismatch for {subvolume.get('subvol_name')}. "
                    f"Expected size: {subvolume.get('size', 'infinite')}, "
                    f"Actual size: {subvolume_size_subvol['bytes_quota']}")
                return 1

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernel and 1 subvolume on Fuse → Client1"
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.strip()}",
        )

        log.info(
            "On EC, Mount 1 subvolumegroup/subvolume on kernel and 1 subvolume on Fuse → Client2"
        )
        if build.startswith("5"):
            kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_EC_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_3 subvolgroup_ec1",
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_2,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.strip()}",
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_4 subvolgroup_ec1",
            )
            fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_2,
                extra_params=f" -r {subvol_path.strip()} --client_fs cephfs-ec",
            )

        run_ios(
            clients[0],
            kernel_mounting_dir_1,
            file_name="dd_before",
            bs="100M",
            count=20,
        )
        run_ios(clients[0],
                fuse_mounting_dir_1,
                file_name="dd_before",
                bs="100M",
                count=20)
        run_ios(
            clients[1],
            kernel_mounting_dir_2,
            file_name="dd_before",
            bs="100M",
            count=20,
        )
        run_ios(clients[1],
                fuse_mounting_dir_2,
                file_name="dd_before",
                bs="100M",
                count=20)

        log.info("Resize the subvolumes to 5GB and add data more than 2GB")
        resize_subvolumes = [
            f"ceph fs subvolume resize {default_fs} subvol_1 5368709120 --group_name subvolgroup_1",
            f"ceph fs subvolume resize {default_fs} subvol_2 5368709120 --group_name subvolgroup_1",
            "ceph fs subvolume resize cephfs-ec subvol_3 5368709120 --group_name subvolgroup_ec1",
            "ceph fs subvolume resize cephfs-ec subvol_4 5368709120 --group_name subvolgroup_ec1",
        ]
        for cmd in resize_subvolumes:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Validate the Subvolume after Resize")
        for subvolume in subvolume_list:
            subvolume_size_subvol = fs_util.get_subvolume_info(
                client=clients[0], **subvolume)
            if 5368709120 != int(subvolume_size_subvol["bytes_quota"]):
                log.error(
                    f"Size mismatch for {subvolume.get('subvol_name')}. Expected size: 5368709120, "
                    f"Actual size: {subvolume_size_subvol['bytes_quota']}")
                return 1

        run_ios(clients[0],
                kernel_mounting_dir_1,
                file_name="dd_after",
                bs="100M",
                count=30)
        run_ios(clients[0],
                fuse_mounting_dir_1,
                file_name="dd_after",
                bs="100M",
                count=30)
        run_ios(clients[1],
                kernel_mounting_dir_2,
                file_name="dd_after",
                bs="100M",
                count=30)
        run_ios(clients[1],
                fuse_mounting_dir_2,
                file_name="dd_after",
                bs="100M",
                count=30)

        log.info("Clean up the system")
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[0]],
                                mounting_dir=kernel_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[1]],
                                mounting_dir=kernel_mounting_dir_2)

        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[1]],
                                mounting_dir=fuse_mounting_dir_2)

        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true")

        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
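The size checks in Example #2 compare the requested size against the bytes_quota field returned by fs_util.get_subvolume_info(). A minimal sketch of the same check done directly against the CLI, assuming `ceph fs subvolume info` JSON output and an exec_command() that returns the stdout text (some framework versions return a file-like object that needs .read().decode() instead):

import json

def get_bytes_quota(client, vol_name, subvol_name, group_name):
    # Query the subvolume metadata; bytes_quota is an integer, or the
    # string "infinite" when no quota is set.
    out, rc = client.exec_command(
        sudo=True,
        cmd=f"ceph fs subvolume info {vol_name} {subvol_name} "
        f"--group_name {group_name} --format json",
    )
    return json.loads(out)["bytes_quota"]

After a resize, the same field should report the new quota (5368709120 in Example #2).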
Example #3
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573501	Create a Cloned Volume using a snapshot

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the cephfs volume (ceph fs volume create cephfs) if it is not present
    3. Create 2 sub volume groups
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_1
             ceph fs subvolumegroup create cephfs subvolgroup_2
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_clone_status --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_clone_status snap_1 --group_name subvolgroup_1

    Clone Operations and Clone States:
    1. Create a clone in the default location.
        ceph fs subvolume snapshot clone cephfs subvol_clone_status snap_1 clone_status_1 --group_name subvolgroup_1
    2. Validate all the states of clone creation progress
        pending : Clone operation has not started
        in-progress : Clone operation is in progress
        complete : Clone operation has successfully finished
    3. Mount the cloned volume and check the contents
    4. Create a clone in a different subvolumegroup, i.e., subvolumegroup 2
        ceph fs subvolume snapshot clone cephfs subvol_clone_status snap_1 clone_status_1 --group_name subvolgroup_1
            --target_group_name subvolgroup_2
    5. Validate all the states of clone creation progress
        pending : Clone operation has not started
        in-progress : Clone operation is in progress
        complete : Clone operation has successfully finished
    6. Once all the clones have moved to the complete state, delete all the clones

    Clean-up:
    1. ceph fs subvolume snapshot rm <vol_name> <subvol_name> <snap_name> [--group_name <subvol_group_name>]
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires a minimum of 1 client node; only {len(clients)} found"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_clone_status_1"
            },
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_clone_status_2"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "group_name": "subvolgroup_clone_status_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_clone_status subvolgroup_clone_status_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_clone_status_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        clone_status_1 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_status_1",
            "group_name": "subvolgroup_clone_status_1",
        }
        fs_util.create_clone(client1, **clone_status_1)
        transition_states = fs_util.validate_clone_state(
            client1, clone_status_1)
        valid_state_flow = [
            ["pending", "in-progress", "complete"],
            ["in-progress", "complete"],
        ]
        if transition_states not in valid_state_flow:
            return 1
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} {clone_status_1['target_subvol_name']}",
        )
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_2,
            extra_params=f" -r {clonevol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"diff -qr {kernel_mounting_dir_1} {fuse_mounting_dir_2}")

        clone_status_2 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_status_2",
            "group_name": "subvolgroup_clone_status_1",
            "target_group_name": "subvolgroup_clone_status_2",
        }
        fs_util.create_clone(client1, **clone_status_2)
        transition_states = fs_util.validate_clone_state(
            client1, clone_status_2)
        if transition_states not in valid_state_flow:
            return 1
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} "
            f"{clone_status_2['target_subvol_name']} {clone_status_2['target_group_name']}",
        )
        fuse_mounting_dir_3 = f"/mnt/cephfs_fuse{mounting_dir}_3/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_3,
            extra_params=f" -r {clonevol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"diff -qr {kernel_mounting_dir_1} {fuse_mounting_dir_3}")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "clone_status_1"
            },
            {
                "vol_name": default_fs,
                "subvol_name": "clone_status_2",
                "group_name": "subvolgroup_clone_status_2",
            },
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
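fs_util.validate_clone_state() in Example #3 tracks the pending / in-progress / complete states described in the docstring. A minimal polling sketch of that idea, assuming direct use of `ceph fs clone status` (which reports the clone state as JSON) rather than the FsUtils wrapper:

import json
import time

def wait_for_clone_complete(client, vol_name, clone_name, timeout=600, interval=10):
    # Poll the clone status until it reaches "complete" or the timeout expires.
    # Expected states are: pending, in-progress, complete.
    end_time = time.time() + timeout
    while time.time() < end_time:
        out, rc = client.exec_command(
            sudo=True,
            cmd=f"ceph fs clone status {vol_name} {clone_name} --format json",
        )
        state = json.loads(out)["status"]["state"]
        if state == "complete":
            return
        time.sleep(interval)
    raise TimeoutError(f"clone {clone_name} did not reach the complete state")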
Example #4
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573499	Remove the original volume after cloning and verify the data is accessible from cloned volume

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the cephfs volume (ceph fs volume create cephfs) if it is not present
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_remove_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_remove_vol_1

    Test Case Flow:
    1. Copy the contents of the subvolume to local directory
    2. Create Clone out of subvolume
    3. Delete the snapshot and subvolume
    4. Mount the cloned volume
    5. Validate the contents of cloned volume with contents present in local directory

    Clean Up:
    1. Delete Cloned volume
    2. Delete subvolumegroup
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires a minimum of 1 client node; only {len(clients)} found"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_remove_vol_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_remove_vol",
            "group_name": "subvolgroup_remove_vol_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_remove_vol subvolgroup_remove_vol_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_remove_vol",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_remove_vol_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        client1.exec_command(sudo=True, cmd=f"mkdir -p /tmp/{mounting_dir}")
        client1.exec_command(
            sudo=True,
            cmd=f"cp -r {kernel_mounting_dir_1}/* /tmp/{mounting_dir}")
        log.info("Clone a subvolume from snapshot")
        remove_vol_1 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_remove_vol",
            "snap_name": "snap_1",
            "target_subvol_name": "remove_vol_1",
            "group_name": "subvolgroup_remove_vol_1",
        }
        fs_util.create_clone(client1, **remove_vol_1)
        fs_util.validate_clone_state(client1, remove_vol_1)
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} {remove_vol_1['target_subvol_name']}",
        )
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_2,
            extra_params=f" -r {clonevol_path.read().decode().strip()}",
        )
        fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        client1.exec_command(
            sudo=True,
            cmd=f"diff -qr /tmp/{mounting_dir} {fuse_mounting_dir_2}")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "remove_vol_1"
            },
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1,
                                **snapshot,
                                validate=False,
                                check_ec=False)
        fs_util.remove_subvolume(client1,
                                 **subvolume,
                                 validate=False,
                                 check_ec=False)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
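Example #4's verification hinges on copying the original subvolume contents aside before the snapshot and subvolume are removed, then diffing the copy against the mounted clone. A condensed sketch of that pattern (the helper name and scratch path are illustrative only):

def copy_aside_and_verify(client, source_mount, clone_mount, scratch_dir):
    # Keep a local copy of the original data, then compare it with the clone.
    # diff -qr exits non-zero on any mismatch, which exec_command is assumed
    # to surface as a failure.
    client.exec_command(sudo=True, cmd=f"mkdir -p {scratch_dir}")
    client.exec_command(sudo=True, cmd=f"cp -r {source_mount}* {scratch_dir}")
    client.exec_command(sudo=True, cmd=f"diff -qr {scratch_dir} {clone_mount}")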
Example #5
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573418	Create a Snapshot, reboot the node and rollback the snapshot

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the cephfs volume (ceph fs volume create cephfs) if it is not present
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_reboot_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume. We will add known data as we are going to verify the files
        Ex: create_file_data()
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_reboot_snapshot_1

    Script Flow:
    1. Mount the subvolume on the client using Kernel
    2. Write data into the mount point
    3. Get the checksum of the files inside the mount point
    4. Reboot the node
    5. Mount the subvolume again and revert to the snapshot
    6. Get the checksum of the files
    7. Validate the checksums
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires a minimum of 1 client node; only {len(clients)} found"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_reboot_snapshot_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_reboot_snapshot",
            "group_name": "subvolgroup_reboot_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_reboot_snapshot subvolgroup_reboot_snapshot_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        fs_util.create_file_data(client1, kernel_mounting_dir_1, 3, "snap1",
                                 "snap_1_data ")
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_reboot_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_reboot_snapshot_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        files_checksum_before_reboot = fs_util.get_files_and_checksum(
            client1, f"/mnt/cephfs_kernel{mounting_dir}_1")
        fs_util.reboot_node(client1)
        kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_reboot_snapshot subvolgroup_reboot_snapshot_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_2,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"cd {kernel_mounting_dir_2};cp .snap/_snap_1_*/* .")
        files_checksum_after_reboot = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_2)
        if files_checksum_before_reboot != files_checksum_after_reboot:
            log.error("checksum is not matching after snapshot1 revert")
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
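Example #5 does not use a dedicated rollback command; it restores files by copying them out of the hidden .snap directory. Because the subvolume snapshot is taken at the subvolume root rather than at the mounted data directory, it appears inside the mount as .snap/_<snap_name>_<inode>, which is why the test globs for _snap_1_*. A small sketch of that restore step (the helper name is illustrative):

def restore_from_snapshot(client, mount_dir, snap_name):
    # Copy the snapshot contents from the .snap directory back into the
    # live tree, effectively reverting the files to the snapshot state.
    client.exec_command(
        sudo=True,
        cmd=f"cd {mount_dir}; cp -r .snap/_{snap_name}_*/* .",
    )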
Example #6
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573415	Test to validate the cli - ceph fs set <fs_name> allow_new_snaps true

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the cephfs volume (ceph fs volume create cephfs) if it is not present
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_flag_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_flag_snapshot_1

    Test Case Flow:
    1. Test allow_new_snaps value and try creating the snapshots

    Clean Up:
    1. Del all the snapshots created
    2. Del Subvolumes
    3. Del SubvolumeGroups
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires a minimum of 1 client node; only {len(clients)} found"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_flag_snapshot_1"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "group_name": "subvolgroup_flag_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_retain_snapshot subvolgroup_flag_snapshot_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_flag_snapshot_1",
        }
        log.info("Test allow_new_snaps value and creating the snapshots")
        client1.exec_command(
            sudo=True, cmd=f"ceph fs set {default_fs} allow_new_snaps false"
        )
        cmd_out, cmd_rc = fs_util.create_snapshot(
            client1, **snapshot, check_ec=False, validate=False
        )
        if cmd_rc == 0:
            raise CommandFailed(
                f"ceph fs set {default_fs} allow_new_snaps false is not working properly"
            )
        client1.exec_command(
            sudo=True, cmd=f"ceph fs set {default_fs} allow_new_snaps true"
        )
        fs_util.create_snapshot(client1, **snapshot)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        fs_util.remove_snapshot(client1, **snapshot, validate=False, force=True)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
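The core assertion in Example #6 is that snapshot creation must fail while allow_new_snaps is disabled. A condensed sketch of that check, reusing the create_snapshot wrapper and CommandFailed exception already used in the example:

def verify_allow_new_snaps_flag(fs_util, client, fs_name, snapshot):
    # With allow_new_snaps disabled, snapshot creation must not succeed.
    client.exec_command(sudo=True, cmd=f"ceph fs set {fs_name} allow_new_snaps false")
    out, rc = fs_util.create_snapshot(client, **snapshot, check_ec=False, validate=False)
    if rc == 0:
        raise CommandFailed("snapshot creation succeeded with allow_new_snaps=false")
    # Re-enable the flag; snapshot creation should now work.
    client.exec_command(sudo=True, cmd=f"ceph fs set {fs_name} allow_new_snaps true")
    fs_util.create_snapshot(client, **snapshot)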
Example #7
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573402	Test to validate quota.max_bytes: Create a FS and create 10 directories and
                    mount them on kernel client and fuse client (5 mounts each).
                    Set the max bytes quota to a number (say 1GB) and fill data until it reaches the limit and
                    verify that the set quota limit is working fine. Similarly, set different limits on
                    different directories and verify the quota.

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the cephfs volume (ceph fs volume create cephfs) if it is not present
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_1 --size 5368706371 --group_name subvolgroup_1
    5. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1

    Test Case Flow:
    1. Mount the subvolume_1 on the client using fuse
    2. Mount the subvolume_2 on the client using kernel
    3. set quota attribute 1gb on both mount points
    4. Create 3gb files and check it fails
    5. Perform same on kernel mount
    6. Create a directory inside fuse mount and set quota attribute and verify
    7. Create a directory inside kernel mount and set quota attribute and verify
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires a minimum of 1 client node; only {len(clients)} found"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")

        log.info("Cheking the file quota on root directory")
        root_folder_fuse_mount = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount([clients[0]], root_folder_fuse_mount)

        clients[0].exec_command(
            sudo=True,
            cmd=f"rm -rf {root_folder_fuse_mount}*;mkdir {root_folder_fuse_mount}test_fuse;",
        )
        fs_util.set_quota_attrs(
            clients[0], 20, 1073741824, f"{root_folder_fuse_mount}test_fuse"
        )
        quota_attrs = fs_util.get_quota_attrs(
            clients[0], f"{root_folder_fuse_mount}test_fuse"
        )
        fs_util.byte_quota_test(
            clients[0], f"{root_folder_fuse_mount}test_fuse", quota_attrs
        )
        log.info("Setting Back the file quota to 0 on root directory ")
        fs_util.set_quota_attrs(
            clients[0], "0", "0", f"{root_folder_fuse_mount}test_fuse"
        )

        root_folder_kernel_mount = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]], root_folder_kernel_mount, ",".join(mon_node_ips)
        )
        clients[0].exec_command(
            sudo=True,
            cmd=f"rm -rf {root_folder_kernel_mount}*;mkdir {root_folder_kernel_mount}test_kernel",
        )
        fs_util.set_quota_attrs(
            clients[0], 20, 1073741824, f"{root_folder_kernel_mount}test_kernel"
        )
        quota_attrs = fs_util.get_quota_attrs(
            clients[0], f"{root_folder_kernel_mount}test_kernel"
        )
        fs_util.byte_quota_test(
            clients[0], f"{root_folder_kernel_mount}test_kernel", quota_attrs
        )
        log.info("Setting Back the file quota to 0 on root directory ")
        fs_util.set_quota_attrs(
            clients[0], "0", "0", f"{root_folder_kernel_mount}test_kernel"
        )

        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_quota_byte_limit_1"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_fuse",
                "group_name": "subvolgroup_quota_byte_limit_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_bytes_kernel",
                "group_name": "subvolgroup_quota_byte_limit_1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_bytes_kernel subvolgroup_quota_byte_limit_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_byte_fuse subvolgroup_quota_byte_limit_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"

        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.strip()}",
        )

        fs_util.set_quota_attrs(clients[0], 200, 1073741824, kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], kernel_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        fs_util.set_quota_attrs(clients[0], 100, 1073741824, fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        clients[0].exec_command(
            sudo=True, cmd=f"cd {fuse_mounting_dir_1};rm -rf *;mkdir test;"
        )
        fs_util.set_quota_attrs(
            clients[0], 30, 1073741824, f"{fuse_mounting_dir_1}/test"
        )
        quota_attrs = fs_util.get_quota_attrs(clients[0], f"{fuse_mounting_dir_1}/test")
        fs_util.byte_quota_test(clients[0], f"{fuse_mounting_dir_1}/test", quota_attrs)

        clients[0].exec_command(
            sudo=True, cmd=f"cd {kernel_mounting_dir_1};rm -rf *;mkdir test;"
        )
        fs_util.set_quota_attrs(
            clients[0], 30, 1073741824, f"{kernel_mounting_dir_1}/test"
        )
        quota_attrs = fs_util.get_quota_attrs(
            clients[0], f"{kernel_mounting_dir_1}/test"
        )
        fs_util.byte_quota_test(
            clients[0], f"{kernel_mounting_dir_1}/test", quota_attrs
        )

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        if "5" in build:
            fs_util.set_quota_attrs(clients[0], "0", "0", root_folder_fuse_mount)
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
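set_quota_attrs(), get_quota_attrs() and byte_quota_test() in Example #7 are FsUtils wrappers; the underlying mechanism is CephFS directory quotas stored as extended attributes. A minimal sketch of setting and reading them directly, assuming setfattr/getfattr are installed on the client (a value of 0 removes the limit):

def set_quota_xattrs(client, path, max_files, max_bytes):
    # CephFS quotas are plain xattrs on the directory.
    client.exec_command(
        sudo=True, cmd=f"setfattr -n ceph.quota.max_files -v {max_files} {path}")
    client.exec_command(
        sudo=True, cmd=f"setfattr -n ceph.quota.max_bytes -v {max_bytes} {path}")

def get_max_bytes_quota(client, path):
    # Returns the byte quota currently set on the directory.
    out, rc = client.exec_command(
        sudo=True, cmd=f"getfattr --only-values -n ceph.quota.max_bytes {path}")
    return int(out)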
Example #8
def run(ceph_cluster, **kw):
    """
    Pre-requisites:
    1. Create 3 cephfs volumes
       ceph fs volume create <vol_name>
       ceph orch apply mds <fs_name> --placement='<no. of mds> <mds_nodes...>'

    Test operation:
    1. Create client1 restricted to first cephfs
       ceph fs authorize <fs_name> client.<client_id> <path-in-cephfs> rw
    2. Create client2 restricted to second cephfs
    3. Create client3 restricted to third cephfs
    4. Get filesystem information using client1
    5. Ensure only first cephfs info is shown
    6. Get filesystem information using client2
    7. Ensure only second cephfs info is shown
    8. Get filesystem information using client3
    9. Ensure only third cephfs info is shown

    Clean-up:
    1. Remove third cephfs
    2. Remove all the cephfs mounts
    3. Remove all the clients
    """
    try:
        tc = "CEPH-83573875"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))
        mdss = ceph_cluster.get_ceph_objects("mds")

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        mds1 = mdss[0].node.hostname
        mds2 = mdss[1].node.hostname
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        fs1 = "cephfs"
        fs2 = "cephfs-ec"
        fs3 = "Ceph_fs_new"
        commands = [
            f"ceph fs volume create {fs3}",
            f"ceph orch apply mds {fs3} --placement='2 {mds1} {mds2}'",
        ]
        for command in commands:
            err = clients[0].exec_command(sudo=True,
                                          cmd=command,
                                          long_running=True)
            if err:
                return 1
            wait_for_process(client=clients[0],
                             process_name=fs3,
                             ispresent=True)
        log.info(f"Creating client authorized to {fs1}")
        fs_util.fs_client_authorize(client1, fs1, "client1", "/", "rw")
        log.info(f"Creating client authorized to {fs2}")
        fs_util.fs_client_authorize(client1, fs2, "client2", "/", "rw")
        log.info(f"Creating client authorized to {fs3}")
        fs_util.fs_client_authorize(client1, fs3, "client3", "/", "rw")
        log.info("Verifying file system information for client1")
        command = (
            "ceph auth get client.client1 -o /etc/ceph/ceph.client.client1.keyring"
        )
        client1.exec_command(sudo=True, cmd=command)
        command = "ceph fs ls -n client.client1 -k /etc/ceph/ceph.client.client1.keyring --format json"
        out, rc = client1.exec_command(sudo=True, cmd=command)
        output = json.loads(out)
        validate_fs_info(fs1, output)
        log.info("Verifying file system information for client2")
        command = (
            "ceph auth get client.client2 -o /etc/ceph/ceph.client.client2.keyring"
        )
        client1.exec_command(sudo=True, cmd=command)
        command = "ceph fs ls -n client.client2 -k /etc/ceph/ceph.client.client2.keyring --format json"
        out, rc = client1.exec_command(sudo=True, cmd=command)
        output = json.loads(out)
        validate_fs_info(fs2, output)
        log.info("Verifying file system information for client3")
        command = (
            "ceph auth get client.client3 -o /etc/ceph/ceph.client.client3.keyring"
        )
        client1.exec_command(sudo=True, cmd=command)
        command = "ceph fs ls -n client.client3 -k /etc/ceph/ceph.client.client3.keyring --format json"
        out, rc = client1.exec_command(sudo=True, cmd=command)
        output = json.loads(out)
        validate_fs_info(fs3, output)
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up the system")
        commands = [
            "ceph config set mon mon_allow_pool_delete true",
            f"ceph fs volume rm {fs3} --yes-i-really-mean-it",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)

        for num in range(1, 4):
            client1.exec_command(sudo=True,
                                 cmd=f"ceph auth rm client.client{num}")
Example #9
def run(ceph_cluster, **kw):
    try:
        tc = "CEPH-83574483"
        log.info("Running cephfs %s test case" % (tc))

        config = kw.get("config")
        rhbuild = config.get("rhbuild")
        from tests.cephfs.cephfs_utilsV1 import FsUtils

        fs_util = FsUtils(ceph_cluster)
        client = ceph_cluster.get_ceph_objects("client")
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        fs_count = 1
        fs_name = "cephfs"
        client_number = 1
        """
        Testing multiple cephfs client authorize for 5.x and
        Testing single cephfs client authorize for 4.x
        """
        while fs_count != 3:
            """
            Setting fs name for 4.x and
            Setting "Cephfs name" parameter for kernel & fuse mount for 5.x
            Leaving "Cephfs name" parameter empty for 4.x as parameter not supported in 4.x
            """
            if "4." in rhbuild:
                fs_name = "cephfs_new"
                kernel_fs_para = ""
                fuse_fs_para = ""
            else:
                kernel_fs_para = f",fs={fs_name}"
                fuse_fs_para = f" --client_fs {fs_name}"
            log.info(f"Testing client authorize for {fs_name}")
            # Create client with read-write permission on "/" directory
            mount_points = []
            client_name = "Client_" + str(client_number)
            kernel_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
            )
            fuse_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_lowercase + string.digits) for i in range(5)
            )
            mount_points.extend([kernel_mount_dir, fuse_mount_dir])
            log.info(
                f"Testing {client_name} with read-write permission on root directory"
            )
            fs_util.fs_client_authorize(client[0], fs_name, client_name, "/", "rw")
            # Mount cephfs on kernel & fuse client
            fs_util.kernel_mount(
                client,
                kernel_mount_dir,
                mon_node_ip,
                new_client_hostname=client_name,
                extra_params=kernel_fs_para,
            )
            fs_util.fuse_mount(
                client,
                fuse_mount_dir,
                new_client_hostname=client_name,
                extra_params=fuse_fs_para,
            )
            # Create directories & files inside them for this & next test scenarios
            for num in range(1, 4):
                log.info("Creating Directories")
                out, rc = client[0].exec_command(
                    sudo=True, cmd="mkdir %s/%s_%d" % (kernel_mount_dir, "dir", num)
                )
                out, rc = client[0].exec_command(
                    sudo=True,
                    cmd=f"dd if=/dev/zero of={kernel_mount_dir}/dir_{num}/file_{num} bs=10M count=10",
                )
            # Test read & write opearions on "/" directory on both kernel & fuse mount
            rc = test_read_write_op(
                client[0], kernel_mount_dir, fuse_mount_dir, client_name
            )
            if rc == 1:
                return 1
            log.info(f"Permissions set for client {client_name} is working")
            client_number += 1
            # Create client with read permission on "/" directory & read-write permission on "dir1" directory
            client_name = "Client_" + str(client_number)
            kernel_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
            )
            fuse_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_lowercase + string.digits) for i in range(5)
            )
            mount_points.extend([kernel_mount_dir, fuse_mount_dir])
            log.info(
                f"Testing {client_name} with read permission on root & read-write permission on /dir_1"
            )
            fs_util.fs_client_authorize(
                client[0], fs_name, client_name, "/", "r", extra_params=" /dir_1 rw"
            )
            # Mount cephfs on kernel & fuse client
            fs_util.kernel_mount(
                client,
                kernel_mount_dir,
                mon_node_ip,
                new_client_hostname=client_name,
                extra_params=kernel_fs_para,
            )
            fs_util.fuse_mount(
                client,
                fuse_mount_dir,
                new_client_hostname=client_name,
                extra_params=fuse_fs_para,
            )
            # Verify write operation on "/" directory fails
            rc = verify_write_failure(
                client[0], kernel_mount_dir, fuse_mount_dir, client_name
            )
            if rc == 1:
                return 1
            # Test read operation "/" directory & read-write operation on "dir1" directory
            commands = [
                f"dd if={fuse_mount_dir}/file of={fuse_mount_dir}/dir_1/file_copy_2 bs=10M count=10",
                f"dd if={kernel_mount_dir}/file of={kernel_mount_dir}/dir_1/file_copy_3 bs=10M count=10",
            ]
            for command in commands:
                _, err = client[0].exec_command(
                    sudo=True, cmd=command, long_running=True
                )
                if err:
                    log.error(
                        f"Permissions set for client {client_name} is not working"
                    )
                    return 1
            log.info(f"Permissions set for client {client_name} is working")
            client_number += 1
            # Create client with read permission on "dir_2" directory
            client_name = "Client_" + str(client_number)
            kernel_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
            )
            fuse_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_lowercase + string.digits) for i in range(5)
            )
            mount_points.extend([kernel_mount_dir, fuse_mount_dir])
            log.info(
                f"Testing {client_name} with read-write permission on /dir_2 directory"
            )
            fs_util.fs_client_authorize(client[0], fs_name, client_name, "/dir_2", "rw")
            # Mount cephfs on kernel & fuse client on sub_directory "dir_2"
            fs_util.kernel_mount(
                client,
                kernel_mount_dir,
                mon_node_ip,
                new_client_hostname=client_name,
                sub_dir="dir_2",
                extra_params=kernel_fs_para,
            )
            fs_util.fuse_mount(
                client,
                fuse_mount_dir,
                new_client_hostname=client_name,
                extra_params=f" -r /dir_2 {fuse_fs_para}",
            )
            # Verify mount on root directory fails
            kernel_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
            )
            fuse_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_lowercase + string.digits) for i in range(5)
            )
            try:
                fs_util.kernel_mount(
                    client,
                    kernel_mount_dir,
                    mon_node_ip,
                    new_client_hostname=client_name,
                    extra_params=kernel_fs_para,
                )
            except AssertionError as e:
                log.info(e)
                log.info(
                    f"Permissions set for client {client_name} is working for kernel mount"
                )
            except CommandFailed as e:
                log.info(e)
                err = str(e)
                err = err.split()
                if "mount" in err:
                    log.info(
                        f"Permissions set for client {client_name} is working for kernel mount"
                    )
                else:
                    log.info(traceback.format_exc())
                    return 1
            except Exception as e:
                log.info(e)
                log.info(traceback.format_exc())
                return 1
            else:
                log.error(
                    f"Permissions set for client {client_name} is not working for kernel mount"
                )
                return 1
            try:
                fs_util.fuse_mount(
                    client,
                    fuse_mount_dir,
                    new_client_hostname=client_name,
                    extra_params=fuse_fs_para,
                )
            except AssertionError as e:
                log.info(e)
                log.info(
                    f"Permissions set for client {client_name} is working for fuse mount"
                )
            except CommandFailed as e:
                log.info(e)
                err = str(e)
                err = err.split()
                if "mount" in err:
                    log.info(
                        f"Permissions set for client {client_name} is working for fuse mount"
                    )
                else:
                    log.info(traceback.format_exc())
                    return 1
            except Exception as e:
                log.info(e)
                log.info(traceback.format_exc())
                return 1
            else:
                log.error(
                    f"Permissions set for client {client_name} is not working for fuse mount"
                )
                return 1
            # Test read & write opearions on kernel & fuse mount
            commands = [
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top {kernel_mount_dir}",
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation read --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top {kernel_mount_dir}",
                f"dd if=/dev/zero of={fuse_mount_dir}/file bs=10M count=10",
                f"dd if={fuse_mount_dir}/file of={fuse_mount_dir}/file bs=10M count=10",
            ]
            for command in commands:
                _, err = client[0].exec_command(
                    sudo=True, cmd=command, long_running=True
                )
                if err:
                    log.error(
                        f"Permissions set for client {client_name} is not working"
                    )
                    return 1
            log.info(f"Permissions set for client {client_name} is working")
            client_number += 1
            # Create client with read permission on "dir_3" directory
            client_name = "Client_" + str(client_number)
            kernel_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
            )
            fuse_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_lowercase + string.digits) for i in range(5)
            )
            log.info(f"Testing {client_name} with read permission on /dir_3 directory")
            fs_util.fs_client_authorize(client[0], fs_name, client_name, "/dir_3", "r")
            # Verify mount on root directory fails
            try:
                fs_util.kernel_mount(
                    client,
                    kernel_mount_dir,
                    mon_node_ip,
                    new_client_hostname=client_name,
                    extra_params=kernel_fs_para,
                )
            except AssertionError as e:
                log.info(e)
                log.info(
                    f"Permissions set for client {client_name} is working for kernel mount"
                )
            except CommandFailed as e:
                log.info(e)
                err = str(e)
                err = err.split()
                if "mount" in err:
                    log.info(
                        f"Permissions set for client {client_name} is working for kernel mount"
                    )
                else:
                    log.info(traceback.format_exc())
                    return 1
            except Exception as e:
                log.info(e)
                log.info(traceback.format_exc())
                return 1
            else:
                log.error(f"Permissions set for client {client_name} is not working")
                return 1
            try:
                fs_util.fuse_mount(
                    client,
                    fuse_mount_dir,
                    new_client_hostname=client_name,
                    extra_params=fuse_fs_para,
                )
            except AssertionError as e:
                log.info(e)
                log.info(
                    f"Permissions set for client {client_name} is working for fuse mount"
                )
            except CommandFailed as e:
                log.info(e)
                err = str(e)
                err = err.split()
                if "mount" in err:
                    log.info(
                        f"Permissions set for client {client_name} is working for fuse mount"
                    )
                else:
                    log.info(traceback.format_exc())
                    return 1
            except Exception as e:
                log.info(e)
                log.info(traceback.format_exc())
                return 1
            else:
                log.error(f"Permissions set for client {client_name} is not working")
                return 1
            # Mount cephfs on kernel & fuse client on sub_directory "dir_3"
            kernel_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
            )
            fuse_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_lowercase + string.digits) for i in range(5)
            )
            mount_points.extend([kernel_mount_dir, fuse_mount_dir])
            fs_util.kernel_mount(
                client,
                kernel_mount_dir,
                mon_node_ip,
                new_client_hostname=client_name,
                sub_dir="dir_3",
                extra_params=kernel_fs_para,
            )
            fs_util.fuse_mount(
                client,
                fuse_mount_dir,
                new_client_hostname=client_name,
                extra_params=f" -r /dir_3 {fuse_fs_para}",
            )
            # Verify write operations on kernel & fuse mount fail
            rc = verify_write_failure(
                client[0], kernel_mount_dir, fuse_mount_dir, client_name
            )
            if rc == 1:
                return 1
            # Verify read operations on kernel & fuse mount
            commands = [
                f"dd if={fuse_mount_dir}/file_3 of=~/file_3 bs=10M count=10",
                f"dd if={kernel_mount_dir}/file_3 of=~/file_33 bs=10M count=10",
            ]
            for command in commands:
                _, err = client[0].exec_command(
                    sudo=True, cmd=command, long_running=True
                )
                if err:
                    log.error(
                        f"Permissions set for client {client_name} is not working"
                    )
                    return 1
            log.info(f"Permissions set for client {client_name} is working")
            log.info(f"Clean up the system for {fs_name}")
            out, rc = client[0].exec_command(
                sudo=True, cmd=f"rm -rf {mount_points[1]}/*"
            )
            for mount_point in mount_points:
                out, rc = client[0].exec_command(sudo=True, cmd=f"umount {mount_point}")
                if "5." in rhbuild:
                    out, err = client[1].exec_command(
                        sudo=True, cmd=f"umount {mount_point}"
                    )
            for mount_point in mount_points:
                out, rc = client[0].exec_command(
                    sudo=True, cmd=f"rm -rf {mount_point}/"
                )
                if "5." in rhbuild:
                    out, err = client[1].exec_command(
                        sudo=True, cmd=f"rm -rf {mount_point}/"
                    )
            if "4." in rhbuild:
                break
            fs_name = "cephfs-ec"
            fs_count += 1
            client_number += 1
        for num in range(1, 5):
            out, err = client[0].exec_command(
                sudo=True, cmd=f"ceph auth rm client.client_{num}"
            )
        if "5." in rhbuild:
            for num in range(5, 9):
                out, err = client[0].exec_command(
                    sudo=True, cmd=f"ceph auth rm client.client_{num}"
                )
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
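
A sketch of the cap patterns this test walks through, assuming fs_util.fs_client_authorize wraps the "ceph fs authorize" command quoted in later docstrings in this file; it only prints the CLI equivalents of the four permission combinations exercised above.

# Sketch only: CLI equivalents of the path-restricted caps used above.
cap_patterns = [
    ("Client_1", "/ rw"),           # read-write on the whole file system
    ("Client_2", "/ r /dir_1 rw"),  # read-only root, read-write on /dir_1
    ("Client_3", "/dir_2 rw"),      # read-write restricted to /dir_2
    ("Client_4", "/dir_3 r"),       # read-only restricted to /dir_3
]
for name, caps in cap_patterns:
    print(f"ceph fs authorize cephfs client.{name} {caps}")
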
Example #10
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Create cephfs and cephfs-ec file systems (ceph fs volume create <vol_name>)

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount subvolume on both fuse and kernel clients and run IO's

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. Remove the pools added as part of pool_layout
    3. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    4. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create 2 pools, 1 - Replicated , 1 - EC Data Pool")
        create_pools = [
            "ceph osd pool create cephfs-data-pool-layout",
            "ceph osd pool create cephfs-data-pool-layout-ec 64 erasure",
            "ceph osd pool set cephfs-data-pool-layout-ec allow_ec_overwrites true",
        ]
        for cmd in create_pools:
            clients[0].exec_command(sudo=True, cmd=cmd)
        log.info("Add created data pools to each of the filesystem")
        add_pool_to_FS = [
            "ceph fs add_data_pool cephfs cephfs-data-pool-layout",
            "ceph fs add_data_pool cephfs-ec cephfs-data-pool-layout-ec",
        ]
        for cmd in add_pool_to_FS:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create cephfs subvolumegroup with desired data pool_layout")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1",
                "pool_layout": "cephfs-data-pool-layout",
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1",
                "pool_layout": "cephfs-data-pool-layout-ec",
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info(
            "Create 2 Sub volumes on each of the subvolume group Size 5 GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernel and 1 subvloume on Fuse → Client1"
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )

        log.info(
            "On EC,Mount 1 subvolumegroup/subvolume on kernal and 1 subvloume on Fuse → Client2"
        )
        if build.startswith("5"):
            kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_EC_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_3 subvolgroup_ec1",
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_2,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.read().decode().strip()}",
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_4 subvolgroup_ec1",
            )
            fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_2,
                extra_params=
                f" -r {subvol_path.read().decode().strip()} --client_fs cephfs-ec",
            )

        log.info(
            "Check the Pool status before the IO's to confirm if no IO's are going on on the pool attached"
        )
        get_pool_status_before = fs_util.get_pool_df(
            client=clients[0],
            pool_name="cephfs-data-pool-layout",
            vol_name=default_fs)
        get_pool_status_before_EC = fs_util.get_pool_df(
            client=clients[1],
            pool_name="cephfs-data-pool-layout-ec",
            vol_name="cephfs-ec",
        )

        run_ios(clients[0], kernel_mounting_dir_1)
        run_ios(clients[0], fuse_mounting_dir_1)
        run_ios(clients[1], kernel_mounting_dir_2)
        run_ios(clients[1], fuse_mounting_dir_2)

        log.info(
            "Check the Pool status and verify the IO's are going only to the Pool attached"
        )
        get_pool_status_after = fs_util.get_pool_df(
            client=clients[0],
            pool_name="cephfs-data-pool-layout",
            vol_name=default_fs)
        get_pool_status_after_EC = fs_util.get_pool_df(
            client=clients[1],
            pool_name="cephfs-data-pool-layout-ec",
            vol_name="cephfs-ec",
        )

        if get_pool_status_after["used"] < get_pool_status_before["used"]:
            log.error("Pool attached is unused")
            return 1
        if get_pool_status_after_EC["used"] < get_pool_status_before_EC["used"]:
            log.error("EC pool attached is unused")
            return 1

        log.info("Clean up the system")
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[0]],
                                mounting_dir=kernel_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[1]],
                                mounting_dir=kernel_mounting_dir_2)

        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[1]],
                                mounting_dir=fuse_mounting_dir_2)

        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true")

        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)
        log.info(
            "Remove the data pools from the filesystem and delete the created pools."
        )
        rm_pool_from_FS = [
            "ceph fs rm_data_pool cephfs cephfs-data-pool-layout",
            "ceph fs rm_data_pool cephfs-ec cephfs-data-pool-layout-ec",
            "ceph osd pool delete cephfs-data-pool-layout "
            "cephfs-data-pool-layout --yes-i-really-really-mean-it-not-faking",
            "ceph osd pool delete cephfs-data-pool-layout-ec "
            "cephfs-data-pool-layout-ec --yes-i-really-really-mean-it-not-faking",
        ]
        for cmd in rm_pool_from_FS:
            clients[0].exec_command(sudo=True, cmd=cmd)

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
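
The pool_layout flow above comes down to a short CLI sequence; this sketch only assembles the command strings, taken from the test's docstring and fs_util calls, and prints them.

# Sketch only: create a data pool, attach it to the file system, then create
# a subvolumegroup/subvolume on that pool layout (needs a live cluster to run
# for real).
fs_name, pool, group = "cephfs", "cephfs-data-pool-layout", "subvolgroup_1"
pool_layout_flow = [
    f"ceph osd pool create {pool}",
    f"ceph fs add_data_pool {fs_name} {pool}",
    f"ceph fs subvolumegroup create {fs_name} {group} --pool_layout {pool}",
    f"ceph fs subvolume create {fs_name} subvol_1 --size 5368706371 --group_name {group}",
]
print("\n".join(pool_layout_flow))
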
Example #11
def run(ceph_cluster, **kw):
    """
    Pre-requisites:
    1. Create 2 cephfs volumes
       ceph fs volume create <vol_name>

    Test operation:
    1. Create client1 restricted to first cephfs
       ceph fs authorize <fs_name> client.<client_id> <path-in-cephfs> rw
    2. Create client2 restricted to second cephfs
    3. Mount first cephfs with client1
    4. Verify mounting second cephfs with client1 fails
    5. Mount second cephfs with client2
    6. Verify mounting first cephfs with client2 fails

    Clean-up:
    1. Remove all the cephfs mounts
    2. Remove all the created clients
    """
    try:
        tc = "CEPH-83573869"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        mount_points = []
        fs1 = "cephfs"
        fs2 = "cephfs-ec"
        log.info(f"Creating client authorized to {fs1}")
        fs_util.fs_client_authorize(client1, fs1, "client1", "/", "rw")
        log.info(f"Creating client authorized to {fs2}")
        fs_util.fs_client_authorize(client1, fs2, "client2", "/", "rw")
        kernel_mount_dir = "/mnt/kernel" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        fuse_mount_dir = "/mnt/fuse" + "".join(
            secrets.choice(string.ascii_lowercase + string.digits)
            for i in range(5))
        mount_points.extend([kernel_mount_dir, fuse_mount_dir])
        log.info(f"Mounting {fs1} with client1")
        fs_util.kernel_mount(
            clients,
            kernel_mount_dir,
            mon_node_ip,
            new_client_hostname="client1",
            extra_params=f",fs={fs1}",
        )
        fs_util.fuse_mount(
            clients,
            fuse_mount_dir,
            new_client_hostname="client1",
            extra_params=f" --client_fs {fs1}",
        )
        log.info(f"Verifying mount failure for client1 for {fs2}")
        rc = verify_mount_failure_on_root(
            fs_util,
            clients,
            kernel_mount_dir + "_dir1",
            fuse_mount_dir + "_dir1",
            "client1",
            mon_node_ip,
            fs_name=f"{fs2}",
        )
        if rc == 1:
            log.error(f"Mount success on {fs2} with client1")
            return 1
        kernel_mount_dir = "/mnt/kernel" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        fuse_mount_dir = "/mnt/fuse" + "".join(
            secrets.choice(string.ascii_lowercase + string.digits)
            for i in range(5))
        mount_points.extend([kernel_mount_dir, fuse_mount_dir])
        log.info(f"Mounting {fs2} with client2")
        fs_util.kernel_mount(
            clients,
            kernel_mount_dir,
            mon_node_ip,
            new_client_hostname="client2",
            extra_params=f",fs={fs2}",
        )
        fs_util.fuse_mount(
            clients,
            fuse_mount_dir,
            new_client_hostname="client2",
            extra_params=f" --client_fs {fs2}",
        )
        rc = verify_mount_failure_on_root(
            fs_util,
            clients,
            kernel_mount_dir + "_dir2",
            fuse_mount_dir + "_dir2",
            "client2",
            mon_node_ip,
            fs_name=f"{fs1}",
        )
        log.info(f"Verifying mount failure for client2 for {fs1}")
        if rc == 1:
            log.error(f"Mount success on {fs1} with client2")
            return 1
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up the system")
        for client in clients:
            for mount_point in mount_points:
                client.exec_command(sudo=True, cmd=f"umount {mount_point}")
        for num in range(1, 4):
            client1.exec_command(sudo=True,
                                 cmd=f"ceph auth rm client.client{num}")
Example #12
def run(ceph_cluster, **kw):
    """
    CEPH-83574003 - Export the nfs share with cli with RO access
    Pre-requisites:
    1. Create cephfs volume
       ceph fs volume create <vol_name>
    2. Create nfs cluster
       ceph nfs cluster create <nfs_name> <nfs_server>

    Test operation:
    1. Create cephfs nfs export
       ceph nfs export create cephfs <fs_name> <nfs_name> <nfs_export_name> --readonly path=<export_path>
    2. Verify write operation is not allowed on nfs export
    3. Verify read operation is allowed

    Clean-up:
    1. Remove all the data in Cephfs file system
    2. Remove all the cephfs mounts
    3. Delete all cephfs nfs export
    """
    try:
        tc = "CEPH-83574003"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        rhbuild = config.get("rhbuild")
        nfs_servers = ceph_cluster.get_ceph_objects("nfs")
        nfs_server = nfs_servers[0].node.hostname
        nfs_name = "cephfs-nfs"
        nfs_export_1 = "/export_" + "".join(
            secrets.choice(string.digits) for i in range(3))
        nfs_export_2 = "/export_" + "".join(
            secrets.choice(string.digits) for i in range(3))
        export_path = "/"
        fs_name = "cephfs"
        nfs_mounting_dir_1 = "/mnt/nfs_" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        nfs_mounting_dir_2 = "/mnt/nfs_" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        if "5.0" in rhbuild:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {fs_name} {nfs_name}"
                f" {nfs_export_1} --readonly path={export_path}",
            )
        else:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {nfs_name} "
                f"{nfs_export_1} {fs_name} {export_path} --readonly",
            )
        commands = [
            f"mkdir {nfs_mounting_dir_1}",
            f"mount -t nfs -o port=2049 {nfs_server}:{nfs_export_1} {nfs_mounting_dir_1}",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        out, err = client1.exec_command(sudo=True,
                                        cmd=f"touch {nfs_mounting_dir_1}/file",
                                        check_ec=False)
        if not err:
            raise CommandFailed("NFS export has permission to write")

        if "5.0" in rhbuild:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {fs_name} {nfs_name}"
                f" {nfs_export_2} path={export_path}",
            )
        else:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {nfs_name} "
                f"{nfs_export_2} {fs_name} path={export_path}",
            )
        commands = [
            f"mkdir {nfs_mounting_dir_2}",
            f"mount -t nfs -o port=2049 {nfs_server}:{nfs_export_2} {nfs_mounting_dir_2}",
            f"dd if=/dev/urandom of={nfs_mounting_dir_2}/file bs=1M count=1000",
            f"dd if={nfs_mounting_dir_1}/file of=~/copy_file bs=1M count=1000",
            f"diff {nfs_mounting_dir_2}/file ~/copy_file",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        log.info("Test completed successfully")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up")
        commands = [
            f"umount {nfs_mounting_dir_1}",
            f"rm -rf {nfs_mounting_dir_2}/*",
            f"umount {nfs_mounting_dir_2}",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        commands = [
            f"rm -rf {nfs_mounting_dir_2}/",
            "rm -f ~/copy_file",
            f"ceph nfs export delete {nfs_name} {nfs_export_1}",
            f"ceph nfs export delete {nfs_name} {nfs_export_2}",
        ]
        for command in commands:
            client1.exec_command(sudo=True,
                                 cmd=command,
                                 long_running=True,
                                 check_ec=False)
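
The read-only export command differs between 5.0 and later builds, exactly as the two branches above show; this helper only selects the right command string for a given rhbuild value.

# Sketch only: pick the readonly export-create form used above.
def readonly_export_cmd(rhbuild, fs_name, nfs_name, export, path="/"):
    if "5.0" in rhbuild:
        return (f"ceph nfs export create cephfs {fs_name} {nfs_name} "
                f"{export} --readonly path={path}")
    return (f"ceph nfs export create cephfs {nfs_name} "
            f"{export} {fs_name} {path} --readonly")

print(readonly_export_cmd("5.1", "cephfs", "cephfs-nfs", "/export_001"))
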
Example #13
def run(ceph_cluster, **kw):
    """
    CEPH-83574015 - Verify that an nfs cluster can be deleted and recreated with the same name.
    Pre-requisites:
    1. Create cephfs volume
       ceph fs volume create <vol_name>
    2. Create nfs cluster
       ceph nfs cluster create <nfs_name> <nfs_server>
    Test operation:
    1. Delete nfs cluster
       ceph nfs cluster delete <nfs_name>
    2. Create nfs cluster with same name
       ceph nfs cluster create <nfs_name> <nfs_server>
    3. Create cephfs nfs export
       ceph nfs export create cephfs <fs_name> <nfs_name> <nfs_export_name> path=<export_path>
    4. Mount nfs mount with cephfs export
       mount -t nfs -o port=2049 <nfs_server>:<nfs_export> <nfs_mounting_dir>
    5. Run some IO's
    Clean-up:
    1. Remove data in cephfs
    2. Remove cephfs nfs export
    3. Remove all nfs mounts
    """
    try:
        tc = "CEPH-83574028"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        rhbuild = config.get("rhbuild")
        nfs_servers = ceph_cluster.get_ceph_objects("nfs")
        nfs_server = nfs_servers[0].node.hostname
        nfs_name = "cephfs-nfs"
        nfs_mounting_dir = "/mnt/nfs_" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        out, rc = client1.exec_command(sudo=True, cmd="ceph nfs cluster ls")
        output = out.split()
        if nfs_name in output:
            log.info("ceph nfs cluster is present")
        else:
            raise CommandFailed("ceph nfs cluster is absent")
        out, rc = client1.exec_command(
            sudo=True, cmd=f"ceph nfs cluster delete {nfs_name}")
        time.sleep(5)
        out, rc = client1.exec_command(sudo=True, cmd="ceph nfs cluster ls")
        output = out.split()
        if nfs_name not in output:
            log.info("ceph nfs cluster deleted successfully")
        else:
            raise CommandFailed("Failed to delete nfs cluster")
        out, rc = client1.exec_command(
            sudo=True, cmd=f"ceph nfs cluster create {nfs_name} {nfs_server}")
        time.sleep(5)
        out, rc = client1.exec_command(sudo=True, cmd="ceph nfs cluster ls")
        output = out.split()
        if nfs_name in output:
            log.info("ceph nfs cluster created successfully")
        else:
            raise CommandFailed("Failed to create nfs cluster")
        nfs_export_name = "/export_" + "".join(
            secrets.choice(string.digits) for i in range(3))
        export_path = "/"
        fs_name = "cephfs"
        if "5.0" in rhbuild:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {fs_name} {nfs_name} "
                f"{nfs_export_name} path={export_path}",
            )
        else:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {nfs_name} "
                f"{nfs_export_name} {fs_name} path={export_path}",
            )
        commands = [
            f"mkdir -p {nfs_mounting_dir}",
            f"mount -t nfs -o port=2049 {nfs_server}:{nfs_export_name} {nfs_mounting_dir}",
            f"mkdir {nfs_mounting_dir}/dir1 {nfs_mounting_dir}/dir2"
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 --files"
            f" 1000 --files-per-dir 10 --dirs-per-dir 2 --top {nfs_mounting_dir}/dir1",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation read --threads 10 --file-size 4 --files"
            f" 1000 --files-per-dir 10 --dirs-per-dir 2 --top {nfs_mounting_dir}/dir1",
            f"for n in {{1..20}}; do     dd if=/dev/urandom of={nfs_mounting_dir}/dir2"
            f"/file$(printf %03d "
            "$n"
            ") bs=500k count=1000; done",
            f"dd if={nfs_mounting_dir}/dir2/file1 of={nfs_mounting_dir}/dir2/copy_file1 bs=500k count=1000; done",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up the system")
        commands = [
            f"rm -rf {nfs_mounting_dir}/*",
            f"umount {nfs_mounting_dir}",
            f"ceph nfs export delete {nfs_name} {nfs_export_name}",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        client1.exec_command(sudo=True,
                             cmd=f"rm -rf {nfs_mounting_dir}/",
                             check_ec=False)
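
The presence/absence checks above are just membership tests on the whitespace-split output of "ceph nfs cluster ls"; the sketch below shows the same check against a hand-written sample string, so no cluster is needed to follow it.

# Sketch only: the cluster-ls membership check used above.
def cluster_present(cluster_ls_output, nfs_name):
    return nfs_name in cluster_ls_output.split()

sample = "cephfs-nfs other-cluster"
assert cluster_present(sample, "cephfs-nfs")
assert not cluster_present(sample, "deleted-cluster")
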
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Create cephfs volume
       ceph fs volume create <vol_name>
    2. Create cephfs subvolume
       ceph fs subvolume create <vol_name> <subvolume_name>

    Operations:
    1. Mount Cephfs on kernel client with recover_session=no
    2. Block the client node on which cephfs is mounted
    3. Verify mount is inaccessible

    Clean-up:
    1. Remove all the data in Cephfs file system
    2. Unblock the client node
    3. Remove all cephfs mounts
    """
    try:
        tc = "CEPH-83573676"
        log.info(f"Running cephfs {tc} test case")
        fs_util = FsUtils(ceph_cluster)
        config = kw["config"]
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        rhbuild = config.get("rhbuild")
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        client1 = clients[0]
        client2 = clients[1]
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        mount_dir = "/mnt/" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
        )
        if "4." in rhbuild:
            fs_name = "cephfs_new"
        else:
            fs_name = "cephfs"
        commands = [
            f"ceph fs subvolume create {fs_name} sub1",
            f"mkdir {mount_dir}",
            f"mount -t ceph {mon_node_ip}:/ {mount_dir} -o name=admin,recover_session=no",
            f"ls {mount_dir}",
        ]
        for command in commands:
            out, rc = client1.exec_command(sudo=True, cmd=command)
        output = out.read().decode()
        if "volumes" in output:
            log.info("Cephfs mount is accessible")
        else:
            log.error("Cephfs mount is not accessible")
            return 1
        log.info("Creating Directories")
        commands = [
            f"mkdir {mount_dir}/volumes/dir",
            f"for n in {{1..5}}; do dd if=/dev/urandom of={mount_dir}/volumes/dir/file$( printf %03d "
            "$n"
            " )"
            " bs=1M count=1000; done",
            "ceph tell mds.0 client ls --format json",
        ]
        for command in commands:
            out, rc = client1.exec_command(sudo=True, cmd=command)
        output = json.loads(out.read().decode())
        for item in output:
            client_metadata = item["client_metadata"]
            kernel_client = 0
            if "kernel_version" in client_metadata.keys():
                kernel_client = 1
            if (
                client1.node.shortname == item["client_metadata"]["hostname"]
                and kernel_client == 1
            ):
                client_id = item["id"]
        log.info("Blocking the Cephfs client")
        command = f"ceph tell mds.0 client evict id={client_id}"
        out, rc = client1.exec_command(sudo=True, cmd=command)
        time.sleep(5)
        try:
            log.info("Verifying mount is inaccessible")
            out, rc = client1.exec_command(sudo=True, cmd=f"ls {mount_dir}")
        except CommandFailed as e:
            log.info(e)
            log.info("Mount point is inaccessible as expected")
            return 0
        else:
            output = out.read().decode()
            if "volumes" in output:
                log.error("Mount point is accessible")
                return 1

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up")
        mount_dir_2 = "/mnt/" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
        )
        client2.exec_command(sudo=True, cmd=f"mkdir {mount_dir_2}")
        command = f"mount -t ceph {mon_node_ip}:/ {mount_dir_2} -o name=admin"
        client2.exec_command(sudo=True, cmd=command)
        client2.exec_command(sudo=True, cmd=f"rm -rf {mount_dir_2}/*")
        client2.exec_command(sudo=True, cmd=f"umount {mount_dir_2}")
        out, rc = client1.exec_command(
            sudo=True, cmd="ifconfig eth0 | grep 'inet ' | awk '{{print $2}}'"
        )
        ip = out.read().decode()
        log.info("Unblocking the Cephfs client")
        if "4." in rhbuild:
            out, rc = client1.exec_command(
                sudo=True, cmd=f"ceph osd blacklist ls | grep {ip}"
            )
            output = out.read().decode()
            out = output.split()
            blocked_client = out[0]
            client1.exec_command(
                sudo=True, cmd=f"ceph osd blacklist rm {blocked_client}"
            )
        else:
            out, rc = client1.exec_command(
                sudo=True, cmd=f"ceph osd blocklist ls | grep {ip}"
            )
            output = out.read().decode()
            blocked_client = output.split()
            client = blocked_client[0]
            log.info(f"client_list - {client}")
            client1.exec_command(sudo=True, cmd=f"ceph osd blocklist rm {client}")
        client1.exec_command(sudo=True, cmd=f"umount {mount_dir}")
Example #15
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573520 - Validate the max snapshots that can be created under a root FS subvolume level.
                    Increase by 50 at a time until it reaches the max limit.

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. ceph fs volume create cephfs if the volume is not present
    3. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_max_snap --size 5368706371 --group_name subvolgroup_1
    4. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/

    Test Script Flow :
    1. We will create snapshots in batches of 50 until we reach 1000.
    2. We will break out whenever the maximum allowed number of snapshots is reached.

    Clean up:
    1. Deletes all the snapshots created
    2. Deletes snapshot and subvolume created.
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_max_snap",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_max_snap",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot_list = [{
            "vol_name": default_fs,
            "subvol_name": "subvol_max_snap",
            "snap_name": f"snap_limit_{x}",
        } for x in range(1, 1000)]
        max_snapshots_allowed = len(snapshot_list)
        for i in range(0, 1000, 50):
            for snapshot in snapshot_list[i:i + 50]:
                try:
                    fs_util.create_snapshot(clients[0],
                                            **snapshot,
                                            validate=False)
                except CommandFailed:
                    log.info(
                        f"Max Snapshots allowed under a root FS sub volume level is {i}"
                    )
                    max_snapshots_allowed = i
                    break
            else:
                log.info(f"Snapshot creation is successful from {i} to {i + 50}")
                continue
            break
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        for snapshot in snapshot_list[0:max_sanpshots_allowed]:
            fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
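
The batching logic above can be read in isolation: create in batches of 50 and stop at the first failure. The sketch below replays that loop with a stand-in "create" callable that raises once a hypothetical limit is hit, mimicking the CommandFailed the real snapshot create returns when the per-directory snapshot limit is reached.

# Sketch only: batched creation that stops at the first failure.
def create_in_batches(create, total=1000, batch=50):
    created = 0
    for start in range(0, total, batch):
        for _ in range(start, min(start + batch, total)):
            try:
                create()
                created += 1
            except RuntimeError:
                return created
    return created

limit = 100  # hypothetical limit, for the demo only
state = {"count": 0}

def fake_create():
    if state["count"] >= limit:
        raise RuntimeError("snapshot limit reached")
    state["count"] += 1

assert create_in_batches(fake_create) == limit
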
Example #16
def run(ceph_cluster, **kw):
    """
    CEPH-83574024 - Ensure Snapshot and cloning works on nfs exports
    Pre-requisites:
    1. Create cephfs volume
       ceph fs volume create <vol_name>
    2. Create nfs cluster
       ceph nfs cluster create <nfs_name> <nfs_server>

    Test operation:
    1. Create cephfs nfs export
       ceph nfs export create cephfs <fs_name> <nfs_name> <nfs_export_name> path=<export_path>
    2. Create cephfs subvolume group
    3. Create cephfs subvolume in cephfs subvolume group
    4. Create cephfs subvolume in default cephfs subvolume group
    5. Mount nfs mount with cephfs export
       mount -t nfs -o port=2049 <nfs_server>:<nfs_export> <nfs_mounting_dir>
    6. Run IOs on both cephfs subvolumes
    7. Create snapshots of both cephfs subvolumes
    8. Create clone of both cephfs subvolumes from snapshots
    9. Verify data is consistent across subvolumes, snapshots & clones

    Clean-up:
    1. Remove all the data in Cephfs file system
    2. Remove all the cephfs mounts
    3. Delete cephfs nfs export
    """
    try:
        tc = "CEPH-83574024"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        rhbuild = config.get("rhbuild")
        nfs_servers = ceph_cluster.get_ceph_objects("nfs")
        nfs_server = nfs_servers[0].node.hostname
        nfs_name = "cephfs-nfs"
        nfs_export_name = "/export_" + "".join(
            secrets.choice(string.digits) for i in range(3))
        export_path = "/"
        fs_name = "cephfs"
        nfs_mounting_dir = "/mnt/nfs_" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        if "5.0" in rhbuild:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {fs_name} {nfs_name} "
                f"{nfs_export_name} path={export_path}",
            )
        else:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {nfs_name} "
                f"{nfs_export_name} {fs_name} path={export_path}",
            )
        subvolumegroup = {
            "vol_name": fs_name,
            "group_name": "subvolume_group1",
        }
        fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": fs_name,
                "subvol_name": "subvolume1",
                "group_name": "subvolume_group1",
            },
            {
                "vol_name": fs_name,
                "subvol_name": "subvolume2",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(client1, **subvolume)
        commands = [
            f"mkdir -p {nfs_mounting_dir}",
            f"mount -t nfs -o port=2049 {nfs_server}:{nfs_export_name} {nfs_mounting_dir}",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        out, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {fs_name} subvolume1 --group_name subvolume_group1",
        )
        subvolume1_path = out.rstrip()
        out, rc = client1.exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {fs_name} subvolume2")
        subvolume2_path = out.rstrip()
        commands = [
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --file-size 4 "
            f"--files 1000 --top {nfs_mounting_dir}{subvolume1_path}",
            f"for n in {{1..20}}; do     dd if=/dev/urandom of={nfs_mounting_dir}{subvolume2_path}"
            f"/file$(printf %03d "
            "$n"
            ") bs=500k count=1000; done",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        commands = [
            f"ceph fs subvolume snapshot create {fs_name} subvolume1 snap1 --group_name subvolume_group1",
            f"ceph fs subvolume snapshot create {fs_name} subvolume2 snap2",
        ]
        for command in commands:
            out, err = client1.exec_command(sudo=True, cmd=command)
        clone_status_1 = {
            "vol_name": fs_name,
            "subvol_name": "subvolume1",
            "snap_name": "snap1",
            "target_subvol_name": "clone1",
            "group_name": "subvolume_group1",
            "target_group_name": "subvolume_group1",
        }
        fs_util.create_clone(client1, **clone_status_1)
        fs_util.validate_clone_state(client1, clone_status_1, timeout=6000)
        clone_status_2 = {
            "vol_name": fs_name,
            "subvol_name": "subvolume2",
            "snap_name": "snap2",
            "target_subvol_name": "clone2",
        }
        fs_util.create_clone(client1, **clone_status_2)
        fs_util.validate_clone_state(client1, clone_status_2, timeout=6000)
        out, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {fs_name} clone1 --group_name subvolume_group1",
        )
        clone1_path = out.rstrip()
        out, rc = client1.exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {fs_name} clone2")
        clone2_path = out.rstrip()
        commands = [
            f"diff -r {nfs_mounting_dir}{subvolume1_path} {nfs_mounting_dir}{subvolume1_path}/.snap/_snap1*",
            f"diff -r {nfs_mounting_dir}{subvolume2_path} {nfs_mounting_dir}{subvolume2_path}/.snap/_snap2*",
            f"diff -r {nfs_mounting_dir}{subvolume1_path} {nfs_mounting_dir}{clone1_path}",
            f"diff -r {nfs_mounting_dir}{subvolume2_path} {nfs_mounting_dir}{clone2_path}",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        log.info("Test completed successfully")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up")
        client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume snapshot rm {fs_name} subvolume1 snap1 --group_name subvolume_group1",
            check_ec=False,
        )
        client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume snapshot rm {fs_name} subvolume2 snap2",
            check_ec=False,
        )
        client1.exec_command(sudo=True, cmd=f"rm -rf {nfs_mounting_dir}/*")
        client1.exec_command(sudo=True, cmd=f"umount {nfs_mounting_dir}")
        client1.exec_command(sudo=True,
                             cmd=f"rm -rf {nfs_mounting_dir}/",
                             check_ec=False)
        client1.exec_command(
            sudo=True,
            cmd=f"ceph nfs export delete {nfs_name} {nfs_export_name}",
            check_ec=False,
        )
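
The export test above validates snapshot and clone data by running `diff -r` between the live subvolume directory and its snapshot under the hidden `.snap` directory. As a minimal standalone sketch of that same check, the helper below compares a live directory with one of its CephFS snapshots; the helper name, the paths, and the snapshot name are placeholders rather than values taken from the test.

import subprocess


def snapshot_matches_live(live_dir, snap_name):
    """Return True when a CephFS snapshot's content matches the live directory.

    CephFS exposes snapshots of a directory under <dir>/.snap/; snapshots taken
    through the subvolume interface appear there with a "_<snap>_<id>" name,
    hence the glob. 'diff -r' exits 0 only when both trees are identical.
    """
    result = subprocess.run(
        f"diff -r {live_dir} {live_dir}/.snap/_{snap_name}_*",
        shell=True,
        capture_output=True,
        text=True,
    )
    return result.returncode == 0


# Hypothetical usage with a placeholder NFS mount path:
# snapshot_matches_live("/mnt/nfs_XXXX/volumes/subvolume_group1/subvolume1", "snap1")
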
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Create cephfs volume
       ceph fs volume create <vol_name>
    2. Disable pg_autoscale_mode for cephfs pools if set
       ceph osd pool set cephfs_data pg_autoscale_mode off
       ceph osd pool set cephfs_metadata pg_autoscale_mode off
    3. Configure 2 clients with the FUSE client and 1 client with the kernel client

    Test operation:
    1. Run IO's on both clients
    2. Verify there are no "heartbeat_map" timeout issues in the logs
    3. Fill up the cluster up to 20%
    4. Change cephfs data and metadata pool pg_num and pgp_num to (existing size - 1) with client IO running
    5. Wait for the cluster to reach the active+clean state while IO is running
    6. Write some more data to the cluster
    7. Change cephfs data and metadata pool pg_num and pgp_num to (existing size + 1) with client IO running
    8. Wait for the cluster to reach the active+clean state while IO is running

    Clean-up:
    1. Remove all the data in Cephfs file system
    2. Remove all the cephfs mounts
    3. Reset pg_autoscale_mode for cephfs pools to on
    """
    try:
        tc = "CEPH-83574596"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        rhbuild = config.get("rhbuild")
        build = config.get("build", config.get("rhbuild"))
        mdss = ceph_cluster.get_ceph_objects("mds")

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        mount_points = []
        kernel_mount_dir = "/mnt/" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        fs_util.kernel_mount(clients,
                             kernel_mount_dir,
                             mon_node_ip,
                             new_client_hostname="admin")
        fuse_mount_dir = "/mnt/" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        fs_util.fuse_mount(clients,
                           fuse_mount_dir,
                           new_client_hostname="admin")
        mount_points.extend([kernel_mount_dir, fuse_mount_dir])
        if "4." in rhbuild:
            data_pool = "cephfs_data"
            metadata_pool = "cephfs_metadata"
        else:
            data_pool = "cephfs.cephfs.data"
            metadata_pool = "cephfs.cephfs.meta"
        commands = [
            f"ceph osd pool set {data_pool} pg_autoscale_mode off",
            f"ceph osd pool set {metadata_pool} pg_autoscale_mode off",
        ]
        for command in commands:
            clients[0].exec_command(sudo=True, cmd=command, long_running=True)
        out, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph osd pool get {data_pool} pg_num | awk '{{print $2}}'")
        data_pool_pg_num = out.read().decode()
        out, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph osd pool get {metadata_pool} pg_num | awk '{{print $2}}'",
        )
        metadata_pool_pg_num = out.read().decode()
        for num in range(1, 7):
            log.info("Creating Directories")
            out, rc = clients[0].exec_command(sudo=True,
                                              cmd="mkdir %s/%s%d" %
                                              (kernel_mount_dir, "dir", num))
        log.info("Running IO's to get cluster upto 20% capacity")
        no_of_files = "{1..10}"
        commands = [
            f'for n in {no_of_files}; do dd if=/dev/urandom of={kernel_mount_dir}/dir1/file$( printf %03d "$n" )'
            f" bs=1M count=1000; done",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
            f"--files 1000 --files-per-dir 10 --top {kernel_mount_dir}/dir2",
        ]
        for command in commands:
            clients[0].exec_command(sudo=True, cmd=command, long_running=True)
        log.info("Checking for heartbeat map timeout issue")
        rc = fs_util.heartbeat_map(mdss[0])
        if rc == 1:
            log.error("heartbeat map timeout issue found")
            return 1
        log.info(
            "Changing cephfs data and metadata pool pg_num and pgp_num to existing size '-1' with client IO running"
        )
        no_of_files = "{1..1000}"
        data_pool_pg_num = str(int(data_pool_pg_num) - 1)
        metadata_pool_pg_num = str(int(metadata_pool_pg_num) - 1)
        commands = [
            f'for n in {no_of_files}; do dd if=/dev/urandom of={kernel_mount_dir}/dir3/file$( printf %03d "$n" )'
            f" bs=1M count=10; done",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
            f"--files 1000 --files-per-dir 10 --top {kernel_mount_dir}/dir4",
            f"ceph osd pool set {data_pool} pg_num {data_pool_pg_num}",
            f"ceph osd pool set {data_pool} pgp_num {data_pool_pg_num}",
            f"ceph osd pool set {metadata_pool} pg_num {metadata_pool_pg_num}",
            f"ceph osd pool set {metadata_pool} pgp_num {metadata_pool_pg_num}",
        ]
        with parallel() as p:
            for num in range(0, 6):
                p.spawn(clients[0].exec_command, sudo=True, cmd=commands[num])
                time.sleep(1)
        log.info("Verifying pgs are in active+clean state")
        rc = check_clean_pgs(clients)
        if rc == 1:
            return 1
        log.info(
            "Change cephfs data and metadata pool pg_num and pgp_num to existing size '+1' with client IO running"
        )
        data_pool_pg_num = str(int(data_pool_pg_num) + 1)
        metadata_pool_pg_num = str(int(metadata_pool_pg_num) + 1)
        commands = [
            f'for n in {no_of_files}; do     dd if=/dev/urandom of={kernel_mount_dir}/dir5/file$( printf %03d "$n" )'
            f" bs=1M count=10; done",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
            f"--files 1000 --files-per-dir 10 --top {kernel_mount_dir}/dir6",
            f"ceph osd pool set {data_pool} pg_num {data_pool_pg_num}",
            f"ceph osd pool set {data_pool} pgp_num {data_pool_pg_num}",
            f"ceph osd pool set {metadata_pool} pg_num {metadata_pool_pg_num}",
            f"ceph osd pool set {metadata_pool} pgp_num {metadata_pool_pg_num}",
        ]
        with parallel() as p:
            for num in range(0, 6):
                p.spawn(clients[0].exec_command, sudo=True, cmd=commands[num])
                time.sleep(1)
        log.info("Verifying pgs are in active+clean state")
        rc = check_clean_pgs(clients)
        if rc == 1:
            return 1
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up the system")
        out, rc = clients[0].exec_command(sudo=True,
                                          cmd=f"rm -rf {mount_points[1]}/*")
        for mount_point in mount_points:
            clients[0].exec_command(sudo=True, cmd=f"umount {mount_point}")
        if "4." in rhbuild:
            commands = [
                f"ceph osd pool set {data_pool} pg_autoscale_mode warn",
                f"ceph osd pool set {metadata_pool} pg_autoscale_mode warn",
            ]
        else:
            commands = [
                f"ceph osd pool set {data_pool} pg_autoscale_mode on",
                f"ceph osd pool set {metadata_pool} pg_autoscale_mode on",
            ]
        for command in commands:
            clients[0].exec_command(sudo=True, cmd=command, long_running=True)
        for mount_point in mount_points:
            clients[0].exec_command(sudo=True, cmd=f"rm -rf {mount_point}")
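
Outside the test framework, the pg_num/pgp_num change exercised above boils down to reading the pool's current value and setting both options to that value plus or minus one. Below is a minimal sketch of that step, assuming a host with the ceph CLI and an admin keyring; the pool name in the usage comment is a placeholder.

import json
import subprocess


def bump_pg_num(pool, delta):
    """Read the current pg_num of a pool and set pg_num/pgp_num to pg_num + delta."""
    out = subprocess.run(
        ["ceph", "osd", "pool", "get", pool, "pg_num", "--format", "json"],
        capture_output=True, text=True, check=True,
    ).stdout
    new_pg_num = json.loads(out)["pg_num"] + delta
    for option in ("pg_num", "pgp_num"):
        subprocess.run(
            ["ceph", "osd", "pool", "set", pool, option, str(new_pg_num)],
            check=True,
        )
    return new_pg_num


# bump_pg_num("cephfs_data", -1)  # placeholder pool name
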
Example #18
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-11319	Create first snap, add more data to the original, then create a second snap.
                Rollback to the 1st snap and do data validation.
                Rollback to the 2nd snap and do data validation. Perform cross-platform rollback,
                i.e. take a snap on the kernel mount and perform the rollback using the fuse mount
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create fs volume cephfs if the volume is not there (ceph fs volume create cephfs)
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvol_cross_platform_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume. We will add known data as we are going to verify the files
        Ex: create_file_data()
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvol_cross_platform_snapshot_1

    Script Flow:
    1. Mount the subvolume on the client using Kernel and fuse mount
    2. Write data into the fuse mount point i.e., data_from_fuse_mount
    3. Collect the checksum of the files
    4. Take snapshot at this point i.e., snap_1
    5. Write data into the kernel mount point i.e., data_from_kernel_mount
    6. Collect the checksum of the files
    7. Take snapshot at this point i.e., snap_2
    8. On Kernel mount revert the snapshot to snap_1 and compare the checksum of the files collected in step 3
    9. On Fuse mount revert the snapshot to snap_2 and compare the checksum of the files collected in step 6

    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvol_cross_platform_snapshot_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "group_name": "subvol_cross_platform_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_cross_platform_snapshot"
            f" subvol_cross_platform_snapshot_1",
        )
        subvol_path = subvol_path.read().decode().strip()
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path}",
        )
        fs_util.create_file_data(client1, fuse_mounting_dir_1, 3, "snap1",
                                 "data_from_fuse_mount ")
        fuse_files_checksum = fs_util.get_files_and_checksum(
            client1, fuse_mounting_dir_1)
        fuse_snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvol_cross_platform_snapshot_1",
        }
        fs_util.create_snapshot(client1, **fuse_snapshot)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path}",
        )
        fs_util.create_file_data(client1, kernel_mounting_dir_1, 3, "snap1",
                                 "data_from_kernel_mount ")
        kernel_snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "snap_name": "snap_2",
            "group_name": "subvol_cross_platform_snapshot_1",
        }
        fs_util.create_snapshot(client1, **kernel_snapshot)
        kernel_files_checksum = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_1)
        client1.exec_command(
            sudo=True,
            cmd=f"cd {kernel_mounting_dir_1};cp .snap/_snap_1_*/* .")
        kernel_mount_revert_snap_fuse = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_1)
        if fuse_files_checksum != kernel_mount_revert_snap_fuse:
            log.error(
                "checksum is not when reverted to snap1 i.e., from fuse mount snapshot revert"
            )
            return 1
        client1.exec_command(
            sudo=True, cmd=f"cd {fuse_mounting_dir_1};cp .snap/_snap_2_*/* .")
        fuse_mount_revert_snap_kernel = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_1)
        if kernel_files_checksum != fuse_mount_revert_snap_kernel:
            log.error(
                "checksum is not when reverted to snap2 i.e., from kernel mount snapshot revert"
            )
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        fs_util.remove_snapshot(client1, **kernel_snapshot)
        fs_util.remove_snapshot(client1, **fuse_snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
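
The cross-platform rollback test above compares per-file checksums before and after copying data back out of the snapshot directory. A minimal, framework-free sketch of that checksum collection is shown below; the mount-point path in the usage comments is a placeholder.

import hashlib
import os


def dir_checksums(path):
    """Map each regular file (path relative to 'path') to its sha256 digest."""
    sums = {}
    for root, _, files in os.walk(path):
        for name in files:
            full = os.path.join(root, name)
            rel = os.path.relpath(full, path)
            with open(full, "rb") as fh:
                sums[rel] = hashlib.sha256(fh.read()).hexdigest()
    return sums


# before = dir_checksums("/mnt/cephfs_fuseXXXX_1/")  # placeholder mount point
# ...copy the files back from .snap/_snap_1_*/ into the mount...
# after = dir_checksums("/mnt/cephfs_fuseXXXX_1/")
# assert before == after
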
Example #19
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573408   Test to validate the quota remains intact even after rebooting the Node.
                    Create a FS and create 10 directories and mount them on kernel client and fuse client (5 mounts
                    each). Set max bytes quota to a number (say 1Gb) and also set max files quota (say 20) and verify if
                    the set quota limit is working fine by filling the max number of files and also by filling data to reach
                    the max limit. Reboot the node; once the node is up, verify whether the set quota remains or not.
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. create fs volume create cephfs if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_1 --size 5368706371 --group_name subvolgroup_1
    5. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1

    Test Case Flow:
    1. Mount the subvolume_1 on the client using fuse
    2. Mount the subvolume_2 on the client using kernel
    3. Set quota attribute 1gb and 50 files on both mount points
    4. Reboot the client node
    5. mount the subvolumes again
    6. Validate the quota attributes after reboot
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_quota_byte_increase_1"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_incr_fuse",
                "group_name": "subvolgroup_quota_byte_increase_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_incr_kernel",
                "group_name": "subvolgroup_quota_byte_increase_1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_byte_incr_kernel subvolgroup_quota_byte_increase_1",
        )
        kernel_subvol_path = subvol_path.read().decode().strip()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{kernel_subvol_path}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_byte_incr_fuse subvolgroup_quota_byte_increase_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fuse_subvol_path = subvol_path.read().decode().strip()
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {fuse_subvol_path}",
        )
        fs_util.set_quota_attrs(clients[0], 50, 1073741824, fuse_mounting_dir_1)
        fuse_quota_attrs_before_reboot = fs_util.get_quota_attrs(
            clients[0], fuse_mounting_dir_1
        )

        fs_util.set_quota_attrs(clients[0], 50, 1073741824, kernel_mounting_dir_1)
        kernel_quota_attrs_before_reboot = fs_util.get_quota_attrs(
            clients[0], kernel_mounting_dir_1
        )

        fs_util.reboot_node(client1)

        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_2,
            ",".join(mon_node_ips),
            sub_dir=f"{kernel_subvol_path}",
        )
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_2,
            extra_params=f" -r {fuse_subvol_path}",
        )
        fuse_quota_attrs_after_reboot = fs_util.get_quota_attrs(
            clients[0], fuse_mounting_dir_2
        )
        kernel_quota_attrs_after_reboot = fs_util.get_quota_attrs(
            clients[0], kernel_mounting_dir_2
        )
        log.info(
            f"Quota Attributes befores reboot:{fuse_quota_attrs_before_reboot}\n"
            f"After reboot: {fuse_quota_attrs_after_reboot}"
        )
        if fuse_quota_attrs_after_reboot != fuse_quota_attrs_before_reboot:
            log.info("Fuse mount quota attributes are not matching after reboot")
            log.error(
                f"Quota Attributes befores reboot:{fuse_quota_attrs_before_reboot}\n"
                f"After reboot: {fuse_quota_attrs_after_reboot}"
            )
            return 1
        log.info(
            f"Quota Attributes befores reboot:{kernel_quota_attrs_before_reboot}\n"
            f"After reboot: {kernel_quota_attrs_after_reboot}"
        )
        if kernel_quota_attrs_before_reboot != kernel_quota_attrs_after_reboot:
            log.info("Fuse mount quota attributes are not matching after reboot")
            log.error(
                f"Quota Attributes befores reboot:{kernel_quota_attrs_before_reboot}\n"
                f"After reboot: {kernel_quota_attrs_after_reboot}"
            )
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
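
The quota helpers used above (`set_quota_attrs`/`get_quota_attrs`) are wrappers around CephFS quota extended attributes. Below is a minimal sketch of that underlying mechanism, assuming the `setfattr`/`getfattr` tools are installed on the client and using a placeholder mount-point path.

import subprocess


def set_cephfs_quota(path, max_bytes, max_files):
    """Set CephFS quota limits on a directory via its extended attributes."""
    subprocess.run(["setfattr", "-n", "ceph.quota.max_bytes",
                    "-v", str(max_bytes), path], check=True)
    subprocess.run(["setfattr", "-n", "ceph.quota.max_files",
                    "-v", str(max_files), path], check=True)


def get_cephfs_quota(path):
    """Read the quota limits back, e.g. to compare them before and after a reboot."""
    quota = {}
    for attr in ("ceph.quota.max_bytes", "ceph.quota.max_files"):
        out = subprocess.run(
            ["getfattr", "--only-values", "-n", attr, path],
            capture_output=True, text=True, check=True,
        ).stdout
        quota[attr] = int(out.strip())
    return quota


# set_cephfs_quota("/mnt/cephfs_fuseXXXX_1", 1073741824, 50)  # placeholder path
# assert get_cephfs_quota("/mnt/cephfs_fuseXXXX_1")["ceph.quota.max_files"] == 50
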
Example #20
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573502	Interrupt the cloning operation in-between and observe the behavior.
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create fs volume cephfs if the volume is not there (ceph fs volume create cephfs)
    3. Create 1 subvolume group
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_clone_cancel --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_clone_cancel snap_1 --group_name subvolgroup_1

    Clone Operations and Clone States:
    1. Create a clone in the default location.
        ceph fs subvolume snapshot clone cephfs subvol_clone_cancel snap_1 clone_status_1 --group_name subvolgroup_1
    2. Create a clone and Cancel the Operation and check the state is in "canceled".
    3. Clone state should move to canceled state

    Clean-up:
    1. ceph fs subvolume snapshot rm <vol_name> <subvol_name> snap_name [--group_name <subvol_group_name>]
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_clone_cancel_1"}
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_cancel",
            "group_name": "subvolgroup_clone_cancel_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_clone_cancel subvolgroup_clone_cancel_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_cancel",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_clone_cancel_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        clone_status_3 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_cancel",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_status_3",
            "group_name": "subvolgroup_clone_cancel_1",
        }
        fs_util.create_clone(client1, **clone_status_3)
        fs_util.clone_cancel(
            client1,
            clone_status_3["vol_name"],
            clone_status_3["target_subvol_name"],
            group_name=clone_status_3.get("target_group_name", ""),
        )
        fs_util.validate_clone_state(client1, clone_status_3, "canceled")
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {"vol_name": default_fs, "subvol_name": "clone_status_3", "force": True},
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
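
The clone-cancel flow above is driven through the framework helpers. A minimal sketch of the same flow issued directly against the ceph CLI is shown below; the names are placeholders, and it assumes the source subvolume holds enough data that the clone is still in progress when the cancel lands.

import json
import subprocess


def clone_and_cancel(vol, subvol, snap, clone, group):
    """Start a clone from a snapshot, cancel it, and return the clone state."""
    subprocess.run(
        ["ceph", "fs", "subvolume", "snapshot", "clone",
         vol, subvol, snap, clone, "--group_name", group],
        check=True,
    )
    subprocess.run(["ceph", "fs", "clone", "cancel", vol, clone], check=True)
    status = subprocess.run(
        ["ceph", "fs", "clone", "status", vol, clone, "--format", "json"],
        capture_output=True, text=True, check=True,
    ).stdout
    return json.loads(status)["status"]["state"]  # expected to be "canceled"


# clone_and_cancel("cephfs", "subvol_clone_cancel", "snap_1",
#                  "clone_status_3", "subvolgroup_clone_cancel_1")
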
Example #21
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Create cephfs volume
       ceph fs volume create <vol_name>

    Subvolume authorize operations
    1. Create cephfs subvolume
       ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>]
    2. Create client (say client.1) with read-write permission on subvolume created in step 1
       ceph fs subvolume authorize <vol_name> <sub_name> <auth_id> [--access_level=rw]
    3. Verify client is created in authorized list of clients in subvolume
       ceph fs subvolume authorized_list <vol_name> <sub_name> [--group_name=<group_name>]
    4. Get path of subvolume
       ceph fs subvolume getpath <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    5. Mount kcephfs on path of subvolume we got in step 4 with “client.1” created in step 2
       mount -t ceph MONITOR-1_NAME:6789:/SUBVOLUME_PATH MOUNT_POINT -o name=CLIENT_ID,fs=FILE_SYSTEM_NAME
    6. Verify read-write operations
    7. Mount ceph-fuse on path of subvolume we got in step 4 with “client.1”
       ceph-fuse -n client.CLIENT_ID MOUNT_POINT -r SUBVOLUME_PATH
    8. Verify read-write operation
    9. Verify kcephfs mount on “/” directory  fails with “client.1”
       mount -t ceph MONITOR-1_NAME:6789,MONITOR-2_NAME:6789,MONITOR-3_NAME:6789:/ -o name=CLIENT_ID,fs=FILE_SYSTEM_NAME
    10. Verify ceph-fuse mount on “/” directory fails with “client.1”
        ceph-fuse -n client.CLIENT_ID MOUNT_POINT
    11. Evict “client1”
        ceph fs subvolume evict <vol_name> <sub_name> <auth_id> [--group_name=<group_name>]
    12. Verify kernel & fuse mounts of “client1” are not accessible
    13. Deauthorize “client1”
        ceph fs subvolume deauthorize <vol_name> <sub_name> <auth_id> [--group_name=<group_name>]
    14. Verify client is removed in authorized list of clients in subvolume
        ceph fs subvolume authorized_list <vol_name> <sub_name> [--group_name=<group_name>]
    15. Create client (say client.2) with read-only permission on subvolume created in step 1
        ceph fs subvolume authorize <vol_name> <sub_name> <auth_id> [--access_level=r]
    16. Mount kcephfs on path of subvolume we got in step 4 with “client.2” created in step 15
    17. Verify read operation on subvolume
    18. Verify write operation fails on subvolume
    19. Mount ceph-fuse on path of subvolume we got in step 4 with “client.2”
    20. Repeat steps 17-18
    21. Verify kcephfs mount on “/” directory  fails with “client.2”
    22. Verify ceph-fuse mount on “/” directory fails with “client.2”
    23. Evict “client2”
    24. Verify kernel & fuse mounts of “client2” are not accessible
    25. Deauthorize “client2”
    26. Verify client is removed in authorized list of clients in subvolume
    27. Create cephfs subvolumegroup
        ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
    28. Create cephfs subvolume in subvolumegroup
        ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    29. Repeat steps 2-27 on cephfs subvolume created in above step (step 28)

    Clean-up:
    1. Remove all the data in Cephfs file system
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    4. Remove all the cephfs mounts
    """
    try:
        tc = "CEPH-83574596"
        log.info("Running cephfs %s test case" % (tc))

        config = kw.get("config")
        rhbuild = config.get("rhbuild")

        fs_util = FsUtils(ceph_cluster)
        client = ceph_cluster.get_ceph_objects("client")
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)

        fs_name = "cephfs"
        subvolume_name = "sub1"
        subvolgroup_name = ""
        if "4." in rhbuild:
            fs_name = "cephfs_new"
        client_no = 1
        for i in range(1, 3):
            if subvolgroup_name == "":
                log.info("Creating Cephfs subvolume")
                fs_util.create_subvolume(client[0], fs_name, subvolume_name)
            else:
                log.info("Creating Cephfs subvolumegroup")
                fs_util.create_subvolumegroup(client[0], fs_name, subvolgroup_name)
                log.info("Creating Cephfs subvolume in subvolumegroup")
                fs_util.create_subvolume(
                    client[0], fs_name, subvolume_name, group_name=subvolgroup_name
                )

            mount_points = []
            client_name = "Client" + str(client_no)
            kernel_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
            )
            fuse_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_lowercase + string.digits) for i in range(5)
            )
            mount_points.extend([kernel_mount_dir, fuse_mount_dir])
            log.info("Testing client with read-write permission on subvolume")
            fs_util.subvolume_authorize(
                client[0],
                fs_name,
                subvolume_name,
                client_name,
                extra_params=f" {subvolgroup_name} --access_level=rw",
            )
            out, rc = client[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume authorized_list {fs_name} {subvolume_name} {subvolgroup_name}",
            )
            if client_name in out:
                log.info("Client creation successful")
            else:
                log.error("Client creation failed")
                return 1
            log.info("Getting the path of Cephfs subvolume")
            subvol_path, rc = client[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {fs_name} {subvolume_name} {subvolgroup_name}",
            )
            subvol_path = subvol_path.strip()
            log.info(f"Testing kernel & fuse mount for {client_name}")
            fs_util.kernel_mount(
                client,
                kernel_mount_dir,
                mon_node_ip,
                new_client_hostname=client_name,
                sub_dir=f"{subvol_path}",
            )
            fs_util.fuse_mount(
                client,
                fuse_mount_dir,
                new_client_hostname=client_name,
                extra_params=f" -r {subvol_path}",
            )
            log.info(f"Testing read-write operation on {client_name}")
            rc = test_read_write_op(
                client[0], kernel_mount_dir, fuse_mount_dir, client_name
            )
            if rc == 1:
                return 1
            kernel_mount_dir2 = "/mnt/" + "".join(
                secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
            )
            fuse_mount_dir2 = "/mnt/" + "".join(
                secrets.choice(string.ascii_lowercase + string.digits) for i in range(5)
            )
            log.info(f"Verifying mount on root directory fails for {client_name}")
            rc = verify_mount_failure_on_root(
                fs_util,
                client,
                kernel_mount_dir2,
                fuse_mount_dir2,
                client_name,
                mon_node_ip,
            )
            if rc == 1:
                return 1
            log.info(f"Testing client eviction for {client_name}")
            out, rc = client[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume evict {fs_name} {subvolume_name} {client_name} {subvolgroup_name}",
            )
            rc = verifiy_client_eviction(
                client[0], kernel_mount_dir, fuse_mount_dir, client_name
            )
            if rc == 1:
                return 1
            log.info(f"Testing deauthorization for {client_name}")
            out, rc = client[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume deauthorize {fs_name} {subvolume_name} {client_name} {subvolgroup_name}",
            )
            out, rc = client[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume authorized_list {fs_name} {subvolume_name} {subvolgroup_name}",
            )
            if client_name not in out:
                log.info(f"{client_name} is deauthorized successfully")
            else:
                log.error(f"{client_name} deauthorization failed")
                return 1
            client_no += 1
            client_name = "Client" + str(client_no)
            kernel_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
            )
            fuse_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_lowercase + string.digits) for i in range(5)
            )
            mount_points.extend([kernel_mount_dir, fuse_mount_dir])
            log.info("Testing client with read-only permission on subvolume")
            fs_util.subvolume_authorize(
                client[0],
                fs_name,
                subvolume_name,
                client_name,
                extra_params=f" {subvolgroup_name} --access_level=r",
            )
            out, rc = client[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume authorized_list {fs_name} {subvolume_name} {subvolgroup_name}",
            )
            if client_name in out:
                log.info("Client creation successful")
            else:
                log.error("Client creation failed")
                return 1
            log.info(f"Testing kernel & fuse mount for {client_name}")
            fs_util.kernel_mount(
                client,
                kernel_mount_dir,
                mon_node_ip,
                new_client_hostname=client_name,
                sub_dir=f"{subvol_path}",
            )
            fs_util.fuse_mount(
                client,
                fuse_mount_dir,
                new_client_hostname=client_name,
                extra_params=f" -r {subvol_path}",
            )
            log.info(f"Testing read operation for {client_name}")
            commands = [
                f"dd if={kernel_mount_dir}/file of=~/file1 bs=10M count=10",
                f"dd if={fuse_mount_dir}/file  of=~/file2 bs=10M count=10",
            ]
            for command in commands:
                err = client[0].exec_command(sudo=True, cmd=command, long_running=True)
                if err:
                    log.error(f"Permissions set for {client_name} is not working")
                    return 1
            log.info(f"Verifying write operation fails for {client_name}")
            rc = verify_write_failure(
                client[0], kernel_mount_dir, fuse_mount_dir, client_name
            )
            if rc == 1:
                return 1
            kernel_mount_dir2 = "/mnt/" + "".join(
                secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
            )
            fuse_mount_dir2 = "/mnt/" + "".join(
                secrets.choice(string.ascii_lowercase + string.digits) for i in range(5)
            )
            log.info(f"Verifying mount on root directory fails for {client_name}")
            rc = verify_mount_failure_on_root(
                fs_util,
                client,
                kernel_mount_dir2,
                fuse_mount_dir2,
                client_name,
                mon_node_ip,
            )
            if rc == 1:
                return 1
            log.info(f"Testing client eviction for {client_name}")
            out, rc = client[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume evict {fs_name} {subvolume_name} {client_name} {subvolgroup_name}",
            )
            rc = verifiy_client_eviction(
                client[0], kernel_mount_dir, fuse_mount_dir, client_name
            )
            if rc == 1:
                return 1
            log.info(f"Testing deauthorization for {client_name}")
            out, rc = client[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume deauthorize {fs_name} {subvolume_name} {client_name} {subvolgroup_name}",
            )
            out, rc = client[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume authorized_list {fs_name} {subvolume_name} {subvolgroup_name}",
            )
            if client_name not in out:
                log.info(f"{client_name} is deauthorized successfully")
            else:
                log.error(f"{client_name} deauthorization failed")
                return 1
            log.info("Cleaning up the system")
            kernel_mount_dir = "/mnt/" + "".join(
                secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
            )
            mount_points.extend([kernel_mount_dir])
            fs_util.kernel_mount(
                client, kernel_mount_dir, mon_node_ip, new_client_hostname="admin"
            )
            out, rc = client[0].exec_command(
                sudo=True, cmd=f"rm -rf {kernel_mount_dir}/*"
            )
            for mount_point in mount_points:
                out, rc = client[0].exec_command(sudo=True, cmd=f"umount {mount_point}")
                if "5." in rhbuild:
                    out, err = client[1].exec_command(
                        sudo=True, cmd=f"umount {mount_point}"
                    )
            for mount_point in mount_points:
                out, rc = client[0].exec_command(
                    sudo=True, cmd=f"rm -rf {mount_point}/"
                )
                if "5." in rhbuild:
                    out, err = client[1].exec_command(
                        sudo=True, cmd=f"rm -rf {mount_point}/"
                    )
            client_no += 1
            subvolgroup_name = "subvolgroup1"
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
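
The authorize/deauthorize verification in this example boils down to checking whether the auth id appears in the `ceph fs subvolume authorized_list` output. Below is a minimal standalone sketch of that check, with placeholder volume, subvolume, and auth-id names.

import subprocess


def client_is_authorized(vol, subvol, auth_id):
    """Return True when auth_id shows up in the subvolume's authorized_list output."""
    out = subprocess.run(
        ["ceph", "fs", "subvolume", "authorized_list", vol, subvol],
        capture_output=True, text=True, check=True,
    ).stdout
    return auth_id in out


# Hypothetical flow with placeholder names:
# subprocess.run(["ceph", "fs", "subvolume", "authorize", "cephfs", "sub1",
#                 "client_rw", "--access_level", "rw"], check=True)
# assert client_is_authorized("cephfs", "sub1", "client_rw")
# subprocess.run(["ceph", "fs", "subvolume", "deauthorize", "cephfs", "sub1",
#                 "client_rw"], check=True)
# assert not client_is_authorized("cephfs", "sub1", "client_rw")
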
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573522   Verify the retained snapshot details with "ceph fs info" command

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create fs volume cephfs if the volume is not there (ceph fs volume create cephfs)
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_info_retain
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_info_retain

    Retain the snapshots and verify the subvolume state:
    1. Check the state of the subvolume; it should be "complete".
    2. Remove the sub volume while retaining its snapshots:
        ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>] --force --retain-snapshots
    3. Check the state of the subvolume; it should be "snapshot-retained".

    Clean Up:
    1. Del all the snapshots created
    2. Del Subvolumes
    3. Del SubvolumeGroups
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_info_retain"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_info",
            "group_name": "subvolgroup_info_retain",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_retain_info subvolgroup_info_retain",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_info",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_info_retain",
        }
        fs_util.create_snapshot(client1, **snapshot)
        client1.exec_command(sudo=True, cmd=f"mkdir -p /tmp/{mounting_dir}")
        client1.exec_command(
            sudo=True,
            cmd=f"cp -r {kernel_mounting_dir_1}/* /tmp/{mounting_dir}")
        subvol_info, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume info {default_fs} subvol_retain_info subvolgroup_info_retain --format json",
            check_ec=False,
        )
        subvol_info_state = json.loads(subvol_info.read().decode())
        log.info(
            f"subvol state before removing the volume with --retain-snapshots {subvol_info_state['state']}"
        )
        if subvol_info_state["state"] != "complete":
            raise CommandFailed(
                f"subvol state should be in complete state "
                f"but current state is {subvol_info_state['state']}")
        fs_util.remove_subvolume(client1,
                                 **subvolume,
                                 retain_snapshots=True,
                                 force=True,
                                 validate=False)
        log.info(
            "Verifying that getpath on the removed subvolume fails, since only its snapshot is retained in the filesystem"
        )
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_retain_info subvolgroup_info_retain",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed(
                "Remove subvolume with --retain-snapshots has not succeeded. "
                "We are still able to fetch the path of the subvolume after deletion")
        subvol_info, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume info {default_fs} subvol_retain_info subvolgroup_info_retain --format json",
            check_ec=False,
        )
        subvol_info_state = json.loads(subvol_info.read().decode())
        if subvol_info_state["state"] != "snapshot-retained":
            raise CommandFailed(
                f"subvol state should be in snapshot-retained state "
                f"but current state is {subvol_info_state['state']}")
        log.info(
            f"subvol state after removing the volume with --retain-snapshots {subvol_info_state['state']}"
        )
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_retain_info"
            },
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1,
                                     **clone_vol,
                                     validate=False,
                                     force=True,
                                     check_ec=False)
        fs_util.remove_snapshot(client1,
                                **snapshot,
                                validate=False,
                                force=True)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
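
The state checks above parse the JSON output of `ceph fs subvolume info`. A minimal sketch of that lookup is shown below, with placeholder names; the group argument is passed positionally, matching the usage in the test.

import json
import subprocess


def subvolume_state(vol, subvol, group):
    """Return the 'state' field of 'ceph fs subvolume info' for a subvolume.

    Expected values for this scenario: "complete" before removal and
    "snapshot-retained" after removal with --retain-snapshots.
    """
    out = subprocess.run(
        ["ceph", "fs", "subvolume", "info", vol, subvol, group, "--format", "json"],
        capture_output=True, text=True, check=True,
    ).stdout
    return json.loads(out)["state"]


# subvolume_state("cephfs", "subvol_retain_info", "subvolgroup_info_retain")
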
Example #23
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573407    Negative : Test to validate the decrease in quota limit once it reaches the max limit. (bytes)
                     Create a FS and create 10 directories and mount them on kernel client and fuse client (5 mounts each).
                     Set max bytes quota to a number (say 1Gb) and fill data until it reaches the limit and
                     verify if the set quota limit is working fine.
                     Try setting the quota limit to less than what was set
                     earlier; setting a quota lower than the space already occupied shouldn’t be allowed. Repeat the same
                     procedure 5-10 times.

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. create fs volume create cephfs if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_1 --size 5368706371 --group_name subvolgroup_1
    5. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1

    Test Case Flow:
    1. Mount the subvolume_1 on the client using fuse
    2. Mount the subvolume_2 on the client using kernel
    3. Set a quota attribute of 1gb on both mount points
    4. Create 3gb of data and check that it fails
    5. Perform the same on the kernel mount
    6. Increase the byte quota to 3gb and try creating files in the same directory used in step 3
    7. Perform the same on the kernel mount
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_quota_byte_increase_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_incr_fuse",
                "group_name": "subvolgroup_quota_byte_increase_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_incr_kernel",
                "group_name": "subvolgroup_quota_byte_increase_1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_byte_incr_kernel subvolgroup_quota_byte_increase_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_byte_incr_fuse subvolgroup_quota_byte_increase_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )
        fs_util.set_quota_attrs(clients[0], 50, 100000, fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        log.info("Increasing the quota to .5gb and validating file quota attr")
        fs_util.set_quota_attrs(clients[0], 100, 50000, fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        fs_util.set_quota_attrs(clients[0], 50, 100000, kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        log.info("Increasing the quota to .5gb and validating file quota attr")
        fs_util.set_quota_attrs(clients[0], 100, 50000, kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
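
The byte-quota validation above is delegated to `fs_util.byte_quota_test`. As a rough standalone illustration of the same idea, the sketch below keeps writing fixed-size files until a write fails, which is what an enforced `ceph.quota.max_bytes` limit should cause; the path and sizes are placeholders.

import subprocess


def write_until_quota(path, chunk_mb=100, max_chunks=64):
    """Write chunk_mb-sized files until a write fails.

    With a byte quota set on 'path', one of the dd calls should eventually be
    rejected (EDQUOT/ENOSPC); returning True means the quota was enforced
    before max_chunks files were written.
    """
    for i in range(max_chunks):
        rc = subprocess.run(
            ["dd", "if=/dev/zero", f"of={path}/quota_probe_{i}",
             "bs=1M", f"count={chunk_mb}"],
            capture_output=True,
        ).returncode
        if rc != 0:
            return True
    return False


# write_until_quota("/mnt/cephfs_fuseXXXX_1")  # placeholder mount with ceph.quota.max_bytes set
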
Example #24
def run(ceph_cluster, **kw):
    """
    CEPH-83574026 - zip unzip files continuously on an nfs share
    Pre-requisites:
    1. Create cephfs volume
       ceph fs volume create <vol_name>
    2. Create nfs cluster
       ceph nfs cluster create <nfs_name> <nfs_server>
    Test operation:
    1. Create cephfs nfs export
       ceph nfs export create cephfs <fs_name> <nfs_name> <nfs_export_name> path=<export_path>
    2. Create multiple files
    3. Continuously zip & unzip the files created in step 2
    4. Repeat steps 2-3
    Clean-up:
    1. Remove all the data in Cephfs file system
    2. Remove all the cephfs mounts
    3. Delete cephfs nfs export
    """
    try:
        tc = "CEPH-83574024"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        rhbuild = config.get("rhbuild")
        nfs_servers = ceph_cluster.get_ceph_objects("nfs")
        nfs_server = nfs_servers[0].node.hostname
        nfs_name = "cephfs-nfs"
        nfs_export_name = "/export_" + "".join(
            secrets.choice(string.digits) for i in range(3))
        export_path = "/"
        fs_name = "cephfs"
        nfs_mounting_dir = "/mnt/nfs_" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        if "5.0" in rhbuild:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {fs_name} {nfs_name} "
                f"{nfs_export_name} path={export_path}",
            )
        else:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {nfs_name} "
                f"{nfs_export_name} {fs_name} path={export_path}",
            )
        commands = [
            f"mkdir -p {nfs_mounting_dir}",
            f"mount -t nfs -o port=2049 {nfs_server}:{nfs_export_name} {nfs_mounting_dir}",
            "yum install zip -y",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        client1.exec_command(sudo=True,
                             cmd=f"mkdir {nfs_mounting_dir}/dir{{1..10}}")
        fs_util.create_file_data(client1, nfs_mounting_dir + "/dir1",
                                 100, "file", "file_data")
        fs_util.create_file_data(client1, nfs_mounting_dir + "/dir7", 100,
                                 "file", "random_data")
        # Chain the archives through the directories: zip dir1's files into
        # dir2 and unzip them there, then dir2 -> dir3, ... up to dir9 -> dir10.
        commands = []
        for i in range(1, 10):
            src_dir = f"{nfs_mounting_dir}/dir{i}"
            dst_dir = f"{nfs_mounting_dir}/dir{i + 1}"
            commands.append(
                f"for n in {{1..100}}; do zip {dst_dir}/file_$(printf %03d $n).zip "
                f"{src_dir}/file_$(printf %03d $n); done"
            )
            commands.append(
                f"for n in {{1..100}}; do unzip {dst_dir}/file_$(printf %03d $n).zip "
                f"-d {dst_dir}/; done"
            )
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up the system")
        commands = [
            f"rm -rf {nfs_mounting_dir}/*",
            f"umount {nfs_mounting_dir}",
            f"ceph nfs export delete {nfs_name} {nfs_export_name}",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        client1.exec_command(sudo=True,
                             cmd=f"rm -rf {nfs_mounting_dir}/",
                             check_ec=False)
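The loop above only exercises zip and unzip; a lightweight integrity check can be bolted on afterwards. A hedged sketch, assuming the same client object, that exec_command returns stdout as a string, and that CommandFailed is the exception class these tests already import (the directory argument is illustrative):

def verify_archives(client, archive_dir):
    # "unzip -t" tests every member of an archive without extracting it;
    # any archive that fails the test is echoed as BAD and treated as corruption.
    out, rc = client.exec_command(
        sudo=True,
        cmd=f"for z in {archive_dir}/*.zip; do unzip -t $z > /dev/null || echo BAD $z; done",
    )
    if "BAD" in out:
        raise CommandFailed(f"corrupted zip archive found under {archive_dir}")

Calling something like verify_archives(client1, f"{nfs_mounting_dir}/dir2") after the first zip pass would catch truncated archives early.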
Example #25
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Requires 2 client nodes in the setup

    Operations performed :
    1. Enable Multiple File systems 1. In Replicated 2. In EC
    2. Create 2 SubVolumeGroups on each file system
    3. Create 2 Sub volumes on each of the subvolume group Size 20 GB
    4. Create 2 sub volumes on default subvolume group
    5. Mount 1 subvolumegroup/subvolume on kernel and 1 subvolume on Fuse → Client1
    6. Mount 1 subvolume on kernel and 1 subvolumegroup/subvolume on Fuse → Client2
    7. On EC, mount 1 subvolumegroup/subvolume on kernel and 1 subvolume on Fuse → Client2
    8. On EC, mount 1 subvolume on kernel and 1 subvolumegroup/subvolume on Fuse → Client1
    9. Run IOs on the subvolumegroup/subvolume on the kernel client and on the subvolume on the Fuse client using the below commands
        git clone https://github.com/distributed-system-analysis/smallfile.git
        cd smallfile
        for i in create read append read delete create overwrite rename delete-renamed mkdir rmdir create symlink
        stat chmod ls-l delete cleanup  ;
        do python3 smallfile_cli.py --operation $i --threads 8 --file-size 10240 --files 100 --top /mnt/kcephfs/vol5/
        ; done
        IO Tool 2 :
        wget -O linux.tar.gz http://download.ceph.com/qa/linux-5.4.tar.gz
        tar -xzf linux.tar.gz tardir/ ; sleep 10 ; rm -rf  tardir/ ; sleep 10 ; done
        DD on Each volume:

        Wget :
       http://download.eng.bos.redhat.com/fedora/linux/releases/34/Server/x86_64/iso/Fedora-Server-dvd-x86_64-34-1.2.iso
       http://download.eng.bos.redhat.com/fedora/linux/releases/33/Server/x86_64/iso/Fedora-Server-dvd-x86_64-33-1.2.iso
       http://download.eng.bos.redhat.com/fedora/linux/releases/32/Server/x86_64/iso/Fedora-Server-dvd-x86_64-32-1.6.iso
        Note : Run 2 IO tools mentioned above on each volume mounted
    10. Create Snapshots. Verify with snap ls | grep that the snapshots got created
    11. Create Clones on all 8 volumes. Verify the clones got created using clone ls (subvolume ls)
    12. Set a file-level quota on 2 directories under a subvolume and a size-level quota on 2 other
        directories under a subvolume.
    13. Verify quota based on your configuration
    14. Clear Quotas
    15. Remove Clones
    16. Remove Snapshots
    17. Unmount
    18. Remove Volumes.

    Args:
        ceph_cluster:
        **kw:

    Returns:

    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)
        log.info("Create 2 SubVolumeGroups on each file system")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_1"},
            {"vol_name": default_fs, "group_name": "subvolgroup_2"},
            {"vol_name": "cephfs-ec", "group_name": "subvolgroup_1"},
            {"vol_name": "cephfs-ec", "group_name": "subvolgroup_2"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)
        log.info("Create 2 Sub volumes on each of the subvolume group Size 20 GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_2",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_2",
                "size": "5368706371",
            },
            {"vol_name": default_fs, "subvol_name": "subvol_5", "size": "5368706371"},
            {"vol_name": default_fs, "subvol_name": "subvol_6", "size": "5368706371"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_7", "size": "5368706371"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_8", "size": "5368706371"},
            {"vol_name": default_fs, "subvol_name": "subvol_9", "size": "5368706371"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_10", "size": "5368706371"},
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernal and 1 subvloume on Fuse → Client1"
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {default_fs} subvol_5"
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )

        log.info(
            "Mount 1 subvolumeon kernal and 1 subvloumegroup/subvolume on Fuse → Client2"
        )
        kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")

        subvol_path, rc = clients[1].exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {default_fs} subvol_6"
        )
        fs_util.kernel_mount(
            [clients[1]],
            kernel_mounting_dir_2,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )

        subvol_path, rc = clients[1].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_2",
        )
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount(
            [clients[1]],
            fuse_mounting_dir_2,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )

        log.info(
            "On EC,Mount 1 subvolumegroup/subvolume on kernal and 1 subvloume on Fuse → Client2"
        )
        if build.startswith("5"):
            kernel_mounting_dir_3 = f"/mnt/cephfs_kernel{mounting_dir}_EC_3/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_3 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_3,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.read().decode().strip()}",
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[0].exec_command(
                sudo=True, cmd="ceph fs subvolume getpath cephfs-ec subvol_7"
            )
            fuse_mounting_dir_3 = f"/mnt/cephfs_fuse{mounting_dir}_EC_3/"
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_3,
                extra_params=f" -r {subvol_path.read().decode().strip()} --client_fs cephfs-ec",
            )

        log.info(
            "On EC,Mount 1 subvolumeon kernal and 1 subvloumegroup/subvolume on Fuse → Client1"
        )
        if build.startswith("5"):
            kernel_mounting_dir_4 = f"/mnt/cephfs_kernel{mounting_dir}_EC_4/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[1].exec_command(
                sudo=True, cmd="ceph fs subvolume getpath cephfs-ec subvol_8"
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_4,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.read().decode().strip()}",
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_4 subvolgroup_2",
            )
            fuse_mounting_dir_4 = f"/mnt/cephfs_fuse{mounting_dir}_EC_4/"
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_4,
                extra_params=f" -r {subvol_path.read().decode().strip()} --client_fs cephfs-ec",
            )

        run_ios(clients[0], kernel_mounting_dir_1)
        run_ios(clients[0], fuse_mounting_dir_1)
        run_ios(clients[1], kernel_mounting_dir_2)
        run_ios(clients[1], fuse_mounting_dir_2)
        if build.startswith("5"):
            run_ios(clients[0], kernel_mounting_dir_3)
            run_ios(clients[1], kernel_mounting_dir_4)
            run_ios(clients[0], fuse_mounting_dir_3)
            run_ios(clients[1], fuse_mounting_dir_4)

        log.info("Create Snapshots.Verify the snap ls")
        snapshot_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "snap_name": "snap_1",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "snap_name": "snap_2",
                "group_name": "subvolgroup_2",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "snap_name": "snap_3",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "snap_name": "snap_4",
                "group_name": "subvolgroup_2",
            },
            {"vol_name": default_fs, "subvol_name": "subvol_5", "snap_name": "snap_5"},
            {"vol_name": default_fs, "subvol_name": "subvol_6", "snap_name": "snap_6"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_7", "snap_name": "snap_7"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_8", "snap_name": "snap_8"},
        ]
        for snapshot in snapshot_list:
            fs_util.create_snapshot(clients[0], **snapshot)

        clone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "snap_name": "snap_1",
                "target_subvol_name": "clone_1",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "snap_name": "snap_2",
                "target_subvol_name": "clone_2",
                "group_name": "subvolgroup_2",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "snap_name": "snap_3",
                "target_subvol_name": "clone_3",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "snap_name": "snap_4",
                "target_subvol_name": "clone_4",
                "group_name": "subvolgroup_2",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_5",
                "snap_name": "snap_5",
                "target_subvol_name": "clone_5",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_6",
                "snap_name": "snap_6",
                "target_subvol_name": "clone_6",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_7",
                "snap_name": "snap_7",
                "target_subvol_name": "clone_7",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_8",
                "snap_name": "snap_8",
                "target_subvol_name": "clone_8",
            },
        ]
        for clone in clone_list:
            fs_util.create_clone(clients[0], **clone)
        log.info(
            "Set File Level Quota on 2 directories under subvolume and Size Level Quota on "
            "under 2 directories under subvolume"
        )
        subvol_path, rc = clients[0].exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {default_fs} subvol_9"
        )
        fuse_mounting_dir_5 = f"/mnt/cephfs_fuse{mounting_dir}_5/"
        fs_util.fuse_mount(
            [clients[1]],
            fuse_mounting_dir_5,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )
        clients[1].exec_command(
            sudo=True,
            cmd=f"setfattr -n ceph.quota.max_files -v 10 {fuse_mounting_dir_5}",
        )
        clients[1].exec_command(
            sudo=True, cmd=f"getfattr -n ceph.quota.max_files {fuse_mounting_dir_5}"
        )
        out, rc = clients[1].exec_command(
            sudo=True,
            cmd=f"cd {fuse_mounting_dir_5};touch quota{{1..15}}.txt",
        )
        log.info(out)
        if clients[1].node.exit_status == 0:
            log.warning(
                "Quota set has been failed,Able to create more files."
                "This is known limitation"
            )
        if build.startswith("5"):
            subvol_path, rc = clients[0].exec_command(
                sudo=True, cmd="ceph fs subvolume getpath cephfs-ec subvol_10"
            )
            kernel_mounting_dir_5 = f"/mnt/cephfs_kernel{mounting_dir}_5/"
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_5,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.read().decode().strip()}",
                extra_params=",fs=cephfs-ec",
            )
            clients[1].exec_command(
                sudo=True,
                cmd=f"setfattr -n ceph.quota.max_files -v 10 {kernel_mounting_dir_5}",
            )
            clients[1].exec_command(
                sudo=True,
                cmd=f"getfattr -n ceph.quota.max_files {kernel_mounting_dir_5}",
            )

            out, rc = clients[1].exec_command(
                sudo=True,
                cmd=f"cd {kernel_mounting_dir_5};touch quota{{1..15}}.txt",
            )
            log.info(out)
            if clients[1].node.exit_status == 0:
                log.warning(
                    "Quota set has been failed,Able to create more files."
                    "This is known limitation"
                )
                # return 1

        log.info("Clean up the system")
        fs_util.client_clean_up(
            "umount", kernel_clients=[clients[0]], mounting_dir=kernel_mounting_dir_1
        )
        fs_util.client_clean_up(
            "umount", kernel_clients=[clients[1]], mounting_dir=kernel_mounting_dir_2
        )
        if build.startswith("5"):
            fs_util.client_clean_up(
                "umount",
                kernel_clients=[clients[0]],
                mounting_dir=kernel_mounting_dir_3,
            )

            fs_util.client_clean_up(
                "umount",
                kernel_clients=[clients[1]],
                mounting_dir=kernel_mounting_dir_4,
            )
            fs_util.client_clean_up(
                "umount",
                kernel_clients=[clients[1]],
                mounting_dir=kernel_mounting_dir_5,
            )
        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir_1
        )
        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[1]], mounting_dir=fuse_mounting_dir_2
        )
        if build.startswith("5"):
            fs_util.client_clean_up(
                "umount", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir_3
            )
            fs_util.client_clean_up(
                "umount", fuse_clients=[clients[1]], mounting_dir=fuse_mounting_dir_4
            )
        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[1]], mounting_dir=fuse_mounting_dir_5
        )
        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true"
        )
        rmsnapshot_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "snap_name": "snap_1",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "snap_name": "snap_2",
                "group_name": "subvolgroup_2",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "snap_name": "snap_3",
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "snap_name": "snap_4",
                "group_name": "subvolgroup_2",
            },
            {"vol_name": default_fs, "subvol_name": "subvol_5", "snap_name": "snap_5"},
            {"vol_name": default_fs, "subvol_name": "subvol_6", "snap_name": "snap_6"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_7", "snap_name": "snap_7"},
            {"vol_name": "cephfs-ec", "subvol_name": "subvol_8", "snap_name": "snap_8"},
        ]
        for snapshot in rmsnapshot_list:
            fs_util.remove_snapshot(clients[0], **snapshot)

        rmclone_list = [
            {"vol_name": default_fs, "subvol_name": "clone_1"},
            {"vol_name": default_fs, "subvol_name": "clone_2"},
            {"vol_name": "cephfs-ec", "subvol_name": "clone_3"},
            {"vol_name": "cephfs-ec", "subvol_name": "clone_4"},
            {"vol_name": default_fs, "subvol_name": "clone_5"},
            {"vol_name": default_fs, "subvol_name": "clone_6"},
            {"vol_name": "cephfs-ec", "subvol_name": "clone_7"},
            {"vol_name": "cephfs-ec", "subvol_name": "clone_8"},
        ]
        rmsubvolume_list = rmclone_list + subvolume_list

        for subvolume in rmsubvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
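Step 14 of the plan above ("Clear Quotas") does not appear in the code; in CephFS a quota is removed by writing 0 to the corresponding extended attribute. A minimal sketch, assuming the same client/exec_command interface and a still-mounted quota directory such as fuse_mounting_dir_5:

def clear_quotas(client, mount_dir):
    # A value of 0 removes the CephFS quota for that attribute.
    for attr in ("ceph.quota.max_files", "ceph.quota.max_bytes"):
        client.exec_command(
            sudo=True,
            cmd=f"setfattr -n {attr} -v 0 {mount_dir}",
        )
    # Once cleared, getfattr typically reports the attribute as absent, so
    # tolerate a non-zero exit code here.
    client.exec_command(
        sudo=True,
        cmd=f"getfattr -n ceph.quota.max_files {mount_dir}",
        check_ec=False,
    )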
Example #26
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Fill 60% of the cluster with data
    Test operation:
    1. Create a volume
    2. Mount the cephfs on both fuse and kernel clients
    3. Create few directory from the both clients
    4. Execute the command "ceph fs set <fs_name> max_mds n [where n is the number]"
    5. Check if the number of mds increases and decreases properly
    """
    try:
        tc = "CEPH-83573462"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        client2 = clients[1]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        mon_node_ips = fs_util.get_mon_node_ips()
        kernel_dir_generate = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5))
        )
        kernel_mounting_dir = f"/mnt/cephfs_kernel{kernel_dir_generate}/"
        fs_util.auth_list([client1])
        fs_util.kernel_mount([client1], kernel_mounting_dir, ",".join(mon_node_ips))
        client1.exec_command(
            sudo=True,
            cmd=f"dd if=/dev/zero of={kernel_mounting_dir}" + ".txt bs=5M count=1000",
            long_running=True,
        )
        for i in range(10):
            dir_name_generate = "".join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in list(range(5))
            )
            client1.exec_command(
                sudo=True, cmd=f"mkdir {kernel_mounting_dir}dir_{dir_name_generate}"
            )
        fuse_dir_generate = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5))
        )
        fuse_mounting_dir = f"/mnt/cephfs_fuse{fuse_dir_generate}/"
        client2.exec_command(sudo=True, cmd="dnf install ceph-fuse")
        fs_util.auth_list([client2])
        fs_util.fuse_mount([client2], fuse_mounting_dir)
        for i in range(10):
            dir_name_generate = "".join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in list(range(5))
            )
            client2.exec_command(
                sudo=True, cmd=f"mkdir {fuse_mounting_dir}dir_{dir_name_generate}"
            )
        c1_out, c1_result = client1.exec_command(
            sudo=True, cmd="ceph fs get cephfs -f json"
        )
        decoded_out = json.loads(c1_out)
        number_of_up_temp = decoded_out["mdsmap"]["up"]
        number_of_up = len(number_of_up_temp)
        number_of_mds_max = decoded_out["mdsmap"]["max_mds"]
        c1_out2, result2 = client1.exec_command(sudo=True, cmd="ceph -s -f json")
        decoded_out2 = json.loads(c1_out2)
        number_of_standby = decoded_out2["fsmap"]["up:standby"]
        log.info(number_of_standby)
        counts = number_of_standby
        for i in range(counts):
            number_of_mds_max = number_of_mds_max + 1
            client1.exec_command(
                sudo=True, cmd=f"ceph fs set cephfs max_mds {str(number_of_mds_max)}"
            )
            number_of_standby = number_of_standby - 1
            number_of_up = number_of_up + 1
            time.sleep(50)
            kernel_output, kernel_result = client1.exec_command(
                sudo=True, cmd="ceph fs get cephfs -f json"
            )
            kernel_decoded = json.loads(kernel_output)
            current_max_mds = kernel_decoded["mdsmap"]["max_mds"]
            kernel_output2, kernel_result2 = client1.exec_command(
                sudo=True, cmd="ceph -s -f json"
            )
            kernel_decoded2 = json.loads(kernel_output2)
            current_standby = kernel_decoded2["fsmap"]["up:standby"]
            if current_max_mds != number_of_mds_max:
                return 1
            if number_of_up != number_of_mds_max:
                return 1
            if number_of_standby != current_standby:
                return 1
        for i in range(counts):
            number_of_mds_max = number_of_mds_max - 1
            client1.exec_command(
                sudo=True, cmd=f"ceph fs set cephfs max_mds {str(number_of_mds_max)}"
            )
            number_of_standby = number_of_standby + 1
            number_of_up = number_of_up - 1
            time.sleep(50)
            kernel_output, kernel_result = client1.exec_command(
                sudo=True, cmd="ceph fs get cephfs -f json"
            )
            kernel_decoded = json.loads(kernel_output)
            current_max_mds = kernel_decoded["mdsmap"]["max_mds"]
            kernel_output2, kernel_result2 = client1.exec_command(
                sudo=True, cmd="ceph -s -f json"
            )
            kernel_decoded2 = json.loads(kernel_output2)
            current_standby = kernel_decoded2["fsmap"]["up:standby"]
            if current_max_mds != number_of_mds_max:
                return 1
            if number_of_up != number_of_mds_max:
                return 1
            if number_of_standby != current_standby:
                return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
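The max_mds test above waits a fixed 50 seconds after each change and tracks the up/standby counters by hand; a polling helper is usually more robust. A sketch under the same assumptions as the example (a client object whose exec_command returns the command's stdout as a string); the helper name and timeouts are illustrative:

import json
import time


def wait_for_active_mds(client, expected_active, timeout=300, interval=10):
    # Poll "ceph fs get cephfs -f json" until the number of up MDS ranks and
    # max_mds both match the requested value, or give up after the timeout.
    end_time = time.time() + timeout
    while time.time() < end_time:
        out, rc = client.exec_command(sudo=True, cmd="ceph fs get cephfs -f json")
        mdsmap = json.loads(out)["mdsmap"]
        if len(mdsmap["up"]) == expected_active and mdsmap["max_mds"] == expected_active:
            return True
        time.sleep(interval)
    return False

Calling this after each "ceph fs set cephfs max_mds ..." would replace the fixed sleep in the example above.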
Example #27
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Create a subvolume with sufficient data (around 500 files of 1 MB each)
    2. Create a snapshot of the above subvolume
    3. Create 4 number of clones from above snapshot

    Test operation:
    1. When the clones are in the 'in-progress' state, delete all the clone subvolumes with the force option.
    2. Check if the clone operation status is in the 'in-progress' state
    3. Writing sufficient data in step 1 of the pre-requisites provides enough time to achieve that
    4. Try to delete the subvolume of the clone that is in the 'in-progress' state
    5. The subvolume deletion should fail
    6. Try to cancel the cloning
    7. After canceling the cloning, it should be able to delete the subvolume
    """
    try:
        bz = "1980920"
        tc = "CEPH-83574681"
        fs_util = FsUtils(ceph_cluster)
        log.info(f"Running CephFS tests for BZ-{bz}")
        log.info(f"Running CephFS tests for BZ-{tc}")
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        create_cephfs = "ceph fs volume create cephfs"
        client1.exec_command(sudo=True, cmd=create_cephfs)
        subvolume_name_generate = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5))
        )
        subvolume = {
            "vol_name": "cephfs",
            "subvol_name": f"subvol_{subvolume_name_generate}",
            "size": "5368706371",
        }
        subvolume_name = subvolume["subvol_name"]
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rcc = client1.exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath cephfs {subvolume_name}"
        )
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.auth_list([client1])
        fs_util.kernel_mount(
            [client1],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        log.info("Checking Pre-requisites")
        fs_util.create_snapshot(
            client1, "cephfs", subvolume_name, f"subvol_1_snap{subvolume_name}"
        )
        for i in range(1, 4):
            new_subvolume_name = f"subvol_1_snap_clone{subvolume_name}{str(i)}"
            fs_util.create_clone(
                client1,
                "cephfs",
                subvolume_name,
                f"subvol_1_snap{subvolume_name}",
                new_subvolume_name,
            )
            out1, err1 = client1.exec_command(
                sudo=True, cmd=f"ceph fs clone status cephfs {new_subvolume_name}"
            )
            output1 = json.loads(out1)
            output2 = output1["status"]["state"]
            log.info(new_subvolume_name + " status: " + str(output2))
            if output2 == "in-progress":
                result, error = client1.exec_command(
                    sudo=True,
                    cmd=f"ceph fs subvolume rm cephfs {new_subvolume_name} --force",
                    check_ec=False,
                )
                log.info("Subvolume Remove Executed")
                error_result = error
                if "clone in-progress" in error_result:
                    log.info("Clone is in-progress as expected")
                client1.exec_command(
                    sudo=True, cmd=f"ceph fs clone cancel cephfs {new_subvolume_name}"
                )
            result2, error2 = client1.exec_command(
                sudo=True, cmd=f"ceph fs clone status cephfs {new_subvolume_name}"
            )
            out1 = json.loads(result2)
            out2 = out1["status"]["state"]
            if out2 == "canceled":
                fs_util.remove_subvolume(
                    client1, "cephfs", new_subvolume_name, force=True
                )
        fs_util.remove_snapshot(
            client1, "cephfs", subvolume_name, f"subvol_1_snap{subvolume_name}"
        )
        fs_util.remove_subvolume(client1, "cephfs", subvolume_name)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
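The clone status above is sampled once right after creation; depending on timing, a clone may already have moved past in-progress. A polling sketch for waiting on a terminal clone state, assuming the same client object, stdout-as-string exec_command behaviour, and the CommandFailed exception already used by these tests:

import json
import time


def wait_for_clone_state(client, clone_name, states=("complete", "canceled"), timeout=600):
    # Poll "ceph fs clone status" until the clone reaches one of the given
    # terminal states, or fail after the timeout.
    end_time = time.time() + timeout
    while time.time() < end_time:
        out, rc = client.exec_command(
            sudo=True, cmd=f"ceph fs clone status cephfs {clone_name}"
        )
        state = json.loads(out)["status"]["state"]
        if state in states:
            return state
        time.sleep(10)
    raise CommandFailed(f"clone {clone_name} did not reach {states} within {timeout}s")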
Example #28
def run(ceph_cluster, **kw):
    """
    CEPH-83574028 - Ensure the path of the nfs export is displayed properly.
    Pre-requisites:
    1. Create cephfs volume
       ceph fs volume create <vol_name>
    2. Create nfs cluster
       ceph nfs cluster create <nfs_name> <nfs_server>

    Test operation:
    1. Create cephfs nfs export
       ceph nfs export create cephfs <fs_name> <nfs_name> <nfs_export_name> path=<export_path>
    2. Verify path of cephfs nfs export
       ceph nfs export get <nfs_name> <nfs_export_name>

    Clean-up:
    1. Remove cephfs nfs export
    """
    try:
        tc = "CEPH-83574028"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        rhbuild = config.get("rhbuild")
        nfs_servers = ceph_cluster.get_ceph_objects("nfs")
        nfs_server = nfs_servers[0].node.hostname
        nfs_name = "cephfs-nfs"
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        client1.exec_command(sudo=True, cmd="ceph mgr module enable nfs")
        out, rc = client1.exec_command(
            sudo=True, cmd=f"ceph nfs cluster create {nfs_name} {nfs_server}")
        if wait_for_process(client=client1,
                            process_name=nfs_name,
                            ispresent=True):
            log.info("ceph nfs cluster created successfully")
        else:
            raise CommandFailed("Failed to create nfs cluster")
        nfs_export_name = "/export_" + "".join(
            secrets.choice(string.digits) for i in range(3))
        export_path = "/"
        fs_name = "cephfs"
        if "5.0" in rhbuild:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {fs_name} {nfs_name} "
                f"{nfs_export_name} path={export_path}",
            )
        else:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {nfs_name} "
                f"{nfs_export_name} {fs_name} path={export_path}",
            )
        out, rc = client1.exec_command(sudo=True,
                                       cmd=f"ceph nfs export ls {nfs_name}")

        if nfs_export_name not in out:
            raise CommandFailed("Failed to create nfs export")

        log.info("ceph nfs export created successfully")
        out, rc = client1.exec_command(
            sudo=True, cmd=f"ceph nfs export get {nfs_name} {nfs_export_name}")
        output = json.loads(out)
        export_get_path = output["path"]
        if export_get_path != export_path:
            log.error("Export path is not correct")
            return 1

        log.info("Test completed successfully")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up")
        client1.exec_command(
            sudo=True,
            cmd=f"ceph nfs export delete {nfs_name} {nfs_export_name}",
            check_ec=False,
        )
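Beyond comparing the path field, the export can also be exercised end to end. A hedged sketch that mounts the verified export, lists its root, and unmounts again, reusing the command patterns from the other NFS examples here (the mount directory name is an assumption):

def smoke_test_export(client, nfs_server, nfs_export_name, mount_dir="/mnt/nfs_export_check"):
    # Mount the export, list the exported root, then unmount again.
    client.exec_command(sudo=True, cmd=f"mkdir -p {mount_dir}")
    client.exec_command(
        sudo=True,
        cmd=f"mount -t nfs -o port=2049 {nfs_server}:{nfs_export_name} {mount_dir}",
    )
    out, rc = client.exec_command(sudo=True, cmd=f"ls {mount_dir}")
    client.exec_command(sudo=True, cmd=f"umount {mount_dir}", check_ec=False)
    return out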
Example #29
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. create fs volume create cephfs and cephfs-ec

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name> --gid <num> --uid <num>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount subvolume on both fuse and kernel clients and run IO's

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create cephfs subvolumegroup with customized uid and gid ")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1",
                "uid": "20",
                "gid": "30",
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1",
                "uid": "40",
                "gid": "50",
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info(
            "Create 2 Sub volumes on each of the subvolume group Size 5 GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernel and 1 subvloume on Fuse → Client1"
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
        )

        log.info(
            "On EC,Mount 1 subvolumegroup/subvolume on kernal and 1 subvloume on Fuse → Client2"
        )
        if build.startswith("5"):
            kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_EC_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_3 subvolgroup_ec1",
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_2,
                ",".join(mon_node_ips),
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_4 subvolgroup_ec1",
            )
            fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_2,
                extra_params=" --client_fs cephfs-ec",
            )

        log.info("Get the path of subvolume group")
        subvolgroup_default, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolumegroup getpath {default_fs} subvolgroup_1",
        )
        subvolgroup_default_path = subvolgroup_default.read().decode().strip()
        subvolgroup_ec, rc = clients[0].exec_command(
            sudo=True,
            cmd="ceph fs subvolumegroup getpath cephfs-ec subvolgroup_ec1",
        )
        subvolgroup_ec_path = subvolgroup_ec.read().decode().strip()

        def get_defined_uid(group_name, subvolumegroup_list):
            for subvolumegroup in subvolumegroup_list:
                if group_name == subvolumegroup["group_name"]:
                    return subvolumegroup.get("uid")

        log.info("Validate the uid of the subgroup")
        subgroup_1_uid = get_defined_uid(
            "subvolgroup_1", subvolumegroup_list=subvolumegroup_list)
        subgroup_2_uid = get_defined_uid(
            "subvolgroup_ec1", subvolumegroup_list=subvolumegroup_list)
        stat_of_uid_on_kernel_default_fs = fs_util.get_stats(
            client=clients[0],
            file_path=kernel_mounting_dir_1.rstrip("/") +
            subvolgroup_default_path,
            format="%u",
        )
        stat_of_uid_on_kernel_default_ec = fs_util.get_stats(
            client=clients[1],
            file_path=kernel_mounting_dir_2.rstrip("/") + subvolgroup_ec_path,
            format="%u",
        )
        stat_of_uid_on_fuse_default_fs = fs_util.get_stats(
            client=clients[0],
            file_path=fuse_mounting_dir_1.rstrip("/") +
            subvolgroup_default_path,
            format="%u",
        )
        stat_of_uid_on_fuse_default_ec = fs_util.get_stats(
            client=clients[1],
            file_path=fuse_mounting_dir_2.rstrip("/") + subvolgroup_ec_path,
            format="%u",
        )
        if int(subgroup_1_uid) != int(
                stat_of_uid_on_kernel_default_fs) and int(
                    subgroup_1_uid) != int(stat_of_uid_on_fuse_default_fs):
            log.error("UID is mismatching on sunvolgroup_1")
            return 1
        if int(subgroup_2_uid) != int(stat_of_uid_on_fuse_default_ec) and int(
                subgroup_2_uid) != int(stat_of_uid_on_kernel_default_ec):
            log.error("UID is mismatching on subvolgroup_ec1")
            return 1

        def get_defined_gid(group_name, subvolumegroup_list):
            for subvolumegroup in subvolumegroup_list:
                if group_name == subvolumegroup["group_name"]:
                    return subvolumegroup.get("gid")

        log.info("Validate the gid of the subgroup")
        subgroup_1_gid = get_defined_gid(
            "subvolgroup_1", subvolumegroup_list=subvolumegroup_list)
        subgroup_2_gid = get_defined_gid(
            "subvolgroup_ec1", subvolumegroup_list=subvolumegroup_list)
        stat_of_gid_on_kernel_default_fs = fs_util.get_stats(
            client=clients[0],
            file_path=kernel_mounting_dir_1.rstrip("/") +
            subvolgroup_default_path,
            format="%g",
        )
        stat_of_gid_on_kernel_default_ec = fs_util.get_stats(
            client=clients[1],
            file_path=kernel_mounting_dir_2.rstrip("/") + subvolgroup_ec_path,
            format="%g",
        )
        stat_of_gid_on_fuse_default_fs = fs_util.get_stats(
            client=clients[0],
            file_path=fuse_mounting_dir_1.rstrip("/") +
            subvolgroup_default_path,
            format="%g",
        )
        stat_of_gid_on_fuse_default_ec = fs_util.get_stats(
            client=clients[1],
            file_path=fuse_mounting_dir_2.rstrip("/") + subvolgroup_ec_path,
            format="%g",
        )
        if int(subgroup_1_gid) != int(
                stat_of_gid_on_kernel_default_fs) and int(
                    subgroup_1_gid) != int(stat_of_gid_on_fuse_default_fs):
            log.error("GID is mismatching on sunvolgroup_1")
            return 1
        if int(subgroup_2_gid) != int(
                stat_of_gid_on_kernel_default_ec) and int(
                    subgroup_2_gid) != int(stat_of_gid_on_fuse_default_ec):
            log.error("GID is mismatching on subvolgroup_ec1")
            return 1

        run_ios(clients[0], kernel_mounting_dir_1)
        run_ios(clients[0], fuse_mounting_dir_1)
        run_ios(clients[1], kernel_mounting_dir_2)
        run_ios(clients[1], fuse_mounting_dir_2)

        log.info("Clean up the system")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)

        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[0]],
                                mounting_dir=kernel_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[1]],
                                mounting_dir=kernel_mounting_dir_2)

        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[1]],
                                mounting_dir=fuse_mounting_dir_2)

        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true")

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
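The uid/gid checks above stat the kernel and fuse mounts with separate calls per attribute; a single stat call can return both values at once. A condensed sketch under the same assumptions (client object with exec_command returning stdout as a string); the helper name is illustrative:

def verify_owner(client, path, expected_uid, expected_gid):
    # "%u:%g" prints the numeric owner and group of the path in one call.
    out, rc = client.exec_command(sudo=True, cmd=f"stat -c '%u:%g' {path}")
    uid, gid = out.strip().split(":")
    return int(uid) == int(expected_uid) and int(gid) == int(expected_gid)

For example, verify_owner(clients[0], kernel_mounting_dir_1.rstrip("/") + subvolgroup_default_path, 20, 30) would mirror the subvolgroup_1 check in the example above.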
Example #30
def run(ceph_cluster, **kw):
    """
    CEPH-83574027 - Ensure creation of Subvolgroups, subvolumes works on NFS exports and run IO from nfs clients
    Pre-requisites:
    1. Create cephfs volume
       ceph fs volume create <vol_name>
    2. Create nfs cluster
       ceph nfs cluster create <nfs_name> <nfs_server>

    Test operation:
    1. Create cephfs nfs export
       ceph nfs export create cephfs <fs_name> <nfs_name> <nfs_export_name> path=<export_path>
    2. Create 2 cephfs subvolume groups
    3. Create a cephfs subvolume in a cephfs subvolume group
    4. Create a cephfs subvolume in the default cephfs subvolume group
    5. Mount the cephfs export over nfs
       mount -t nfs -o port=2049 <nfs_server>:<nfs_export> <nfs_mounting_dir>
    6. Verify subvolume groups & subvolumes are created
    7. Run IOs on both cephfs subvolumegroups & subvolumes

    Clean-up:
    1. Remove all the data in Cephfs file system
    2. Remove all the cephfs mounts
    3. Delete cephfs nfs export
    """
    try:
        tc = "CEPH-83574027"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        rhbuild = config.get("rhbuild")
        nfs_servers = ceph_cluster.get_ceph_objects("nfs")
        nfs_server = nfs_servers[0].node.hostname
        nfs_name = "cephfs-nfs"
        nfs_export_name = "/export_" + "".join(
            secrets.choice(string.digits) for i in range(3))
        export_path = "/"
        fs_name = "cephfs"
        nfs_mounting_dir = "/mnt/nfs_" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        if "5.0" in rhbuild:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {fs_name} {nfs_name} "
                f"{nfs_export_name} path={export_path}",
            )
        else:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {nfs_name} "
                f"{nfs_export_name} {fs_name} path={export_path}",
            )
        subvolumegroup_list = [
            {
                "vol_name": fs_name,
                "group_name": "subvolgroup_1",
            },
            {
                "vol_name": fs_name,
                "group_name": "subvolgroup_2",
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": fs_name,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": fs_name,
                "subvol_name": "subvol_2",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)
        commands = [
            f"mkdir -p {nfs_mounting_dir}",
            f"mount -t nfs -o port=2049 {nfs_server}:{nfs_export_name} {nfs_mounting_dir}",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        out, rc = client1.exec_command(sudo=True,
                                       cmd=f"ls {nfs_mounting_dir}/volumes/")
        if "subvolgroup_1" not in out:
            raise CommandFailed("Subvolume group 1 creation failed")
        if "subvolgroup_2" not in out:
            raise CommandFailed("Subvolume group 2 creation failed")
        out, rc = client1.exec_command(
            sudo=True, cmd=f"ls {nfs_mounting_dir}/volumes/subvolgroup_1")
        if "subvol_1" not in out:
            raise CommandFailed("Subvolume creation in subvolume group failed")
        out, rc = client1.exec_command(
            sudo=True, cmd=f"ls {nfs_mounting_dir}/volumes/_nogroup")
        if "subvol_2" not in out:
            raise CommandFailed(
                "Subvolume creation in default subvolume group failed")
        commands = [
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 --files"
            f" 1000 --files-per-dir 10 --dirs-per-dir 2 --top {nfs_mounting_dir}/volumes/subvolgroup_1/subvol_1",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation read --threads 10 --file-size 4 --files"
            f" 1000 --files-per-dir 10 --dirs-per-dir 2 --top {nfs_mounting_dir}/volumes/subvolgroup_1/subvol_1",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 8 "
            f"--files 2000 --files-per-dir 5 --dirs-per-dir 5 --top {nfs_mounting_dir}/volumes/_nogroup/subvol_2/",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation read --threads 10 --file-size 8 "
            f"--files 2000 --files-per-dir 5 --dirs-per-dir 5 --top {nfs_mounting_dir}/volumes/_nogroup/subvol_2/",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 5 --file-size 16 "
            f"--files 4000 --files-per-dir 20 --dirs-per-dir 4 --top {nfs_mounting_dir}/volumes/subvolgroup_2",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation read --threads 5 --file-size 16 "
            f"--files 4000 --files-per-dir 20 --dirs-per-dir 4 --top {nfs_mounting_dir}/volumes/subvolgroup_2",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        log.info("Test completed successfully")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up")
        client1.exec_command(sudo=True, cmd=f"rm -rf {nfs_mounting_dir}/*")
        client1.exec_command(sudo=True, cmd=f"umount {nfs_mounting_dir}")
        client1.exec_command(
            sudo=True,
            cmd=f"ceph nfs export delete {nfs_name} {nfs_export_name}",
            check_ec=False,
        )
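The listing checks above look for the group and subvolume names under <mount>/volumes; the same thing can be cross-checked against the path Ceph itself reports. A hedged sketch, assuming the client/exec_command interface used throughout these examples and an export rooted at "/":

def subvolume_visible_over_nfs(client, fs_name, subvol_name, group_name, nfs_mounting_dir):
    # "ceph fs subvolume getpath" returns a path rooted at the CephFS root
    # (e.g. /volumes/<group>/<subvol>/<uuid>), which should also exist under
    # the NFS mount when the export path is "/".
    out, rc = client.exec_command(
        sudo=True,
        cmd=f"ceph fs subvolume getpath {fs_name} {subvol_name} {group_name}",
    )
    subvol_path = out.strip()
    out, rc = client.exec_command(
        sudo=True, cmd=f"ls -d {nfs_mounting_dir}{subvol_path}", check_ec=False
    )
    return bool(out.strip())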