def run(ceph_cluster, **kw):
    """
    Pre-requisites:
    1. Pick a subvolume group name that does not exist
    Test operation:
    1. Try to delete the non-existent subvolume group
    2. Check that the subvolume group deletion fails
    """
    try:

        tc = "CEPH-83574162"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        subvolume_group = "non_exist_subvolume_group_name"
        output, err = fs_util.remove_subvolumegroup(client1,
                                                    "cephfs",
                                                    subvolume_group,
                                                    check_ec=False)
        if output == 0:
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    Test operation:
    1. Generate a subvolume_group name that does not exist
    2. Try to remove the non-existent subvolume_group
    3. The command should fail because the subvolume_group does not exist
    4. Check that the command failed
    """
    try:

        tc = "CEPH-83574168"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        target_delete_subvolume_group = "non_exist_subvolume_group_name"
        c_out, c_err = fs_util.remove_subvolumegroup(
            client1,
            "cephfs",
            target_delete_subvolume_group,
            validate=False,
            check_ec=False,
        )
        if c_err:
            return 0
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    Test operation:
    1. Generate a random name for subvolume creation
    2. Create a cephfs subvolume with the name generated in the first step
    3. Resize the cephfs subvolume created in the first step
    4. Check if the cephfs subvolume is resized
    5. Remove the cephfs subvolume
    6. Verify that the trash directory is empty
    """
    try:

        tc = "CEPH-83574186"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        subvolume_name_generate = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5)))
        subvolume = {
            "vol_name": "cephfs",
            "subvol_name": f"subvol_{subvolume_name_generate}",
            "size": "5368706371",
        }
        subvolume_name = subvolume["subvol_name"]
        fs_util.create_subvolume(client1, **subvolume)
        new_size = "26843531685"
        c_out, c_err = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume resize cephfs {subvolume_name} {new_size}",
        )
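        # The resize command prints a JSON array; the second element is
        # expected to carry the new "bytes_quota" value.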
        c_out_result = json.loads(c_out.read().decode())
        target_size = c_out_result[1]["bytes_quota"]
        if int(target_size) != int(new_size):
            return 1
        c_out2, c_err2 = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume info cephfs {subvolume_name} -f json")
        c_out2_result = json.loads(c_out2.read().decode())
        target_quota = c_out2_result["bytes_quota"]
        if int(target_quota) != int(new_size):
            return 1
        fs_util.remove_subvolume(client1,
                                 "cephfs",
                                 subvolume_name,
                                 validate=True)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    Pre-requisites:
    1. Create a pool with "-" in its name
    Test operation:
    1. Create a cephfs
    2. Add the pool whose name contains "-" as a data pool of the cephfs
    3. Create a subvolume within the cephfs attached to the pool
    4. Run IOs
    5. Check if any failure happens during the operation
    """
    try:

        tc = "CEPH-83573528"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
        )
        pool_names = ["ceph-fs-pool"]

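        # Create each pool (the name intentionally contains "-") and attach it
        # to cephfs as an additional data pool before exercising subvolume IO.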
        for pool_name in pool_names:
            client1.exec_command(f"ceph osd pool create {pool_name}")
            output, err = client1.exec_command(
                f"ceph fs add_data_pool cephfs {pool_name}"
            )
            if output == 1:
                return 1
            subvol_name = "".join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in list(range(10))
            )
            fs_util.create_subvolume(client1, "cephfs", f"subvol_{subvol_name}")
            run_ios(client1, kernel_mounting_dir_1)
            fs_util.remove_subvolume(client1, "cephfs", f"subvol_{subvol_name}")

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    pre-requisites:
    1. Create a volume with a name
    2. Create a subvolume with a name
    3. Create a subvolume group with a name
    Test operation:
    1. Try to create a volume with the same name
    2. Try to create a subvolume with the same name
    3. Try to create a subvolume group with the same name
    """
    try:
        tc = "CEPH-83573428"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        fs_util.prepare_clients(clients, build)

        random_name = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5)))
        volume_name = "vol_01" + random_name
        subvolume_name = "subvol_01" + random_name
        subvolume_group_name = "subvol_group_name_01" + random_name
        log.info("Ceph Build number is " + build[0])
        fs_util.create_fs(client1, volume_name)
        fs_util.create_subvolume(client1, volume_name, subvolume_name)
        fs_util.create_subvolumegroup(client1, "cephfs", subvolume_group_name)
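        # Re-run the same create commands; each should fail because the names
        # already exist (check_ec=False is assumed to suppress raising on the
        # expected failure so the outputs can be inspected).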
        output1, err1 = fs_util.create_fs(client1, volume_name, check_ec=False)
        output2, err2 = fs_util.create_subvolume(client1,
                                                 volume_name,
                                                 subvolume_name,
                                                 check_ec=False)
        output3, err3 = fs_util.create_subvolumegroup(client1,
                                                      "cephfs",
                                                      subvolume_group_name,
                                                      check_ec=False)
        if output1 == 0 or output2 == 0 or output3 == 0:
            return 1
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    Pre-requisites:
    1. Generate a random subvolume_group name that does not exist
    Test operation:
    1. Try to create a subvolume in the non-existent subvolume_group
    2. If the creation fails, note it
    3. Try to delete a subvolume in the non-existent subvolume_group
    4. If the deletion fails, note it
    5. If both the creation and the deletion failed, return 0
    """
    try:

        tc = "CEPH-83574161"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        result = 0
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        subvolume_group = "non_exist_subvolume_group_name"
        subvolume_name_generate = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5)))
        subvolume = {
            "vol_name": "cephfs",
            "subvol_name": f"subvol_{subvolume_name_generate}",
            "size": "5368706371",
            "group_name": f"{subvolume_group}",
        }
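        # Both the create and the remove below target a subvolume group that
        # does not exist, so both commands are expected to report an error.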
        c_out, c_err = fs_util.create_subvolume(client1,
                                                **subvolume,
                                                validate=False,
                                                check_ec=False)
        if c_err:
            result = result + 1
        c_out2, c_err2 = fs_util.remove_subvolume(client1,
                                                  **subvolume,
                                                  validate=False,
                                                  check_ec=False)
        if c_err2:
            result = result + 1
        if result != 2:
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    Pre-requisites:
    1. Prepare an invalid pool_name
    Test operation:
    1. Try to create a subvolume in the invalid pool
    2. Check that the subvolume is not created because of the invalid pool
    3. Using getpath, check that the subvolume path is cleaned up
    """
    try:

        tc = "CEPH-83574192"
        log.info(f"Running CephFS tests for Polarion ID -{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        subvol_name = "subvol_name".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5))
        )
        invalid_pool_name = "non_exist_pool"
        out1, err1 = fs_util.create_subvolume(
            client1,
            "cephfs",
            subvol_name,
            validate=False,
            check_ec=False,
            pool_layout=invalid_pool_name,
        )
        out2, err2 = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath cephfs {subvol_name}",
            check_ec=False,
        )
        if out1 == 0 or out2 == 0:
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    pre-requisites:
    1. Prepare isolated_namespace name
    Test operation:
    1. Create a subvolume with isolated_namespace option
    2. Check if the creation is successful
    3. After the creation, check if the subvolume is created in isolated namespace using `ceph fs subvolume info`
    4. Remove the subvolume
    """
    try:

        tc = "CEPH-83574187"
        log.info(f"Running CephFS tests for Polarion ID -{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        random_name = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5)))
        subvol_name = "subvol_name_" + random_name
        namespace = "namespace_" + random_name
        fs_util.create_subvolume(client1,
                                 "cephfs",
                                 subvol_name,
                                 namespace_isolated=namespace)
        out1, err1 = client1.exec_command(
            sudo=True, cmd=f"ceph fs subvolume info cephfs {subvol_name}")
        isolated_pool_name = f"fsvolumens_{subvol_name}"
        output1 = json.loads(out1.read().decode())
        target_ns_name = output1["pool_namespace"]
        if target_ns_name != isolated_pool_name:
            log.error("Isolated namespace name are not identical")
            return 1
        fs_util.remove_subvolume(client1, "cephfs", subvol_name)
        return 0

    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573873   Try creating 2 filesystems using the same pool (negative)
    Pre-requisites :
    1. We need at least one client node to execute this test case

    Test Case Flow:
    1. Check if the cephfs filesystem is present; if not, create cephfs
    2. Collect the data pool and metadata pool info of cephfs
    3. Try creating cephfs1 with the data pool and metadata pool of cephfs
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_details = fs_util.get_fs_info(client1)
        out, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs new cephfs1 {fs_details['metadata_pool_name']} {fs_details['data_pool_name']}",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed(
                "We are able to create filesystems with same pool used by other filesystem"
            )
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):

    """
    Test operation:
    1. Create a subvolume
    2. Check info for the subvolume
    3. Check if gid and uid are set to 0
    """
    try:
        tc = "CEPH-83574181"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        subvolume_name_generate = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5))
        )
        subvolume = {
            "vol_name": "cephfs",
            "subvol_name": f"subvol_{subvolume_name_generate}",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume, check_ec=False)
        c_out, c_err = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume info cephfs subvol_{subvolume_name_generate}",
        )
        c_out_decoded = json.loads(c_out.read().decode())
        gid = c_out_decoded["gid"]
        uid = c_out_decoded["uid"]
        if gid != 0 or uid != 0:
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    Pre-requisites:
    1. Prepare an invalid pool_name
    Test operation:
    1. Try to create a subvolume group with the invalid pool
    2. Check that the subvolume group is not created because of the invalid pool
    3. Using getpath, check that the subvolume group path is cleaned up
    """
    try:
        tc = "CEPH-83574163"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        subvol_group_name = "subvol_group"
        invalid_pool_name = "non_exist_pool"
        out1, err1 = fs_util.create_subvolumegroup(
            client1, "cephfs", subvol_group_name,
            pool_layout=invalid_pool_name, check_ec=False
        )
        out2, err2 = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath cephfs {subvol_group_name}",
            check_ec=False,
        )
        if out1 == 0 or out2 == 0:
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573255   Try renaming the snapshot directory and roll back.
                    Create a FS, create 10 directories, and mount them on kernel and fuse clients (5 mounts each).
                    Add data (~100 GB). Create a snapshot and verify the content in the snap directory.
                    Try modifying the snapshot name.

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the cephfs volume if it is not already there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvol_rename_sanp_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvol_rename_sanp_1

    Script Flow:
    1. Mount the subvolume on the client using fuse mount.
    2. Try renaming the snapshot .snap/_snap_1_* to .snap/_snap_rename_ and verify that it fails.
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvol_rename_sanp_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_rename_sanp",
            "group_name": "subvol_rename_sanp_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_rename_sanp subvol_rename_sanp_1",
        )
        subvol_path = subvol_path.strip()
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path}",
        )
        fs_util.create_file_data(client1, fuse_mounting_dir_1, 3, "snap1",
                                 "data_from_fuse_mount ")
        fuse_snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_rename_sanp",
            "snap_name": "snap_1",
            "group_name": "subvol_rename_sanp_1",
        }
        fs_util.create_snapshot(client1, **fuse_snapshot)
        out, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"cd {fuse_mounting_dir_1};mv .snap/_snap_1_* .snap/_snap_rename_",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed(
                "we are able to rename the snap directory.. which is not correct"
            )
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        fs_util.remove_snapshot(client1, **fuse_snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573409    Test to validate the removal of quota_max_bytes
                     Create a FS and create 10 directories and mount them on kernel and fuse client(5 mounts each)
                     Set max bytes quota to a number(say 1Gb) and fill data until it reaches the limit and
                     verify if the set quota limit is working fine.
                     Remove the quota once it reaches the max bytes and try adding more
                     data, and verify that the quota is removed. Repeat the procedure a few more times.


    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the cephfs volume if it is not already there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_1 --size 5368706371 --group_name subvolgroup_1
    5. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1

    Test Case Flow:
    1. Mount the subvolume_1 on the client using fuse
    2. Mount the subvolume_2 on the client using kernel
    3. Set a 1 GB byte quota on both mount points
    4. Write 3 GB of data and check that it fails once the quota is reached
    5. Perform the same on the kernel mount
    6. Remove the byte quota (i.e., set bytes to 0) and try creating files again in the same directory used in step 3
    7. Perform the same on the kernel mount
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_quota_byte_remove_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_remove_fuse",
                "group_name": "subvolgroup_quota_byte_remove_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_remove_kernel",
                "group_name": "subvolgroup_quota_byte_remove_1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_byte_remove_kernel subvolgroup_quota_byte_remove_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_byte_remove_fuse subvolgroup_quota_byte_remove_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )
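        # Set the quota attrs (assumed to be 100 files and a 1 GiB byte limit,
        # 1073741824 bytes) on the fuse mount, then exercise the byte quota;
        # the byte quota is removed (set to 0) further below.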
        fs_util.set_quota_attrs(clients[0], 100, 1073741824,
                                fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        log.info("Removing the quota to 0 and validating file quota attr")
        fs_util.set_quota_attrs(clients[0], 100, "0", fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        fs_util.set_quota_attrs(clients[0], 100, 1073741824,
                                kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        log.info("Increasing the quota to 0 and validating file quota attr")
        fs_util.set_quota_attrs(clients[0], 100, "0", kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
def run(ceph_cluster, **kw):
    """
    pre-requisites:
    1. Create cephfs subvolumegroup
    2. Create cephfs subvolume in subvolume group with default permission
    3. Create cephfs subvolumes in subvolumegroup with different permission
    Test operation:
    1. Get path of all created subvolumes
    2. Remove all the subvolumes
    3. Remove the subvolumegroup
    """
    try:
        tc = "CEPH-83574190"
        log.info(f"Running CephFS tests for Polarion ID -{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        vol_name_rand = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5)))
        subvol_group = f"subvolume_groupname_{vol_name_rand}"
        subvol_default = f"subvol_default_{vol_name_rand}"
        subvol_different = f"subvol_different_{vol_name_rand}"
        fs_util.create_subvolumegroup(client1, "cephfs", subvol_group)
        fs_util.create_subvolume(client1,
                                 "cephfs",
                                 subvol_default,
                                 group_name=subvol_group)
        fs_util.create_subvolume(client1,
                                 "cephfs",
                                 subvol_different,
                                 group_name=subvol_group,
                                 mode=777)
        out1 = fs_util.get_subvolume_info(client1,
                                          "cephfs",
                                          subvol_default,
                                          group_name=subvol_group)
        out2 = fs_util.get_subvolume_info(client1,
                                          "cephfs",
                                          subvol_different,
                                          group_name=subvol_group)
        out3, err3 = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath cephfs {subvol_default} --group_name {subvol_group}",
        )
        out4, err4 = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath cephfs {subvol_different} --group_name {subvol_group}",
        )
        mode1 = out1["mode"]
        mode2 = out2["mode"]
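        # 16877 and 16895 are the decimal st_mode values for octal 40755
        # (the default directory mode) and 40777 (the mode=777 subvolume).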
        if str(mode1) != "16877" or str(
                mode2) != "16895" or out3 == 1 or out4 == 1:
            return 1
        fs_util.remove_subvolume(client1,
                                 "cephfs",
                                 subvol_default,
                                 group_name=subvol_group)
        fs_util.remove_subvolume(client1,
                                 "cephfs",
                                 subvol_different,
                                 group_name=subvol_group)
        fs_util.remove_subvolumegroup(client1,
                                      "cephfs",
                                      subvol_group,
                                      force=True)
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-11319  Create the first snap, add more data to the original, then create a second snap.
                Roll back to the 1st snap and do data validation.
                Roll back to the 2nd snap and do data validation. Perform cross-platform rollback,
                i.e. take a snap on the kernel mount and perform the rollback using the fuse mount.
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the cephfs volume if it is not already there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvol_cross_platform_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume. We will add known data as we are going to verify the files
        Ex: create_file_data()
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvol_cross_platform_snapshot_1

    Script Flow:
    1. Mount the subvolume on the client using Kernel and fuse mount
    2. Write data into the fuse mount point i.e., data_from_fuse_mount
    3. Collect the checksum of the files
    4. Take snapshot at this point i.e., snap_1
    5. Write data into the kernel mount point i.e., data_from_kernel_mount
    6. Collect the checksum of the files
    7. Take snapshot at this point i.e., snap_2
    8. On Kernel mount revert the snapshot to snap_1 and compare the checksum of the files collected in step 3
    9. On Fuse mount revert the snapshot to snap_2 and compare the checksum of the files collected in step 6

    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvol_cross_platform_snapshot_1"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "group_name": "subvol_cross_platform_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_cross_platform_snapshot"
            f" subvol_cross_platform_snapshot_1",
        )
        subvol_path = subvol_path.strip()
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path}",
        )
        fs_util.create_file_data(
            client1, fuse_mounting_dir_1, 3, "snap1", "data_from_fuse_mount "
        )
        fuse_files_checksum = fs_util.get_files_and_checksum(
            client1, fuse_mounting_dir_1
        )
        fuse_snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvol_cross_platform_snapshot_1",
        }
        fs_util.create_snapshot(client1, **fuse_snapshot)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path}",
        )
        fs_util.create_file_data(
            client1, kernel_mounting_dir_1, 3, "snap1", "data_from_kernel_mount "
        )
        kernel_snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "snap_name": "snap_2",
            "group_name": "subvol_cross_platform_snapshot_1",
        }
        fs_util.create_snapshot(client1, **kernel_snapshot)
        kernel_files_checksum = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_1
        )
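        # The "rollback" is emulated by copying files back out of the snapshot
        # directory and comparing checksums against the originals.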
        client1.exec_command(
            sudo=True, cmd=f"cd {kernel_mounting_dir_1};cp .snap/_snap_1_*/* ."
        )
        kernel_mount_revert_snap_fuse = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_1
        )
        if fuse_files_checksum != kernel_mount_revert_snap_fuse:
            log.error(
                "checksum is not when reverted to snap1 i.e., from fuse mount snapshot revert"
            )
            return 1
        client1.exec_command(
            sudo=True, cmd=f"cd {fuse_mounting_dir_1};cp .snap/_snap_2_*/* ."
        )
        fuse_mount_revert_snap_kernel = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_1
        )
        if kernel_files_checksum != fuse_mount_revert_snap_kernel:
            log.error(
                "checksum is not when reverted to snap2 i.e., from kernel mount snapshot revert"
            )
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        fs_util.remove_snapshot(client1, **kernel_snapshot)
        fs_util.remove_snapshot(client1, **fuse_snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573520	Validate the max snapshot that can be created under a root FS sub volume level.
                    Increase by 50 at a time until it reaches the max limit.

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the cephfs volume if it is not already there
    3. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_max_snap --size 5368706371 --group_name subvolgroup_1
    4. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/

    Test Script Flow :
    1. Create snapshots in batches of 50, up to 1000.
    2. Break out whenever the maximum allowed number of snapshots is reached.

    Clean up:
    1. Deletes all the snapshots created
    2. Deletes the subvolume created.
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_max_snap",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_max_snap",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot_list = [{
            "vol_name": default_fs,
            "subvol_name": "subvol_max_snap",
            "snap_name": f"snap_limit_{x}",
        } for x in range(1, 1000)]
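        # Create snapshots in batches of 50; the inner for/else/break pattern
        # exits both loops once snapshot creation starts failing, recording how
        # many snapshots were allowed so cleanup only removes what was created.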
        max_snapshots_allowed = len(snapshot_list)
        for i in range(0, 1000, 50):
            for snapshot in snapshot_list[i:i + 50]:
                try:
                    fs_util.create_snapshot(clients[0],
                                            **snapshot,
                                            validate=False)
                except CommandFailed:
                    log.info(
                        f"Max Snapshots allowed under a root FS sub volume level is {i}"
                    )
                    max_snapshots_allowed = i
                    break
            else:
                log.info(f"Snapshot creation is successful from {i} to {i + 50}")
                continue
            break
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        for snapshot in snapshot_list[0:max_sanpshots_allowed]:
            fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Fill 60% of the cluster with data
    Test operation:
    1. Create a volume
    2. Mount the cephfs on both fuse and kernel clients
    3. Create few directory from the both clients
    4. Execute the command "ceph fs set <fs_name> max_mds <n>", where n is the desired number of active MDS daemons
    5. Check that the number of active MDS daemons increases and decreases accordingly
    """
    try:
        tc = "CEPH-83573462"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        client2 = clients[1]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        mon_node_ips = fs_util.get_mon_node_ips()
        kernel_dir_generate = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5))
        )
        kernel_mounting_dir = f"/mnt/cephfs_kernel{kernel_dir_generate}/"
        fs_util.auth_list([client1])
        fs_util.kernel_mount([client1], kernel_mounting_dir, ",".join(mon_node_ips))
        client1.exec_command(
            sudo=True,
            cmd=f"dd if=/dev/zero of={kernel_mounting_dir}" + ".txt bs=5M count=1000",
            long_running=True,
        )
        for i in range(10):
            dir_name_generate = "".join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in list(range(5))
            )
            client1.exec_command(
                sudo=True, cmd=f"mkdir {kernel_mounting_dir}dir_{dir_name_generate}"
            )
        fuse_dir_generate = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5))
        )
        fuse_mounting_dir = f"/mnt/cephfs_fuse{fuse_dir_generate}/"
        client2.exec_command(sudo=True, cmd="dnf install ceph-fuse")
        fs_util.auth_list([client2])
        fs_util.fuse_mount([client2], fuse_mounting_dir)
        for i in range(10):
            dir_name_generate = "".join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in list(range(5))
            )
            client2.exec_command(
                sudo=True, cmd=f"mkdir {fuse_mounting_dir}dir_{dir_name_generate}"
            )
        c1_out, c1_result = client1.exec_command(
            sudo=True, cmd="ceph fs get cephfs -f json"
        )
        decoded_out = json.loads(c1_out.read().decode())
        number_of_up_temp = decoded_out["mdsmap"]["up"]
        number_of_up = len(number_of_up_temp)
        number_of_mds_max = decoded_out["mdsmap"]["max_mds"]
        c1_out2, result2 = client1.exec_command(sudo=True, cmd="ceph -s -f json")
        decoded_out2 = json.loads(c1_out2.read().decode())
        number_of_standby = decoded_out2["fsmap"]["up:standby"]
        log.info(number_of_standby)
        counts = number_of_standby
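        # Bump max_mds by one for each available standby, wait for the MDS map
        # to settle, then verify max_mds, the up count and the standby count.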
        for i in range(counts):
            number_of_mds_max = number_of_mds_max + 1
            client1.exec_command(
                sudo=True, cmd=f"ceph fs set cephfs max_mds {str(number_of_mds_max)}"
            )
            number_of_standby = number_of_standby - 1
            number_of_up = number_of_up + 1
            time.sleep(50)
            kernel_output, kernel_result = client1.exec_command(
                sudo=True, cmd="ceph fs get cephfs -f json"
            )
            kernel_decoded = json.loads(kernel_output.read().decode())
            current_max_mds = kernel_decoded["mdsmap"]["max_mds"]
            kernel_output2, kernel_result2 = client1.exec_command(
                sudo=True, cmd="ceph -s -f json"
            )
            kernel_decoded2 = json.loads(kernel_output2.read().decode())
            current_standby = kernel_decoded2["fsmap"]["up:standby"]
            if current_max_mds != number_of_mds_max:
                return 1
            if number_of_up != number_of_mds_max:
                return 1
            if number_of_standby != current_standby:
                return 1
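        # Scale max_mds back down one step at a time and verify the standby
        # daemons are restored as active MDS ranks are dropped.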
        for i in range(counts):
            number_of_mds_max = number_of_mds_max - 1
            client1.exec_command(
                sudo=True, cmd=f"ceph fs set cephfs max_mds {str(number_of_mds_max)}"
            )
            number_of_standby = number_of_standby + 1
            number_of_up = number_of_up - 1
            time.sleep(50)
            kernel_output, kernel_result = client1.exec_command(
                sudo=True, cmd="ceph fs get cephfs -f json"
            )
            kernel_decoded = json.loads(kernel_output.read().decode())
            current_max_mds = kernel_decoded["mdsmap"]["max_mds"]
            kernel_output2, kernel_result2 = client1.exec_command(
                sudo=True, cmd="ceph -s -f json"
            )
            kernel_decoded2 = json.loads(kernel_output2.read().decode())
            current_standby = kernel_decoded2["fsmap"]["up:standby"]
            if current_max_mds != number_of_mds_max:
                return 1
            if number_of_up != number_of_mds_max:
                return 1
            if number_of_standby != current_standby:
                return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573415	Test to validate the cli - ceph fs set <fs_name> allow_new_snaps true

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the cephfs volume if it is not already there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_flag_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_flag_snapshot_1

    Retain the snapshots and verify the data after cloning:
    1. Test allow_new_snaps value and try creating the snapshots

    Clean Up:
    1. Del all the snapshots created
    2. Del Subvolumes
    3. Del SubvolumeGroups
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_flag_snapshot_1"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "group_name": "subvolgroup_flag_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_retain_snapshot subvolgroup_flag_snapshot_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_flag_snapshot_1",
        }
        log.info("Test allow_new_snaps value and creating the snapshots")
        client1.exec_command(
            sudo=True, cmd=f"ceph fs set {default_fs} allow_new_snaps false"
        )
        cmd_out, cmd_rc = fs_util.create_snapshot(
            client1, **snapshot, check_ec=False, validate=False
        )
        if cmd_rc == 0:
            raise CommandFailed(
                f"ceph fs set {default_fs} allow_new_snaps false is not working properly"
            )
        client1.exec_command(
            sudo=True, cmd=f"ceph fs set {default_fs} allow_new_snaps true"
        )
        fs_util.create_snapshot(client1, **snapshot)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        fs_util.remove_snapshot(client1, **snapshot, validate=False, force=True)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573420	Try writing the data to snap directory

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the cephfs volume if it is not already there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_write_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_


    Script Flow:
    1. Mount the subvolume on the client using Kernel
    2. Write data into the mount point
    3. Try creating directory and file inside the mount_point/.snap directory(CEPH-83573420)

    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_write_snapshot_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_write_snapshot",
            "group_name": "subvolgroup_write_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_write_snapshot subvolgroup_write_snapshot_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        fs_util.create_file_data(client1, kernel_mounting_dir_1, 3, "snap1",
                                 "snap_1_data ")
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_write_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_write_snapshot_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        out, rc = client1.exec_command(
            sudo=True,
            cmd=f"mkdir /mnt/cephfs_kernel{mounting_dir}_1/.snap/test",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed("Mkdir is working in .snap directory")
        out, rc = client1.exec_command(
            sudo=True,
            cmd=f"touch /mnt/cephfs_kernel{mounting_dir}_1/.snap/test",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed("touch is working in .snap directory")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
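
# A hedged, standalone sketch of the CEPH-83573420 check: creating entries
# under <mount>/.snap is expected to be rejected by CephFS. The mount path is
# a placeholder assumption and is not one of the directories used above.
import errno
import os


def snap_dir_rejects_writes(mount_point="/mnt/cephfs_kernel_example_1"):
    snap_dir = os.path.join(mount_point, ".snap")
    try:
        os.mkdir(os.path.join(snap_dir, "test_dir"))
        return False  # mkdir unexpectedly succeeded inside .snap
    except OSError as err:
        if err.errno not in (errno.EPERM, errno.EROFS, errno.EACCES):
            raise
    try:
        with open(os.path.join(snap_dir, "test_file"), "w"):
            return False  # file creation unexpectedly succeeded inside .snap
    except OSError as err:
        if err.errno not in (errno.EPERM, errno.EROFS, errno.EACCES):
            raise
    return True
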
Example No. 20
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573502    Interrupt the cloning operation in between and observe the behavior.
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Creates fs volume cephfs if the volume is not there
    3. Create 1 subvolume group
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_clone_cancel --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_clone_cancel snap_1 --group_name subvolgroup_1

    Clone Operations and Clone States:
    1. Create a clone in the default location.
        ceph fs subvolume snapshot clone cephfs subvol_clone_cancel snap_1 clone_status_1 --group_name subvolgroup_1
    2. Create a clone, cancel the operation, and check that the state is "canceled".
    3. The clone state should move to the canceled state

    Clean-up:
    1. ceph fs snapshot rm <vol_name> <subvol_name> snap_name [--group_name <subvol_group_name>]
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_clone_cancel_1"}
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_cancel",
            "group_name": "subvolgroup_clone_cancel_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_clone_cancel subvolgroup_clone_cancel_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_cancel",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_clone_cancel_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        clone_status_3 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_cancel",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_status_3",
            "group_name": "subvolgroup_clone_cancel_1",
        }
        fs_util.create_clone(client1, **clone_status_3)
        fs_util.clone_cancel(
            client1,
            clone_status_3["vol_name"],
            clone_status_3["target_subvol_name"],
            group_name=clone_status_3.get("target_group_name", ""),
        )
        fs_util.validate_clone_state(client1, clone_status_3, "canceled")
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {"vol_name": default_fs, "subvol_name": "clone_status_3", "force": True},
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
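
# A standalone, hedged sketch of the cancel flow above against the raw CLI:
# start a clone, cancel it, and confirm "ceph fs clone status" reports the
# "canceled" state. Names are placeholder assumptions; a node with the ceph
# CLI and an admin keyring is assumed.
import json
import subprocess


def clone_cancel_sketch(fs_name="cephfs", subvol="subvol_clone_cancel",
                        snap="snap_1", clone="clone_cancel_demo",
                        group="subvolgroup_clone_cancel_1"):
    subprocess.run(
        ["ceph", "fs", "subvolume", "snapshot", "clone",
         fs_name, subvol, snap, clone, "--group_name", group],
        check=True,
    )
    subprocess.run(["ceph", "fs", "clone", "cancel", fs_name, clone], check=True)
    out = subprocess.run(
        ["ceph", "fs", "clone", "status", fs_name, clone, "--format", "json"],
        check=True, capture_output=True, text=True,
    ).stdout
    return json.loads(out)["status"]["state"] == "canceled"
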
Example No. 21
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Creates fs volume cephfs if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_1

    Concurrent Clone Operations:
    1. Validate the default value for concurrent clones, i.e., 4
    2. Create 5 clones of the snap_1
        Ex: ceph fs subvolume snapshot clone cephfs subvol_2 snap_1 clone_1 --group_name subvolgroup_1
            ceph fs subvolume snapshot clone cephfs subvol_2 snap_1 clone_2 --group_name subvolgroup_1
            ceph fs subvolume snapshot clone cephfs subvol_2 snap_1 clone_3 --group_name subvolgroup_1
            ceph fs subvolume snapshot clone cephfs subvol_2 snap_1 clone_4 --group_name subvolgroup_1
            ceph fs subvolume snapshot clone cephfs subvol_2 snap_1 clone_5 --group_name subvolgroup_1
    3. Get the status of each clone using below command
        Ex: ceph fs clone status cephfs clone_1
            ceph fs clone status cephfs clone_2
            ceph fs clone status cephfs clone_3
            ceph fs clone status cephfs clone_4
            ceph fs clone status cephfs clone_5
    4. Validate that the total number of clones in-progress is not greater than 4
    5. Once all the clones move to the complete state, delete all the clones
    6. Set the concurrent threads to 2
        Ex: ceph config set mgr mgr/volumes/max_concurrent_clones 2
    7. Create 5 clones of the snap_1
        Ex: ceph fs subvolume snapshot clone cephfs subvol_2 snap_1 clone_1 --group_name subvolgroup_1
            ceph fs subvolume snapshot clone cephfs subvol_2 snap_1 clone_2 --group_name subvolgroup_1
            ceph fs subvolume snapshot clone cephfs subvol_2 snap_1 clone_3 --group_name subvolgroup_1
            ceph fs subvolume snapshot clone cephfs subvol_2 snap_1 clone_4 --group_name subvolgroup_1
            ceph fs subvolume snapshot clone cephfs subvol_2 snap_1 clone_5 --group_name subvolgroup_1
    8. Get the status of each clone using below command
        Ex: ceph fs clone status cephfs clone_1
            ceph fs clone status cephfs clone_2
            ceph fs clone status cephfs clone_3
            ceph fs clone status cephfs clone_4
            ceph fs clone status cephfs clone_5
    9. Validate that the total number of clones in-progress is not greater than 2
    10. Once all the clones move to the complete state, delete all the clones
    Clean-up:
    1. ceph fs snapshot rm <vol_name> <subvol_name> snap_name [--group_name <subvol_group_name>]
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """
    try:

        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup = {
            "vol_name": default_fs,
            "group_name": "subvolgroup_1"
        }
        fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_2",
            "group_name": "subvolgroup_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )
        out, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4000 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{fuse_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_2",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        clone_list = [{
            "vol_name": default_fs,
            "subvol_name": "subvol_2",
            "snap_name": "snap_1",
            "target_subvol_name": f"clone_{x}",
            "group_name": "subvolgroup_1",
        } for x in range(1, 6)]
        with parallel() as p:
            for clone in clone_list:
                p.spawn(fs_util.create_clone, client1, **clone, validate=False)
        status_list = []
        iteration = 0
        while status_list.count("complete") < len(clone_list):
            status_list.clear()
            iteration += 1
            for clone in clone_list:
                cmd_out, cmd_rc = fs_util.get_clone_status(
                    client1, clone["vol_name"], clone["target_subvol_name"])
                status = json.loads(cmd_out.read().decode())
                status_list.append(status["status"]["state"])
                log.info(
                    f"{clone['target_subvol_name']} status is {status['status']['state']}"
                )
            if status_list.count("in-progress") > 4:
                return 1
            else:
                log.info(
                    f"cloneing is in progress for {status_list.count('in-progress')} out of {len(clone_list)}"
                )
            log.info(f"Iteration {iteration} has been completed")

        rmclone_list = [{
            "vol_name": default_fs,
            "subvol_name": f"clone_{x}"
        } for x in range(1, 6)]
        for clonevolume in rmclone_list:
            fs_util.remove_subvolume(client1, **clonevolume)
        log.info(
            "Set clone threads to 2 and verify only 2 clones are in progress")
        client1.exec_command(
            sudo=True,
            cmd="ceph config set mgr mgr/volumes/max_concurrent_clones 2")
        for clone in clone_list:
            fs_util.create_clone(client1, **clone)
        status_list = []
        iteration = 0
        while status_list.count("complete") < len(clone_list):
            iteration += 1
            status_list.clear()
            for clone in clone_list:
                cmd_out, cmd_rc = fs_util.get_clone_status(
                    client1, clone["vol_name"], clone["target_subvol_name"])
                status = json.loads(cmd_out.read().decode())
                status_list.append(status["status"]["state"])
                log.info(
                    f"{clone['target_subvol_name']} status is {status['status']['state']}"
                )
            if status_list.count("in-progress") > 2:
                return 1
            else:
                log.info(
                    f"cloneing is in progress for {status_list.count('in-progress')} out of {len(clone_list)}"
                )
            log.info(f"Iteration {iteration} has been completed")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Setting back the clones to default value 4")
        client1.exec_command(
            sudo=True,
            cmd="ceph config set mgr mgr/volumes/max_concurrent_clones 4")
        for clonevolume in rmclone_list:
            fs_util.remove_subvolume(client1,
                                     **clonevolume,
                                     force=True,
                                     validate=False)
        log.info("Clean Up in progess")
        fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
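
# A hedged sketch of the throttle validation above: poll "ceph fs clone
# status" for a set of clones and verify that the number of in-progress
# clones never exceeds mgr/volumes/max_concurrent_clones. The clone names
# passed in are assumed to exist already.
import json
import subprocess
import time


def in_progress_within_limit(fs_name, clone_names, limit, poll_seconds=5):
    while True:
        states = []
        for clone in clone_names:
            out = subprocess.run(
                ["ceph", "fs", "clone", "status", fs_name, clone,
                 "--format", "json"],
                check=True, capture_output=True, text=True,
            ).stdout
            states.append(json.loads(out)["status"]["state"])
        if states.count("in-progress") > limit:
            return False
        if states.count("complete") == len(clone_names):
            return True
        time.sleep(poll_seconds)
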
Example No. 22
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573408   Test to validate the quota remains intact even after rebooting the Node.
                    Create a FS and create 10 directories and mount them on kernel client and fuse client(5 mounts
                    each). Set max bytes quota to a number(say 1Gb) and also set max files quota (say 20) and verify if
                    the set quota limit is working fine by filling max number of files and also by filling data to reach
                    the max limit. Reboot the node; once the node is up, verify whether the set quota remains.
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. create fs volume create cephfs if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_1 --size 5368706371 --group_name subvolgroup_1
    5. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1

    Test Case Flow:
    1. Mount the subvolume_1 on the client using fuse
    2. Mount the subvolume_2 on the client using kernel
    3. Set quota attribute 1gb and 50 files on both mount points
    4. Reboot the client node
    5. mount the subvolumes again
    6. Validate the quota attributes after reboot
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_quota_byte_increase_1"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_incr_fuse",
                "group_name": "subvolgroup_quota_byte_increase_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_incr_kernel",
                "group_name": "subvolgroup_quota_byte_increase_1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_byte_incr_kernel subvolgroup_quota_byte_increase_1",
        )
        kernel_subvol_path = subvol_path.read().decode().strip()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{kernel_subvol_path}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_byte_incr_fuse subvolgroup_quota_byte_increase_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fuse_subvol_path = subvol_path.read().decode().strip()
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {fuse_subvol_path}",
        )
        fs_util.set_quota_attrs(clients[0], 50, 1073741824, fuse_mounting_dir_1)
        fuse_quota_attrs_before_reboot = fs_util.get_quota_attrs(
            clients[0], fuse_mounting_dir_1
        )

        fs_util.set_quota_attrs(clients[0], 50, 1073741824, kernel_mounting_dir_1)
        kernel_quota_attrs_before_reboot = fs_util.get_quota_attrs(
            clients[0], kernel_mounting_dir_1
        )

        fs_util.reboot_node(client1)

        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_2,
            ",".join(mon_node_ips),
            sub_dir=f"{kernel_subvol_path}",
        )
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_2,
            extra_params=f" -r {fuse_subvol_path}",
        )
        fuse_quota_attrs_after_reboot = fs_util.get_quota_attrs(
            clients[0], fuse_mounting_dir_2
        )
        kernel_quota_attrs_after_reboot = fs_util.get_quota_attrs(
            clients[0], kernel_mounting_dir_2
        )
        log.info(
            f"Quota Attributes befores reboot:{fuse_quota_attrs_before_reboot}\n"
            f"After reboot: {fuse_quota_attrs_after_reboot}"
        )
        if fuse_quota_attrs_after_reboot != fuse_quota_attrs_before_reboot:
            log.info("Fuse mount quota attributes are not matching after reboot")
            log.error(
                f"Quota Attributes befores reboot:{fuse_quota_attrs_before_reboot}\n"
                f"After reboot: {fuse_quota_attrs_after_reboot}"
            )
            return 1
        log.info(
            f"Quota Attributes befores reboot:{kernel_quota_attrs_before_reboot}\n"
            f"After reboot: {kernel_quota_attrs_after_reboot}"
        )
        if kernel_quota_attrs_before_reboot != kernel_quota_attrs_after_reboot:
            log.info("Fuse mount quota attributes are not matching after reboot")
            log.error(
                f"Quota Attributes befores reboot:{kernel_quota_attrs_before_reboot}\n"
                f"After reboot: {kernel_quota_attrs_after_reboot}"
            )
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
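
# A hedged sketch of the quota comparison above, reading the CephFS quota
# vxattrs directly instead of going through the test library. The mount path
# is a placeholder assumption; os.getxattr requires a Linux client.
import os


def read_quota_attrs(mount_point="/mnt/cephfs_fuse_example_1"):
    attrs = {}
    for name in ("ceph.quota.max_bytes", "ceph.quota.max_files"):
        try:
            attrs[name] = int(os.getxattr(mount_point, name).decode())
        except OSError:
            attrs[name] = 0  # treat an absent vxattr as an unset quota
    return attrs


# Usage idea: capture read_quota_attrs() before the reboot, remount the
# subvolume, capture it again, and flag a failure if the two results differ.
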
Example No. 23
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573501	Create a Cloned Volume using a snapshot

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Creates fs volume cephfs if the volume is not there
    3. Create 2 sub volume groups
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_1
             ceph fs subvolumegroup create cephfs subvolgroup_2
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_clone_status --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_clone_status snap_1 --group_name subvolgroup_1

    Clone Operations and Clone States:
    1. Create a clone in the default location.
        ceph fs subvolume snapshot clone cephfs subvol_clone_status snap_1 clone_status_1 --group_name subvolgroup_1
    2. Validate all the states of clone creation progress
        pending : Clone operation has not started
        in-progress : Clone operation is in progress
        complete : Clone operation has successfully finished
    3. Mount the cloned volume and check the contents
    4. Create a clone in a different subvolumegroup, i.e., subvolumegroup 2
        ceph fs subvolume snapshot clone cephfs subvol_clone_status snap_1 clone_status_1 --group_name subvolgroup_1
            --target_group_name subvolgroup_2
    5. Validate all the states of clone creation progress
        pending : Clone operation has not started
        in-progress : Clone operation is in progress
        complete : Clone operation has successfully finished
    6. Once all the clones move to the complete state, delete all the clones

    Clean-up:
    1. ceph fs snapshot rm <vol_name> <subvol_name> snap_name [--group_name <subvol_group_name>]
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_clone_status_1"
            },
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_clone_status_2"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "group_name": "subvolgroup_clone_status_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_clone_status subvolgroup_clone_status_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_clone_status_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        clone_status_1 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_status_1",
            "group_name": "subvolgroup_clone_status_1",
        }
        fs_util.create_clone(client1, **clone_status_1)
        transitation_states = fs_util.validate_clone_state(
            client1, clone_status_1)
        valid_state_flow = [
            ["pending", "in-progress", "complete"],
            ["in-progress", "complete"],
        ]
        if transitation_states in valid_state_flow:
            return 1
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} {clone_status_1['target_subvol_name']}",
        )
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_2,
            extra_params=f" -r {clonevol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"diff -qr {kernel_mounting_dir_1} {fuse_mounting_dir_2}")

        clone_status_2 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_status",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_status_2",
            "group_name": "subvolgroup_clone_status_1",
            "target_group_name": "subvolgroup_clone_status_2",
        }
        fs_util.create_clone(client1, **clone_status_2)
        transitation_states = fs_util.validate_clone_state(
            client1, clone_status_2)
        if transitation_states in valid_state_flow:
            return 1
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} "
            f"{clone_status_2['target_subvol_name']} {clone_status_2['target_group_name']}",
        )
        fuse_mounting_dir_3 = f"/mnt/cephfs_fuse{mounting_dir}_3/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_3,
            extra_params=f" -r {clonevol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"diff -qr {kernel_mounting_dir_1} {fuse_mounting_dir_3}")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "clone_status_1"
            },
            {
                "vol_name": default_fs,
                "subvol_name": "clone_status_2",
                "group_name": "subvolgroup_clone_status_2",
            },
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
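
# A hedged sketch of the state validation above: poll the clone status until
# a terminal state and record the distinct states seen, then check that the
# observed progression is one of the expected flows. Assumes the ceph CLI and
# an existing clone; the polling interval is an arbitrary choice.
import json
import subprocess
import time


def watch_clone_states(fs_name, clone_name, poll_seconds=2):
    observed = []
    while True:
        out = subprocess.run(
            ["ceph", "fs", "clone", "status", fs_name, clone_name,
             "--format", "json"],
            check=True, capture_output=True, text=True,
        ).stdout
        state = json.loads(out)["status"]["state"]
        if not observed or observed[-1] != state:
            observed.append(state)
        if state in ("complete", "failed", "canceled"):
            return observed
        time.sleep(poll_seconds)


def is_expected_flow(observed):
    expected_flows = [
        ["pending", "in-progress", "complete"],
        ["in-progress", "complete"],
        ["complete"],  # polling may start only after completion
    ]
    return observed in expected_flows
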
Example No. 24
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573522   Verify the retained snapshot details with "ceph fs info" command

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Creates fs volume cephfs if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_info_retain
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_info_retain

    Retain the snapshots and verify the subvolume state:
    1. Check the state of the subvolume; it should be complete.
    2. ceph fs snapshot rm <vol_name> <subvol_name> snap_name [--group_name <subvol_group_name>
        --force --retain-snapshots]
    3. Remove the sub volume.
    4. Check the state of the subvolume; it should be snapshot-retained.

    Clean Up:
    1. Del all the snapshots created
    2. Del Subvolumes
    3. Del SubvolumeGroups
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_info_retain"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_info",
            "group_name": "subvolgroup_info_retain",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_retain_info subvolgroup_info_retain",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_info",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_info_retain",
        }
        fs_util.create_snapshot(client1, **snapshot)
        client1.exec_command(sudo=True, cmd=f"mkdir -p /tmp/{mounting_dir}")
        client1.exec_command(
            sudo=True, cmd=f"cp -r {kernel_mounting_dir_1}/* /tmp/{mounting_dir}"
        )
        subvol_info, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume info {default_fs} subvol_retain_info subvolgroup_info_retain --format json",
            check_ec=False,
        )
        subvol_info_state = json.loads(subvol_info)
        log.info(
            f"subvol state before removing the volume with --retain-snapshots {subvol_info_state['state']}"
        )
        if subvol_info_state["state"] != "complete":
            raise CommandFailed(
                f"subvol state should be in complete state "
                f"but current state is {subvol_info_state['state']}"
            )
        fs_util.remove_subvolume(
            client1, **subvolume, retain_snapshots=True, force=True, validate=False
        )
        log.info(
            "Verifying that fetching the subvolume path fails, as the subvolume was removed with --retain-snapshots"
        )
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_retain_info subvolgroup_info_retain",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed(
                "Remove subvolume with --retain-snapshots has not succeeded. "
                "We are still able to fetch the path of the subvolume after deletion"
            )
        subvol_info, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume info {default_fs} subvol_retain_info subvolgroup_info_retain --format json",
            check_ec=False,
        )
        subvol_info_state = json.loads(subvol_info)
        if subvol_info_state["state"] != "snapshot-retained":
            raise CommandFailed(
                f"subvol state should be in snapshot-retained state "
                f"but current state is {subvol_info_state['state']}"
            )
        log.info(
            f"subvol state after removing the volume with --retain-snapshots {subvol_info_state['state']}"
        )
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {"vol_name": default_fs, "subvol_name": "subvol_retain_info"},
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(
                client1, **clone_vol, validate=False, force=True, check_ec=False
            )
        fs_util.remove_snapshot(client1, **snapshot, validate=False, force=True)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(
                client1, **subvolumegroup, force=True, check_ec=False, validate=False
            )
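
# A hedged sketch of the "snapshot-retained" verification above, querying
# "ceph fs subvolume info" directly. Names are placeholder assumptions; the
# subvolume is expected to have been removed with --retain-snapshots while at
# least one snapshot still exists.
import json
import subprocess


def subvolume_state(fs_name="cephfs", subvol="subvol_retain_info",
                    group="subvolgroup_info_retain"):
    out = subprocess.run(
        ["ceph", "fs", "subvolume", "info", fs_name, subvol,
         "--group_name", group, "--format", "json"],
        check=True, capture_output=True, text=True,
    ).stdout
    return json.loads(out)["state"]


# Expected usage: the state reads "complete" before the removal and
# "snapshot-retained" after removing the subvolume with --retain-snapshots.
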
Example No. 25
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573521	Remove a subvolume group by retaining the snapshot : ceph fs subvolume rm <vol_n...

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Creates fs volume cephfs if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_retain_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_retain_snapshot_1
    7. Collect the data from the mounted volume to local disk for verification

    Retain the snapshots and verify the data after cloning:
    1. ceph fs snapshot rm <vol_name> <subvol_name> snap_name [--group_name <subvol_group_name>
        --force --retain-snapshots]
    2. Remove the sub volume.
    3. Clone the new volume from the retained snapshots
    4. Check the contents of the cloned volume with the copy present locally

    Clean Up:
    1. Del all the snapshots created
    2. Del Subvolumes
    3. Del SubvolumeGroups
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_retain_snapshot_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "group_name": "subvolgroup_retain_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_retain_snapshot subvolgroup_retain_snapshot_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_retain_snapshot_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        client1.exec_command(sudo=True, cmd=f"mkdir -p /tmp/{mounting_dir}")
        client1.exec_command(
            sudo=True,
            cmd=f"cp -r {kernel_mounting_dir_1}/* /tmp/{mounting_dir}")

        fs_util.remove_subvolume(client1,
                                 **subvolume,
                                 retain_snapshots=True,
                                 force=True,
                                 validate=False)
        log.info(
            "Verifying that fetching the subvolume path fails, as the subvolume was removed with --retain-snapshots"
        )
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_retain_snapshot subvolgroup_retain_snapshot_1",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed(
                "Remove subvolume with --retain-snapshots has not succeeded. "
                "We are still able to fetch the path of the subvolume after deletion")
        log.info("Clone a subvolume from snapshot")
        retain_snapshot_1 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "snap_name": "snap_1",
            "target_subvol_name": "retain_snapshot_1",
            "group_name": "subvolgroup_retain_snapshot_1",
        }
        fs_util.create_clone(client1, **retain_snapshot_1)
        fs_util.validate_clone_state(client1, retain_snapshot_1)
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} {retain_snapshot_1['target_subvol_name']}",
        )
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_2,
            extra_params=f" -r {clonevol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"diff -qr /tmp/{mounting_dir} {fuse_mounting_dir_2}")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "retain_snapshot_1"
            },
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1,
                                **snapshot,
                                validate=False,
                                force=True)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
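
# A hedged sketch of the final data check above: compare the local copy taken
# before the subvolume removal with the mounted clone, mirroring the
# "diff -qr" call. Both paths are placeholder assumptions, and filecmp uses a
# shallow (size/mtime) comparison rather than byte-for-byte diffing.
import filecmp


def trees_match(local_copy="/tmp/retain_snapshot_copy",
                clone_mount="/mnt/cephfs_fuse_example_2"):
    def identical(cmp_obj):
        if cmp_obj.left_only or cmp_obj.right_only or cmp_obj.diff_files:
            return False
        return all(identical(sub) for sub in cmp_obj.subdirs.values())

    return identical(filecmp.dircmp(local_copy, clone_mount))
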
Example No. 26
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573402	Test to validate the quota.max_bytes Create a FS and create 10 directories and m...
                    Test to validate the quota.max_bytes  Create a FS and create 10 directories and
                    mount them on kernel client and fuse client(5 mounts each)
                    Set max bytes quota to a number(say 1Gb) and fill data until it reaches the limit and
                    verify if the set  quota limit is working fine. Similarly set different limit on
                    different directories and verify quota.

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. create fs volume create cephfs if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_1 --size 5368706371 --group_name subvolgroup_1
    5. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1

    Test Case Flow:
    1. Mount the subvolume_1 on the client using fuse
    2. Mount the subvolume_2 on the client using kernel
    3. set quota attribute 1gb on both mount points
    4. Create 3gb files and check it fails
    5. Perform same on kernel mount
    6. Create a directory inside fuse mount and set quota attribute and verify
    7. Create a directory inside kernel mount and set quota attribute and verify
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")

        log.info("Cheking the file quota on root directory")
        root_folder_fuse_mount = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount([clients[0]], root_folder_fuse_mount)

        clients[0].exec_command(
            sudo=True,
            cmd=f"rm -rf {root_folder_fuse_mount}*;mkdir {root_folder_fuse_mount}test_fuse;",
        )
        fs_util.set_quota_attrs(
            clients[0], 20, 1073741824, f"{root_folder_fuse_mount}test_fuse"
        )
        quota_attrs = fs_util.get_quota_attrs(
            clients[0], f"{root_folder_fuse_mount}test_fuse"
        )
        fs_util.byte_quota_test(
            clients[0], f"{root_folder_fuse_mount}test_fuse", quota_attrs
        )
        log.info("Setting Back the file quota to 0 on root directory ")
        fs_util.set_quota_attrs(
            clients[0], "0", "0", f"{root_folder_fuse_mount}test_fuse"
        )

        root_folder_kernel_mount = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]], root_folder_kernel_mount, ",".join(mon_node_ips)
        )
        clients[0].exec_command(
            sudo=True,
            cmd=f"rm -rf {root_folder_kernel_mount}*;mkdir {root_folder_kernel_mount}test_kernel",
        )
        fs_util.set_quota_attrs(
            clients[0], 20, 1073741824, f"{root_folder_kernel_mount}test_kernel"
        )
        quota_attrs = fs_util.get_quota_attrs(
            clients[0], f"{root_folder_kernel_mount}test_kernel"
        )
        fs_util.byte_quota_test(
            clients[0], f"{root_folder_kernel_mount}test_kernel", quota_attrs
        )
        log.info("Setting Back the file quota to 0 on root directory ")
        fs_util.set_quota_attrs(
            clients[0], "0", "0", f"{root_folder_kernel_mount}test_kernel"
        )

        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_quota_byte_limit_1"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_fuse",
                "group_name": "subvolgroup_quota_byte_limit_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_bytes_kernel",
                "group_name": "subvolgroup_quota_byte_limit_1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_bytes_kernel subvolgroup_quota_byte_limit_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_byte_fuse subvolgroup_quota_byte_limit_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"

        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.strip()}",
        )

        fs_util.set_quota_attrs(clients[0], 200, 1073741824, kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], kernel_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        fs_util.set_quota_attrs(clients[0], 100, 1073741824, fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        clients[0].exec_command(
            sudo=True, cmd=f"cd {fuse_mounting_dir_1};rm -rf *;mkdir test;"
        )
        fs_util.set_quota_attrs(
            clients[0], 30, 1073741824, f"{fuse_mounting_dir_1}/test"
        )
        quota_attrs = fs_util.get_quota_attrs(clients[0], f"{fuse_mounting_dir_1}/test")
        fs_util.byte_quota_test(clients[0], f"{fuse_mounting_dir_1}/test", quota_attrs)

        clients[0].exec_command(
            sudo=True, cmd=f"cd {kernel_mounting_dir_1};rm -rf *;mkdir test;"
        )
        fs_util.set_quota_attrs(
            clients[0], 30, 1073741824, f"{kernel_mounting_dir_1}/test"
        )
        quota_attrs = fs_util.get_quota_attrs(
            clients[0], f"{kernel_mounting_dir_1}/test"
        )
        fs_util.byte_quota_test(
            clients[0], f"{kernel_mounting_dir_1}/test", quota_attrs
        )

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        if "5" in build:
            fs_util.set_quota_attrs(clients[0], "0", "0", root_folder_fuse_mount)
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
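
# A hedged sketch of one byte-quota probe from the flow above: set
# ceph.quota.max_bytes on a directory and confirm that writing well past the
# limit eventually fails with EDQUOT. The quota size and chunk size are
# arbitrary choices, and CephFS may enforce the quota with a short delay.
import errno
import os


def byte_quota_blocks_writes(directory, max_bytes=1073741824,
                             chunk=4 * 1024 * 1024):
    os.setxattr(directory, "ceph.quota.max_bytes", str(max_bytes).encode())
    written = 0
    try:
        with open(os.path.join(directory, "quota_probe.bin"), "wb") as fh:
            while written < 2 * max_bytes:  # try to overshoot the quota
                fh.write(b"\0" * chunk)
                fh.flush()
                os.fsync(fh.fileno())
                written += chunk
    except OSError as err:
        return err.errno == errno.EDQUOT
    return False  # the quota never kicked in
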
Example No. 27
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573524	Ensure the subvolume attributes are retained post clone operations

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. create fs volume create cephfs if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_clone_attr_vol_1
    7. Set file and size xattributes on dir

    Test Case Flow:
    1. Create Clone out of subvolume.
    2. Mount the cloned volume.
    3. Validate the contents of cloned volume with contents present of subvolume
    4. Validate file and size xattributes on dir in cloned volume

    Clean Up:
    1. Delete Cloned volume
    2. Delete subvolumegroup
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_clone_attr_vol_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_attr_vol",
            "group_name": "subvolgroup_clone_attr_vol_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_clone_attr_vol subvolgroup_clone_attr_vol_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
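        # Set file and byte quota xattrs on the source subvolume before taking the
        # snapshot; the clone created from that snapshot is expected to retain these
        # attributes (CEPH-83573524).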
        fs_util.set_quota_attrs(clients[0], 9999, 999, kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_attr_vol",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_clone_attr_vol_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        client1.exec_command(sudo=True, cmd=f"mkdir -p /tmp/{mounting_dir}")

        log.info("Clone a subvolume from snapshot")
        clone_attr_vol_1 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_clone_attr_vol",
            "snap_name": "snap_1",
            "target_subvol_name": "clone_attr_vol_1",
            "group_name": "subvolgroup_clone_attr_vol_1",
        }
        fs_util.create_clone(client1, **clone_attr_vol_1)
        fs_util.validate_clone_state(client1, clone_attr_vol_1)
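        # validate_clone_state() is expected to poll the clone status until it reaches
        # the 'complete' state, after which the clone can be mounted and compared with
        # the source subvolume.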
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} {clone_attr_vol_1['target_subvol_name']}",
        )
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_2,
            extra_params=f" -r {clonevol_path.read().decode().strip()}",
        )
        quota_attrs_clone = fs_util.get_quota_attrs(clients[0],
                                                    fuse_mounting_dir_2)
        client1.exec_command(
            sudo=True,
            cmd=f"diff -qr {kernel_mounting_dir_1} {fuse_mounting_dir_2}")
        if quota_attrs_clone != quota_attrs:
            log.info(f"attributes of cloned volumes{quota_attrs_clone}")
            log.info(f"attributes of volumes{quota_attrs}")
            log.error(
                "Quota attributes of the clone is not matching with quota attributes of subvolume"
            )
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "clone_attr_vol_1"
            },
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1,
                                **snapshot,
                                validate=False,
                                check_ec=False)
        fs_util.remove_subvolume(client1,
                                 **subvolume,
                                 validate=False,
                                 check_ec=False)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
Example no. 28
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573400   Test to validate the increase in quota limit once it reaches the max limit. (files)
                     Create an FS, create 10 directories, and mount them on the kernel client and the fuse client
                     (5 mounts each). Set the max file quota to a number (say 50), add up to that number of files
                     to the directory, and verify that the set quota limit is working fine.
                     Increase the set quota limit to more than what was set earlier, add more files, and verify.
                     Similarly, set different limits on different directories, increase the limit, and verify its
                     functionality and the quota.
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the fs volume (ceph fs volume create cephfs) if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_1 --size 5368706371 --group_name subvolgroup_1
    5. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1

    Test Case Flow:
    1. Mount the subvolume_1 on the client using fuse
    2. Mount the subvolume_2 on the client using kernel
    3. Set file attribute 10 on both mount points
    4. Create 11 files and check that creation fails at the 11th iteration
    5. Perform the same on the kernel mount
    6. Create a directory inside fuse mount and set file attribute and verify
    7. Create a directory inside kernel mount and set file attribute and verify
    8. Increase the quota of files to 20 and try creating the files on the same directory used in step 3
    9. Perform the same on the kernel mount
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_quota_file_increase_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_file_incr_fuse",
                "group_name": "subvolgroup_quota_file_increase_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_file_incr_kernel",
                "group_name": "subvolgroup_quota_file_increase_1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_file_incr_kernel subvolgroup_quota_file_increase_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_file_incr_fuse subvolgroup_quota_file_increase_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.strip()}",
        )
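        # CephFS directory quotas are driven by the ceph.quota.max_files and
        # ceph.quota.max_bytes extended attributes; set_quota_attrs() presumably applies
        # them on the mount point, roughly equivalent to (illustrative only):
        #   setfattr -n ceph.quota.max_files -v 50 <mount_point>
        #   setfattr -n ceph.quota.max_bytes -v 10000000 <mount_point>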
        fs_util.set_quota_attrs(clients[0], 50, 10000000, fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.file_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)
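        # file_quota_test() is assumed to create files up to the max_files limit read
        # from quota_attrs and to verify that creating files beyond that limit fails.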

        log.info("Increasing the quota to 100 and validating file quota attr")
        fs_util.set_quota_attrs(clients[0], 100, 10000000, fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.file_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        fs_util.set_quota_attrs(clients[0], 50, 10000000,
                                kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.file_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        log.info("Increasing the quota to 100 and validating file quota attr")
        fs_util.set_quota_attrs(clients[0], 100, 10000000,
                                kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.file_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
Example no. 29
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573418	Create a Snapshot, reboot the node and rollback the snapshot

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the fs volume (ceph fs volume create cephfs) if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_reboot_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume. We will add known data as we are going to verify the files
        Ex: create_file_data()
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_reboot_snapshot_1

    Script Flow:
    1. Mount the subvolume on the client using Kernel
    2. Write data into the mount point
    3. Get the checksum of the files inside the mount point
    4. Reboot the node
    5. Mount the subvolume again and revert the snapshot
    6. Get the checksum of the files
    7. Validate the checksums
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_reboot_snapshot_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_reboot_snapshot",
            "group_name": "subvolgroup_reboot_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_reboot_snapshot subvolgroup_reboot_snapshot_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
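        # Write known data into the mount so per-file checksums can be compared after
        # the reboot and the snapshot revert.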
        fs_util.create_file_data(client1, kernel_mounting_dir_1, 3, "snap1",
                                 "snap_1_data ")
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_reboot_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_reboot_snapshot_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        files_checksum_before_reboot = fs_util.get_files_and_checksum(
            client1, f"/mnt/cephfs_kernel{mounting_dir}_1")
        fs_util.reboot_node(client1)
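        # The kernel mount does not survive the reboot, so the subvolume is mounted
        # again at a fresh mount point before reverting to the snapshot.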
        kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_reboot_snapshot subvolgroup_reboot_snapshot_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_2,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
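        # The snapshot is 'reverted' by copying the data back from the hidden .snap
        # directory, where subvolume snapshots are exposed as _<snap_name>_<id>.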
        client1.exec_command(
            sudo=True,
            cmd=f"cd {kernel_mounting_dir_2};cp .snap/_snap_1_*/* .")
        files_checksum_after_reboot = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_2)
        if files_checksum_before_reboot != files_checksum_after_reboot:
            log.error("checksum is not matching after snapshot1 revert")
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)