Code example #1
0
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-11319	Create first snap, add more data to the original, then create a second snap.
                Rollback 1st snap and do data validation.
                Rollback 2nd snap and do data validation. Perform cross-platform rollback,
                i.e. take snap on kernel mount and perform rollback using fuse mount.

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Creates fs volume "cephfs" if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvol_cross_platform_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create data on the subvolume. We will add known data as we are going to verify the files
        Ex: create_file_data()
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvol_cross_platform_snapshot_1

    Script Flow:
    1. Mount the subvolume on the client using kernel and fuse mounts
    2. Write data into the fuse mount point i.e., data_from_fuse_mount
    3. Collect the checksum of the files
    4. Take snapshot at this point i.e., snap_1
    5. Write data into the kernel mount point i.e., data_from_kernel_mount
    6. Collect the checksum of the files
    7. Take snapshot at this point i.e., snap_2
    8. On kernel mount revert the snapshot to snap_1 and compare the checksum of the files collected in step 3
    9. On fuse mount revert the snapshot to snap_2 and compare the checksum of the files collected in step 6

    Returns:
        0 on success, 1 on failure.
    """
    # Pre-initialise every name the cleanup path touches so the ``finally``
    # block cannot raise NameError (and mask the real error) when the test
    # fails before the corresponding resource was created.
    client1 = None
    fuse_snapshot = None
    kernel_snapshot = None
    subvolume = None
    subvolumegroup_list = []
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        # Random suffix so concurrent runs do not collide on mount paths.
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvol_cross_platform_snapshot_1"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "group_name": "subvol_cross_platform_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_cross_platform_snapshot"
            f" subvol_cross_platform_snapshot_1",
        )
        subvol_path = subvol_path.strip()
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path}",
        )
        # Known data written through the fuse mount; checksummed before snap_1.
        fs_util.create_file_data(
            client1, fuse_mounting_dir_1, 3, "snap1", "data_from_fuse_mount "
        )
        fuse_files_checksum = fs_util.get_files_and_checksum(
            client1, fuse_mounting_dir_1
        )
        fuse_snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvol_cross_platform_snapshot_1",
        }
        fs_util.create_snapshot(client1, **fuse_snapshot)
        # Mount the same subvolume a second time via the kernel client.
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path}",
        )
        fs_util.create_file_data(
            client1, kernel_mounting_dir_1, 3, "snap1", "data_from_kernel_mount "
        )
        kernel_snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "snap_name": "snap_2",
            "group_name": "subvol_cross_platform_snapshot_1",
        }
        fs_util.create_snapshot(client1, **kernel_snapshot)
        kernel_files_checksum = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_1
        )
        # Revert on the kernel mount to the snapshot taken from the fuse mount
        # by copying the snap_1 contents back in place.
        client1.exec_command(
            sudo=True, cmd=f"cd {kernel_mounting_dir_1};cp .snap/_snap_1_*/* ."
        )
        kernel_mount_revert_snap_fuse = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_1
        )
        if fuse_files_checksum != kernel_mount_revert_snap_fuse:
            log.error(
                "checksum did not match after reverting to snap_1 (snapshot taken on fuse mount)"
            )
            return 1
        # Revert on the fuse mount to the snapshot taken from the kernel mount.
        # Both mount points expose the same subvolume, so checksumming the
        # kernel mount still validates the revert performed via fuse.
        client1.exec_command(
            sudo=True, cmd=f"cd {fuse_mounting_dir_1};cp .snap/_snap_2_*/* ."
        )
        fuse_mount_revert_snap_kernel = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_1
        )
        if kernel_files_checksum != fuse_mount_revert_snap_kernel:
            log.error(
                "checksum did not match after reverting to snap_2 (snapshot taken on kernel mount)"
            )
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progress")
        if client1 is not None:
            if kernel_snapshot is not None:
                fs_util.remove_snapshot(client1, **kernel_snapshot)
            if fuse_snapshot is not None:
                fs_util.remove_snapshot(client1, **fuse_snapshot)
            if subvolume is not None:
                fs_util.remove_subvolume(client1, **subvolume)
            for subvolumegroup in subvolumegroup_list:
                fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
Code example #2
0
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573420	Try writing the data to snap directory

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Creates fs volume "cephfs" if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_write_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_

    Script Flow:
    1. Mount the subvolume on the client using kernel mount
    2. Write data into the mount point
    3. Try creating directory and file inside the mount_point/.snap directory (CEPH-83573420);
       both operations are expected to fail.

    Returns:
        0 on success, 1 on failure.
    """
    # Pre-initialise every name the cleanup path touches so the ``finally``
    # block cannot raise NameError when the test fails early.
    client1 = None
    snapshot = None
    subvolume = None
    subvolumegroup_list = []
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        # Random suffix so concurrent runs do not collide on mount paths.
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_write_snapshot_1"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_write_snapshot",
            "group_name": "subvolgroup_write_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_write_snapshot subvolgroup_write_snapshot_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        fs_util.create_file_data(
            client1, kernel_mounting_dir_1, 3, "snap1", "snap_1_data "
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_write_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_write_snapshot_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        # .snap is a read-only virtual directory exposed by CephFS; any write
        # (mkdir or file creation) inside it must be rejected.
        # NOTE(review): this assumes exec_command returns a 0 exit status on
        # success when check_ec=False — confirm against the CephCI client API.
        out, rc = client1.exec_command(
            sudo=True,
            cmd=f"mkdir /mnt/cephfs_kernel{mounting_dir}_1/.snap/test",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed("Mkdir is working in .snap directory")
        out, rc = client1.exec_command(
            sudo=True,
            cmd=f"touch /mnt/cephfs_kernel{mounting_dir}_1/.snap/test",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed("touch is working in .snap directory")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progress")
        if client1 is not None:
            if snapshot is not None:
                fs_util.remove_snapshot(client1, **snapshot)
            if subvolume is not None:
                fs_util.remove_subvolume(client1, **subvolume)
            # Remove the subvolume group created above (the original leaked
            # it), consistent with the other snapshot tests in this file.
            for subvolumegroup in subvolumegroup_list:
                fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
Code example #3
0
File: rename_snap_dir.py  Project: udaysk23/cephci
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573255	Try renaming the snapshot directory and rollback. Create a FS and
                    create 10 directories and mount them on kernel client and fuse client (5 mounts each).
                    Add data (~100 GB). Create a snapshot and verify the content in the snap directory.
                    Try modifying the snapshot name.

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Creates fs volume "cephfs" if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvol_rename_sanp_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvol_rename_sanp_1

    Script Flow:
    1. Mount the subvolume on the client using fuse mount.
    2. Rename the snapshot .snap/_snap_1 to .snap/_snap_rename; the rename is
       expected to be rejected by CephFS.

    Returns:
        0 on success, 1 on failure.
    """
    # Pre-initialise every name the cleanup path touches so the ``finally``
    # block cannot raise NameError when the test fails early.
    client1 = None
    fuse_snapshot = None
    subvolume = None
    subvolumegroup_list = []
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        # Random suffix so concurrent runs do not collide on mount paths.
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvol_rename_sanp_1"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_rename_sanp",
            "group_name": "subvol_rename_sanp_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_rename_sanp subvol_rename_sanp_1",
        )
        subvol_path = subvol_path.strip()
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path}",
        )
        fs_util.create_file_data(
            client1, fuse_mounting_dir_1, 3, "snap1", "data_from_fuse_mount "
        )
        fuse_snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_rename_sanp",
            "snap_name": "snap_1",
            "group_name": "subvol_rename_sanp_1",
        }
        fs_util.create_snapshot(client1, **fuse_snapshot)
        # Attempt the rename; .snap entries are managed by CephFS and must not
        # be renameable from the client.
        # NOTE(review): this assumes exec_command returns a 0 exit status on
        # success when check_ec=False — confirm against the CephCI client API.
        out, rc = client1.exec_command(
            sudo=True,
            cmd=f"cd {fuse_mounting_dir_1};mv .snap/_snap_1_* .snap/_snap_rename_",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed(
                "we are able to rename the snap directory.. which is not correct"
            )
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progress")
        if client1 is not None:
            if fuse_snapshot is not None:
                fs_util.remove_snapshot(client1, **fuse_snapshot)
            if subvolume is not None:
                fs_util.remove_subvolume(client1, **subvolume)
            for subvolumegroup in subvolumegroup_list:
                fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
Code example #4
0
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573418	Create a snapshot, reboot the node and rollback the snapshot

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Creates fs volume "cephfs" if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_reboot_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create data on the subvolume. We will add known data as we are going to verify the files
        Ex: create_file_data()
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_reboot_snapshot_1

    Script Flow:
    1. Mount the subvolume on the client using kernel mount
    2. Write data into the mount point
    3. Get the checksum of the files inside the mount point
    4. Reboot the node
    5. Mount the subvolume again and revert the snapshot
    6. Get the checksum of the files
    7. Validate the checksums

    Returns:
        0 on success, 1 on failure.
    """
    # Pre-initialise every name the cleanup path touches so the ``finally``
    # block cannot raise NameError when the test fails early.
    client1 = None
    snapshot = None
    subvolume = None
    subvolumegroup_list = []
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        # Random suffix so concurrent runs do not collide on mount paths.
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_reboot_snapshot_1"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_reboot_snapshot",
            "group_name": "subvolgroup_reboot_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_reboot_snapshot subvolgroup_reboot_snapshot_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        fs_util.create_file_data(
            client1, kernel_mounting_dir_1, 3, "snap1", "snap_1_data "
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_reboot_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_reboot_snapshot_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        files_checksum_before_reboot = fs_util.get_files_and_checksum(
            client1, f"/mnt/cephfs_kernel{mounting_dir}_1"
        )
        # Reboot wipes the (non-persistent) kernel mount, so the subvolume is
        # re-mounted at a fresh directory afterwards.
        fs_util.reboot_node(client1)
        kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_reboot_snapshot subvolgroup_reboot_snapshot_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_2,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        # Revert to snap_1 by copying the snapshot contents back in place.
        client1.exec_command(
            sudo=True, cmd=f"cd {kernel_mounting_dir_2};cp .snap/_snap_1_*/* ."
        )
        files_checksum_after_reboot = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_2
        )
        if files_checksum_before_reboot != files_checksum_after_reboot:
            log.error("checksum is not matching after snapshot1 revert")
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progress")
        if client1 is not None:
            if snapshot is not None:
                fs_util.remove_snapshot(client1, **snapshot)
            if subvolume is not None:
                fs_util.remove_subvolume(client1, **subvolume)
            # Remove the subvolume group created above (the original leaked
            # it), consistent with the other snapshot tests in this file.
            for subvolumegroup in subvolumegroup_list:
                fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
Code example #5
0
def run(ceph_cluster, **kw):
    """
    CEPH-83574026 - zip unzip files continuously on a nfs share
    Pre-requisites:
    1. Create cephfs volume
       creates fs volume create <vol_name>
    2. Create nfs cluster
       ceph nfs cluster create <nfs_name> <nfs_server>
    Test operation:
    1. Create cephfs nfs export
       ceph nfs export create cephfs <fs_name> <nfs_name> <nfs_export_name> path=<export_path>
    2. Create multiple files
    3. Continuously zip & unzip files created in step 2
    4. Repeat step 2-3
    Clean-up:
    1. Remove all the data in Cephfs file system
    2. Remove all the cephfs mounts
    3. Delete cephfs nfs export

    Returns:
        0 on success, 1 on failure.
    """
    # Pre-initialise every name the cleanup path touches so the ``finally``
    # block cannot raise NameError when the test fails early.
    client1 = None
    nfs_mounting_dir = None
    nfs_name = "cephfs-nfs"
    nfs_export_name = None
    try:
        # NOTE(review): the original logged CEPH-83574024 while the docstring
        # says CEPH-83574026; aligned with the docstring.
        tc = "CEPH-83574026"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        rhbuild = config.get("rhbuild")
        nfs_servers = ceph_cluster.get_ceph_objects("nfs")
        nfs_server = nfs_servers[0].node.hostname
        # Randomised export/mount names so concurrent runs do not collide.
        nfs_export_name = "/export_" + "".join(
            secrets.choice(string.digits) for i in range(3)
        )
        export_path = "/"
        fs_name = "cephfs"
        nfs_mounting_dir = "/mnt/nfs_" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
        )
        # The export-create CLI argument order changed between releases:
        # 5.0 takes the fs name before the cluster name.
        if "5.0" in rhbuild:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {fs_name} {nfs_name} "
                f"{nfs_export_name} path={export_path}",
            )
        else:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {nfs_name} "
                f"{nfs_export_name} {fs_name} path={export_path}",
            )
        commands = [
            f"mkdir -p {nfs_mounting_dir}",
            f"mount -t nfs -o port=2049 {nfs_server}:{nfs_export_name} {nfs_mounting_dir}",
            "yum install zip -y",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        # Create dir1..dir10 explicitly in Python.  The original passed
        # "mkdir {dir}/dir1..10", which is not brace expansion and would
        # create a single directory literally named "dir1..10".
        dirs = " ".join(f"{nfs_mounting_dir}/dir{i}" for i in range(1, 11))
        client1.exec_command(sudo=True, cmd=f"mkdir -p {dirs}")
        # Seed dir1 (the source of the first zip stage) and dir7 with data.
        # The original wrote the first batch to "files_dir1", which no other
        # step ever read.
        fs_util.create_file_data(
            client1, nfs_mounting_dir + "/dir1", 100, "file", "file_data"
        )
        fs_util.create_file_data(
            client1, nfs_mounting_dir + "/dir7", 100, "file", "random_data"
        )
        # Zip the files of dir<N> into dir<N+1> and unzip them there, for
        # every consecutive directory pair.  The original hand-rolled command
        # list was broken: the fragments containing {nfs_mounting_dir} were
        # not f-strings (the placeholder reached the shell literally) and
        # every for-loop was missing its terminating "; done".  seq is used
        # instead of {1..100} so the loop also works under a POSIX sh.
        commands = []
        for src in range(1, 10):
            dst = src + 1
            commands.append(
                f"for n in $(seq 1 100); do zip {nfs_mounting_dir}/dir{dst}/file_$(printf %03d $n).zip "
                f"{nfs_mounting_dir}/dir{src}/file_$(printf %03d $n); done"
            )
            commands.append(
                # -o: overwrite without prompting; an interactive prompt
                # would hang the unattended test run.
                f"for n in $(seq 1 100); do unzip -o {nfs_mounting_dir}/dir{dst}/file_$(printf %03d $n).zip "
                f"-d {nfs_mounting_dir}/dir{dst}/; done"
            )
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up the system")
        if client1 is not None and nfs_mounting_dir is not None:
            commands = [
                f"rm -rf {nfs_mounting_dir}/*",
                f"umount {nfs_mounting_dir}",
                f"ceph nfs export delete {nfs_name} {nfs_export_name}",
            ]
            for command in commands:
                client1.exec_command(sudo=True, cmd=command)
            client1.exec_command(
                sudo=True, cmd=f"rm -rf {nfs_mounting_dir}/", check_ec=False
            )