def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573418	Create a Snapshot, reboot the node and rollback the snapshot

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create cephfs volume if it is not present: ceph fs volume create cephfs
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_reboot_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create data on the subvolume. Known data is written so the files can be verified later
        Ex: create_file_data()
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_reboot_snapshot_1

    Script Flow:
    1. Mount the subvolume on the client using the kernel client
    2. Write data into the mount point
    3. Get the checksum of the files inside the mount point
    4. Reboot the node
    5. Mount the subvolume again and revert to the snapshot
    6. Get the checksum of the files
    7. Validate that the checksums match
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.error(
                f"This test requires a minimum of 1 client node. This run has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_reboot_snapshot_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_reboot_snapshot",
            "group_name": "subvolgroup_reboot_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of the subvolume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_reboot_snapshot subvolgroup_reboot_snapshot_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
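        # Kernel-mount the subvolume: resolve its path from "getpath" above and
        # pass the comma-separated monitor IPs to the mount helper.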
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        fs_util.create_file_data(client1, kernel_mounting_dir_1, 3, "snap1",
                                 "snap_1_data ")
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_reboot_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_reboot_snapshot_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        files_checksum_before_reboot = fs_util.get_files_and_checksum(
            client1, f"/mnt/cephfs_kernel{mounting_dir}_1")
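        # Reboot only the client node; the snapshot is stored cluster-side,
        # so it should survive the client reboot.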
        fs_util.reboot_node(client1)
        kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_reboot_snapshot subvolgroup_reboot_snapshot_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_2,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
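        # "Revert" by copying the snapshot contents back into the live tree:
        # snapshots taken on an ancestor directory appear under .snap with an
        # underscore-prefixed name, hence the _snap_1_* glob.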
        client1.exec_command(
            sudo=True,
            cmd=f"cd {kernel_mounting_dir_2};cp .snap/_snap_1_*/* .")
        files_checksum_after_reboot = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_2)
        if files_checksum_before_reboot != files_checksum_after_reboot:
            log.error("checksum is not matching after snapshot1 revert")
            return 1
        return 0
    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progress")
        fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
Example #2
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573871   Explore ceph-fuse mount of more than 2 Filesystem on same client.
                    Also verify persistent mounts upon reboots.
    Pre-requisites :
    1. We need at least one client node to execute this test case

    Test Case Flow:
    1. Create 2 file systems if not present
    2. Mount both the file systems using fuse mount and add fstab entries
    3. Reboot the node
    4. Validate that the mount points are still present
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.error(
                f"This test requires a minimum of 1 client node. This run has only {len(clients)} clients"
            )
            return 1
        client1 = clients[0]
        client1.exec_command(sudo=True,
                             cmd="ceph fs volume create cephfs_new",
                             check_ec=False)
        fs_util.wait_for_mds_process(client1, "cephfs_new")
        total_fs = fs_util.get_fs_details(client1)
        if len(total_fs) < 2:
            log.error(
                "We can't proceed with the test case as we are not able to create 2 filesystems"
            )
            return 1

        fs_names = [fs["name"] for fs in total_fs]
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"

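        # Fuse-mount each filesystem separately via --client_fs and add fstab
        # entries so the mounts are restored automatically after a reboot.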
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f"--client_fs {fs_names[0]}",
            fstab=True,
        )
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_2,
            extra_params=f"--client_fs {fs_names[1]}",
            fstab=True,
        )
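        # Generate baseline IO on both mounts with the smallfile workload generator.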
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{fuse_mounting_dir_1}",
            long_running=True,
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{fuse_mounting_dir_2}",
            long_running=True,
        )
        fs_util.reboot_node(client1)
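        # After the reboot the fstab entries should have remounted both
        # filesystems; confirm by parsing the output of "mount".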
        out, rc = client1.exec_command(cmd="mount")
        mount_output = out.split()
        log.info("validate fuse mount:")
        assert fuse_mounting_dir_1.rstrip(
            "/") in mount_output, "fuse mount failed"
        assert fuse_mounting_dir_2.rstrip(
            "/") in mount_output, "fuse mount failed"
        client1.exec_command(
            sudo=True, cmd=f"mkdir -p {fuse_mounting_dir_1}/io_after_reboot")
        client1.exec_command(
            sudo=True, cmd=f"mkdir -p {fuse_mounting_dir_2}/io_after_reboot")
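        # Run IO again on the remounted filesystems to confirm they are still writable.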
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{fuse_mounting_dir_1}/io_after_reboot",
            long_running=True,
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{fuse_mounting_dir_2}/io_after_reboot",
            long_running=True,
        )
        return 0
    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
        return 1
    finally:
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_2)
        commands = [
            "ceph config set mon mon_allow_pool_delete true",
            "ceph fs volume rm cephfs_new --yes-i-really-mean-it",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        client1.exec_command(sudo=True,
                             cmd="mv /etc/fstab.backup /etc/fstab",
                             check_ec=False)
Example #3
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573408   Test to validate the quota remains intact even after rebooting the Node.
                    Create a FS and create 10 directories and mount them on kernel client and fuse client(5 mounts
                    each). Set max bytes quota to a number(say 1Gb) and also set max files quota (say 20) and verify if
                    the set quota limit is working fine by filling max number of files and also by filling data to reach
                    the max limit. Reboot the node , once the node is up verify if the set quota remains or not.
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create cephfs volume if it is not present: ceph fs volume create cephfs
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_1 --size 5368706371 --group_name subvolgroup_1
    5. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1

    Test Case Flow:
    1. Mount the subvolume_1 on the client using fuse
    2. Mount the subvolume_2 on the client using kernel
    3. Set quota attributes of 1 GB and 50 files on both mount points
    4. Reboot the client node
    5. Mount the subvolumes again
    6. Validate the quota attributes after reboot
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.error(
                f"This test requires a minimum of 1 client node. This run has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvolgroup_quota_byte_increase_1"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_incr_fuse",
                "group_name": "subvolgroup_quota_byte_increase_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_incr_kernel",
                "group_name": "subvolgroup_quota_byte_increase_1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of the subvolume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_byte_incr_kernel subvolgroup_quota_byte_increase_1",
        )
        kernel_subvol_path = subvol_path.read().decode().strip()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{kernel_subvol_path}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_byte_incr_fuse subvolgroup_quota_byte_increase_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fuse_subvol_path = subvol_path.read().decode().strip()
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {fuse_subvol_path}",
        )
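        # Set quotas of 50 files and 1 GiB (1073741824 bytes) on each mount and
        # record the attributes so they can be compared after the reboot.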
        fs_util.set_quota_attrs(clients[0], 50, 1073741824, fuse_mounting_dir_1)
        fuse_quota_attrs_before_reboot = fs_util.get_quota_attrs(
            clients[0], fuse_mounting_dir_1
        )

        fs_util.set_quota_attrs(clients[0], 50, 1073741824, kernel_mounting_dir_1)
        kernel_quota_attrs_before_reboot = fs_util.get_quota_attrs(
            clients[0], kernel_mounting_dir_1
        )

        fs_util.reboot_node(client1)

        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
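        # Remount both subvolumes at fresh mount points; the quotas are stored
        # on the subvolume directories, so they should persist across the reboot.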
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_2,
            ",".join(mon_node_ips),
            sub_dir=f"{kernel_subvol_path}",
        )
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_2,
            extra_params=f" -r {fuse_subvol_path}",
        )
        fuse_quota_attrs_after_reboot = fs_util.get_quota_attrs(
            clients[0], fuse_mounting_dir_2
        )
        kernel_quota_attrs_after_reboot = fs_util.get_quota_attrs(
            clients[0], kernel_mounting_dir_2
        )
        log.info(
            f"Quota Attributes before reboot: {fuse_quota_attrs_before_reboot}\n"
            f"After reboot: {fuse_quota_attrs_after_reboot}"
        )
        if fuse_quota_attrs_after_reboot != fuse_quota_attrs_before_reboot:
            log.error("Fuse mount quota attributes are not matching after reboot")
            log.error(
                f"Quota Attributes before reboot: {fuse_quota_attrs_before_reboot}\n"
                f"After reboot: {fuse_quota_attrs_after_reboot}"
            )
            return 1
        log.info(
            f"Quota Attributes before reboot: {kernel_quota_attrs_before_reboot}\n"
            f"After reboot: {kernel_quota_attrs_after_reboot}"
        )
        if kernel_quota_attrs_before_reboot != kernel_quota_attrs_after_reboot:
            log.error("Kernel mount quota attributes are not matching after reboot")
            log.error(
                f"Quota Attributes before reboot: {kernel_quota_attrs_before_reboot}\n"
                f"After reboot: {kernel_quota_attrs_after_reboot}"
            )
            return 1
        return 0
    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progress")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
Example #4
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573872   Explore kernel mount of more than 2 Filesystem on same client.
                    Also verify persistent mounts upon reboots.
    Pre-requisites :
    1. We need at least one client node to execute this test case

    Test Case Flow:
    1. Create 2 file systems if not present
    2. Mount both the file systems using kernel mount and add fstab entries
    3. Reboot the node
    4. Validate that the mount points are still present
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.error(
                f"This test requires a minimum of 1 client node. This run has only {len(clients)} clients"
            )
            return 1
        client1 = clients[0]
        client1.exec_command(sudo=True,
                             cmd="ceph fs volume create cephfs_new",
                             check_ec=False)
        total_fs = fs_util.get_fs_details(client1)
        if len(total_fs) < 2:
            log.error(
                "We can't proceed with the test case as we are not able to create 2 filesystems"
            )
            return 1

        fs_names = [fs["name"] for fs in total_fs]
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
        mon_node_ips = fs_util.get_mon_node_ips()
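        # Kernel-mount each filesystem by passing fs=<name> as a mount option and
        # add fstab entries so the mounts come back automatically after a reboot.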
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            extra_params=f",fs={fs_names[0]}",
            fstab=True,
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_2,
            ",".join(mon_node_ips),
            extra_params=f",fs={fs_names[1]}",
            fstab=True,
        )
        fs_util.reboot_node(client1)
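        # After the reboot the fstab entries should have remounted both
        # filesystems; confirm by parsing the output of "mount".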
        out, rc = client1.exec_command(cmd="mount")
        mount_output = out.read().decode()
        mount_output = mount_output.split()
        log.info("validate kernel mount:")
        assert kernel_mounting_dir_1.rstrip(
            "/") in mount_output, "Kernel mount failed"
        assert kernel_mounting_dir_2.rstrip(
            "/") in mount_output, "Kernel mount failed"
        return 0
    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
        return 1
    finally:
        fs_util.client_clean_up(
            "umount",
            kernel_clients=[clients[0]],
            mounting_dir=kernel_mounting_dir_1,
        )
        fs_util.client_clean_up(
            "umount",
            kernel_clients=[clients[0]],
            mounting_dir=kernel_mounting_dir_2,
        )

        commands = [
            "ceph config set mon mon_allow_pool_delete true",
            "ceph fs volume rm cephfs_new --yes-i-really-mean-it",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        client1.exec_command(sudo=True,
                             cmd="cp /etc/fstab.backup /etc/fstab",
                             check_ec=False)