def run(ceph_cluster, **kw):
    """
    pre-requisites:
    1. Create a subvolume group that does not exist
    Test operation:
    1. Try to delete the subvolume group
    2. Check if subvolume group deletion fails

    """
    try:

        tc = "CEPH-83574162"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        subvolume_group = "non_exist_subvolume_group_name"
        output, err = fs_util.remove_subvolumegroup(client1,
                                                    "cephfs",
                                                    subvolume_group,
                                                    check_ec=False)
        # Removing a subvolume group that does not exist is expected to fail,
        # so an error from the command means the test passes.
        if err:
            return 0
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    Test operation:
    1. Create a subvolume_group name that does not exist
    2. Try to remove the subvolume_group name that does not exist
    3. The command should fail because the subvolume_group name does not exist.
    4. Check if the command is failed
    """
    try:

        tc = "CEPH-83574168"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        target_delete_subvolume_group = "non_exist_subvolume_group_name"
        c_out, c_err = fs_util.remove_subvolumegroup(
            client1,
            "cephfs",
            target_delete_subvolume_group,
            validate=False,
            check_ec=False,
        )
        if c_err:
            return 0
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #3
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573878   Verify the option to enable/disable multiFS support
    Pre-requisites :
    1. We need at least one client node to execute this test case

    Test Case Flow:
    1. check the enable_multiple flag value
    2. Get total number of filesystems present
    3. Disable enable_multiple if enabled and try creating filesystem
    4. Enable enable_multiple and try creating filesystem
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        client1 = clients[0]
        total_fs = fs_util.get_fs_details(client1)
        if len(total_fs) == 1:
            client1.exec_command(sudo=True,
                                 cmd="ceph fs flag set enable_multiple false")
        out, rc = client1.exec_command(sudo=True,
                                       cmd="ceph fs volume create cephfs_new",
                                       check_ec=False)
        if rc == 0:
            raise CommandFailed(
                "We are able to create multiple filesystems even after setting enable_multiple to false"
            )
        log.info(
            "We are not able to create multiple filesystems after setting enable_multiple to false as expected"
        )
        client1.exec_command(sudo=True,
                             cmd="ceph fs flag set enable_multiple true")
        client1.exec_command(sudo=True, cmd="ceph fs volume create cephfs_new")
        log.info(
            "We are able to create multipe filesystems after setting enable_multiple to True as expected"
        )
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        commands = [
            "ceph config set mon mon_allow_pool_delete true",
            "ceph fs volume rm cephfs_new --yes-i-really-mean-it",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
Example #4
def run(ceph_cluster, **kw):
    """
    Pre-requisites:
    1. Create a pool whose name contains "-"
    Test operation:
    1. Create a cephfs
    2. Add the pool with "-" in its name as a data pool to the cephfs
    3. Create a subvolume within the cephfs attached to the pool
    4. Run IOs
    5. Check if any failure happens during the operation

    """
    try:

        tc = "CEPH-83573528"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
        )
        pool_names = ["ceph-fs-pool"]

        for pool_name in pool_names:
            client1.exec_command(f"ceph osd pool create {pool_name}")
            output, err = client1.exec_command(
                f"ceph fs add_data_pool cephfs {pool_name}"
            )
            if output == 1:
                return 1
            subvol_name = "".join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in list(range(10))
            )
            fs_util.create_subvolume(client1, "cephfs", f"subvol_{subvol_name}")
            run_ios(client1, kernel_mounting_dir_1)
            fs_util.remove_subvolume(client1, "cephfs", f"subvol_{subvol_name}")

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
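
# NOTE: run_ios() used above is a helper from the original test module that is
# not included in this listing. A minimal sketch of what such a helper could
# look like (the smallfile workload mirrors the other examples here; the exact
# implementation is an assumption, not the source's code):
def run_ios(client, mounting_dir):
    """Run a small smallfile write workload on the given mount point."""
    client.exec_command(
        sudo=True,
        cmd=f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create "
        f"--threads 10 --file-size 4 --files 100 --files-per-dir 10 "
        f"--dirs-per-dir 2 --top {mounting_dir}",
        long_running=True,
    )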
Example #5
def run(ceph_cluster, **kw):
    """
    Test operation:
    1. Generate a random name for subvolume creation
    2. Create a cephfs subvolume with the name created in the first step
    3. Resize the cephfs subvolume created in the first step
    4. Check if the cephfs subvolume is resized
    5. Remove the cephfs subvolume
    6. Verify if the trash directory is empty
    """
    try:

        tc = "CEPH-83574186"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        subvolume_name_generate = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5)))
        subvolume = {
            "vol_name": "cephfs",
            "subvol_name": f"subvol_{subvolume_name_generate}",
            "size": "5368706371",
        }
        subvolume_name = subvolume["subvol_name"]
        fs_util.create_subvolume(client1, **subvolume)
        new_size = "26843531685"
        c_out, c_err = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume resize cephfs {subvolume_name} {new_size}",
        )
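        # Assumption based on the parsing below: `ceph fs subvolume resize`
        # prints a JSON list in which the element at index 1 holds the new
        # "bytes_quota" value.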
        c_out_result = json.loads(c_out.read().decode())
        target_size = c_out_result[1]["bytes_quota"]
        if int(target_size) != int(new_size):
            return 1
        c_out2, c_err2 = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume info cephfs {subvolume_name} -f json")
        c_out2_result = json.loads(c_out2.read().decode())
        target_quota = c_out2_result["bytes_quota"]
        if int(target_quota) != int(new_size):
            return 1
        fs_util.remove_subvolume(client1,
                                 "cephfs",
                                 subvolume_name,
                                 validate=True)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    pre-requisites:
    1. Create a volume with a name
    2. Create a subvolume with a name
    3. Create a subvolume group with a name
    Test operation:
    1. Try to create a volume with the same name
    2. Try to create a subvolume with the same name
    3. Try to create a subvolume group with the same name
    """
    try:
        tc = "CEPH-83573428"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        fs_util.prepare_clients(clients, build)

        random_name = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5)))
        volume_name = "vol_01" + random_name
        subvolume_name = "subvol_01" + random_name
        subvolume_group_name = "subvol_group_name_01" + random_name
        log.info("Ceph Build number is " + build[0])
        fs_util.create_fs(client1, volume_name)
        fs_util.create_subvolume(client1, volume_name, subvolume_name)
        fs_util.create_subvolumegroup(client1, "cephfs", subvolume_group_name)
        output1, err1 = fs_util.create_fs(client1, volume_name, check_ec=False)
        output2, err2 = fs_util.create_subvolume(client1,
                                                 volume_name,
                                                 subvolume_name,
                                                 check_ec=False)
        output3, err3 = fs_util.create_subvolumegroup(client1,
                                                      "cephfs",
                                                      subvolume_group_name,
                                                      check_ec=False)
        if output1 == 0 or output2 == 0 or output3 == 0:
            return 1
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #7
def run(ceph_cluster, **kw):
    """
    Pre-requisites:
    1. Pick a random subvolume_group name that does not exist
    Test operation:
    1. Try to create a subvolume in that subvolume_group
    2. If the creation fails, note it
    3. Try to delete a subvolume in that subvolume_group
    4. If the deletion fails, note it
    5. If both the creation and the deletion fail, return 0
    """
    try:

        tc = "CEPH-83574161"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        result = 0
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        subvolume_group = "non_exist_subvolume_group_name"
        subvolume_name_generate = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5)))
        subvolume = {
            "vol_name": "cephfs",
            "subvol_name": f"subvol_{subvolume_name_generate}",
            "size": "5368706371",
            "group_name": f"{subvolume_group}",
        }
        c_out, c_err = fs_util.create_subvolume(client1,
                                                **subvolume,
                                                validate=False,
                                                check_ec=False)
        if c_err:
            result = result + 1
        c_out2, c_err2 = fs_util.remove_subvolume(client1,
                                                  **subvolume,
                                                  validate=False,
                                                  check_ec=False)
        if c_err2:
            result = result + 1
        if result != 2:
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #8
def run(ceph_cluster, **kw):
    try:
        log.info(f"MetaData Information {log.metadata} in {__name__}")
        fs_util = FsUtils(ceph_cluster)

        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        fuse_mounting_dir = f"/mnt/cephfs_fuse{mounting_dir}/"
        fs_util.fuse_mount(clients, fuse_mounting_dir)

        mount_test_case(clients, fuse_mounting_dir)

        kernel_mounting_dir = f"/mnt/cephfs_kernel{mounting_dir}/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(clients, kernel_mounting_dir, ",".join(mon_node_ips))

        mount_test_case(clients, kernel_mounting_dir)

        log.info("Cleaning up!-----")
        rc = fs_util.client_clean_up(
            [],
            clients,
            kernel_mounting_dir,
            "umount",
        )
        if rc != 0:
            raise CommandFailed("fuse clients cleanup failed")
        log.info("Fuse clients cleaned up successfully")

        rc = fs_util.client_clean_up(
            clients,
            [],
            fuse_mounting_dir,
            "umount",
        )
        if rc != 0:
            raise CommandFailed("kernel clients cleanup failed")
        log.info("kernel clients cleaned up successfully")
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
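
# NOTE: mount_test_case() used above is a helper from the original test module
# that is not included in this listing, so its exact behaviour is unknown here.
# A purely illustrative placeholder (an assumption, not the source's code) that
# exercises the mount point from every client could look like:
def mount_test_case(clients, mounting_dir):
    """Placeholder: create and list a file on the mount from each client."""
    for client in clients:
        client.exec_command(sudo=True, cmd=f"touch {mounting_dir}test_file")
        client.exec_command(sudo=True, cmd=f"ls -l {mounting_dir}test_file")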
Example #9
def run(ceph_cluster, **kw):
    """
    pre-requisites:
    1. Prepare isolated_namespace name
    Test operation:
    1. Create a subvolume with isolated_namespace option
    2. Check if the creation is successful
    3. After the creation, check if the subvolume is created in isolated namespace using `ceph fs subvolume info`
    4. Remove the subvolume
    """
    try:

        tc = "CEPH-83574187"
        log.info(f"Running CephFS tests for Polarion ID -{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        random_name = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5)))
        subvol_name = "subvol_name_" + random_name
        namespace = "namespace_" + random_name
        fs_util.create_subvolume(client1,
                                 "cephfs",
                                 subvol_name,
                                 namespace_isolated=namespace)
        out1, err1 = client1.exec_command(
            sudo=True, cmd=f"ceph fs subvolume info cephfs {subvol_name}")
        isolated_pool_name = f"fsvolumens_{subvol_name}"
        output1 = json.loads(out1.read().decode())
        target_ns_name = output1["pool_namespace"]
        if target_ns_name != isolated_pool_name:
            log.error("Isolated namespace name are not identical")
            return 1
        fs_util.remove_subvolume(client1, "cephfs", subvol_name)
        return 0

    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
        return 1
Example #10
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573873   Try creating 2 Filesystems using the same Pool (negative)
    Pre-requisites :
    1. We need at least one client node to execute this test case

    Test Case Flow:
    1. Check if cephfs filesystem is present, if not create cephfs
    2. collect data pool and meta datapool info of cephfs
    3. try creating cephfs1 with data pool and meta datapool of cephfs
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_details = fs_util.get_fs_info(client1)
        out, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs new cephfs1 {fs_details['metadata_pool_name']} {fs_details['data_pool_name']}",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed(
                "We are able to create filesystems with same pool used by other filesystem"
            )
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #11
def run(ceph_cluster, **kw):
    """
    Pre-requisites:
    1. Prepare an invalid pool_name
    Test operation:
    1. Try to create a subvolume in an invalid pool
    2. Check that the subvolume is not created because of the invalid pool
    3. Using getpath, check that the subvolume path is cleaned up
    """
    try:

        tc = "CEPH-83574192"
        log.info(f"Running CephFS tests for Polarion ID -{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        subvol_name = "subvol_name".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5))
        )
        invalid_pool_name = "non_exist_pool"
        out1, err1 = fs_util.create_subvolume(
            client1,
            "cephfs",
            subvol_name,
            validate=False,
            check_ec=False,
            pool_layout=invalid_pool_name,
        )
        out2, err2 = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath cephfs {subvol_name}",
            check_ec=False,
        )
        if out1 == 0 or out2 == 0:
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):

    """
    Test operation:
    1. Create a subvolume
    2. Check info for the subvolume
    3. Check if gid and uid are set to 0
    """
    try:
        tc = "CEPH-83574181"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        subvolume_name_generate = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5))
        )
        subvolume = {
            "vol_name": "cephfs",
            "subvol_name": f"subvol_{subvolume_name_generate}",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume, check_ec=False)
        c_out, c_err = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume info cephfs subvol_{subvolume_name_generate}",
        )
        c_out_decoded = json.loads(c_out.read().decode())
        gid = c_out_decoded["gid"]
        uid = c_out_decoded["uid"]
        if gid != 0 or uid != 0:
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #13
def run(ceph_cluster, **kw):
    """
    Pre-requisites:
    1. Prepare an invalid pool_name
    Test operation:
    1. Try to create a subvolume group with the invalid pool
    2. Check that the subvolume group is not created because of the invalid pool
    3. Using getpath, check that the subvolumegroup path is cleaned up
    """
    try:
        tc = "CEPH-83574163"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_util.auth_list([client1])
        subvol_group_name = "subvol_group"
        invalid_pool_name = "non_exist_pool"
        out1, err1 = fs_util.create_subvolumegroup(
            client1,
            "cephfs",
            subvol_group_name,
            pool_layout=invalid_pool_name,
            check_ec=False,
        )
        out2, err2 = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolumegroup getpath cephfs {subvol_group_name}",
            check_ec=False,
        )
        if out1 == 0 or out2 == 0:
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573870 - Create 2 Filesystems with default values on different MDS daemons
    Pre-requisites :
    1. We need at least one client node to execute this test case

    Test Case Flow:
    1. Create 2 file systems with placement arguments
    2. validate the mds came on the specified placements
    3. Mount both the file systems using fuse mount
    4. Run IOs on the FS
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))

        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        client1 = clients[0]
        out, rc = client1.exec_command(
            sudo=True, cmd="ceph orch ps --daemon_type mds -f json")
        daemon_ls_before = json.loads(out.read().decode())
        daemon_count_before = len(daemon_ls_before)
        host_list = [
            client1.node.hostname.replace("node7", "node2"),
            client1.node.hostname.replace("node7", "node3"),
        ]
        hosts = " ".join(host_list)
        client1.exec_command(
            sudo=True,
            cmd=f"ceph fs volume create cephfs_new --placement='2 {hosts}'",
            check_ec=False,
        )
        fs_util.wait_for_mds_process(client1, "cephfs_new")
        out, rc = client1.exec_command(
            sudo=True, cmd="ceph orch ps --daemon_type mds -f json")
        daemon_ls_after = json.loads(out.read().decode())
        daemon_count_after = len(daemon_ls_after)
        assert daemon_count_after > daemon_count_before, (
            "Daemon count did not increase after creating the FS. "
            "Expectation is that there should be more MDS daemons")

        total_fs = fs_util.get_fs_details(client1)
        if len(total_fs) < 2:
            log.error(
                "We can't proceed with the test case as we are not able to create 2 filesystems"
            )
            return 1
        fs_names = [fs["name"] for fs in total_fs]
        validate_mds_placements("cephfs_new", daemon_ls_after, hosts, 2)
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"

        fs_util.fuse_mount([clients[0]],
                           fuse_mounting_dir_1,
                           extra_params=f"--client_fs {fs_names[0]}")
        fs_util.fuse_mount([clients[0]],
                           fuse_mounting_dir_2,
                           extra_params=f"--client_fs {fs_names[1]}")
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{fuse_mounting_dir_1}",
            long_running=True,
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 1 --top "
            f"{fuse_mounting_dir_2}",
            long_running=True,
        )
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_2)
        commands = [
            "ceph config set mon mon_allow_pool_delete true",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        fs_util.remove_fs(client1, "cephfs_new")
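
# NOTE: validate_mds_placements() used above is a helper from the original test
# module that is not included in this listing. A minimal sketch of the kind of
# check it is expected to perform (field names follow `ceph orch ps -f json`
# output; the exact signature and logic are assumptions): verify that enough
# MDS daemons of the new filesystem landed on the requested hosts.
def validate_mds_placements(fs_name, daemon_ls, hosts, expected_count):
    placed_hosts = [
        daemon.get("hostname")
        for daemon in daemon_ls
        if fs_name in daemon.get("daemon_id", "")
    ]
    if len(placed_hosts) < expected_count:
        raise CommandFailed(
            f"Expected {expected_count} MDS daemons for {fs_name}, found {len(placed_hosts)}"
        )
    for host in placed_hosts:
        if host not in hosts:
            raise CommandFailed(f"MDS for {fs_name} placed on unexpected host {host}")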
def run(ceph_cluster, **kw):
    """
    Pre-requisites:
    1. Create 2 cephfs volumes
       ceph fs volume create <vol_name>

    Test operation:
    1. Mount both cephfs with the same client
    2. Remove all data from both cephfs
    3. Run IO's on first cephfs
    4. Verify second cephfs has no data
    5. Copy first cephfs data to local directory
    6. Run IO's on second cephfs
    7. Verify data consistency of first cephfs using local directory
    8. Copy second cephfs data to another local directory
    9. Run IO's on first cephfs
    10. Verify data consistency of second cephfs using local directory

    Clean-up:
    1. Remove all the data in Cephfs file system
    2. Remove all the cephfs mounts
    """
    try:
        tc = "CEPH-83573876"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        mount_points = []
        fs1 = "cephfs"
        log.info(f"mounting {fs1}")
        fs1_kernel_mount_dir = "/mnt/kernel_" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        fs_util.kernel_mount(
            clients,
            fs1_kernel_mount_dir,
            mon_node_ip,
            new_client_hostname="admin",
            extra_params=f",fs={fs1}",
        )
        fs1_fuse_mount_dir = "/mnt/fuse_" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        mount_points.extend([fs1_kernel_mount_dir, fs1_fuse_mount_dir])
        fs_util.fuse_mount(
            clients,
            fs1_fuse_mount_dir,
            new_client_hostname="admin",
            extra_params=f" --client_fs {fs1}",
        )
        fs2 = "cephfs-ec"
        log.info(f"mounting {fs2}")
        fs2_kernel_mount_dir = "/mnt/kernel_" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        fs_util.kernel_mount(
            clients,
            fs2_kernel_mount_dir,
            mon_node_ip,
            new_client_hostname="admin",
            extra_params=f",fs={fs2}",
        )
        fs2_fuse_mount_dir = "/mnt/fuse_" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        mount_points.extend([fs2_kernel_mount_dir, fs2_fuse_mount_dir])
        fs_util.fuse_mount(
            clients,
            fs2_fuse_mount_dir,
            new_client_hostname="admin",
            extra_params=f" --client_fs {fs2}",
        )
        log.info(f"Remove all data in both {fs1} & {fs2}")
        commands = [
            f"rm -rf {fs1_kernel_mount_dir}/*",
            f"rm -rf {fs2_kernel_mount_dir}/*",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        log.info(f"Run IO's on {fs1}")
        commands = [
            f"mkdir {fs1_kernel_mount_dir}/dir1",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
            f"--files 1000 --files-per-dir 10 --top {fs1_kernel_mount_dir}/dir1",
            f"dd if=/dev/urandom of={fs1_kernel_mount_dir}/file1 bs=4M count=1000; done",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        log.info(f"Copy {fs1} data to local directory")
        commands = [
            "mkdir /home/cephuser/cephfs_backup",
            f"cp {fs1_kernel_mount_dir}/* -R /home/cephuser/cephfs_backup/",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        log.info(f"Verifying {fs2} has no data after IO's on {fs1}")
        output, rc = client1.exec_command(sudo=True,
                                          cmd=f"ls {fs2_kernel_mount_dir}")
        if "" == output:
            log.info(f"{fs2} has no data")
        else:
            if "file1" in output or "dir1" in output:
                log.error("Data is being shared accross file systems")
                return 1
            else:
                log.error("Directory is not empty")
                return 1
        log.info(f"Run IO's on {fs2}")
        commands = [
            f"mkdir {fs2_kernel_mount_dir}/dir2",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
            f"--files 1000 --files-per-dir 10 --top {fs2_kernel_mount_dir}/dir2",
            f"dd if=/dev/urandom of={fs2_kernel_mount_dir}/file2 bs=1M count=1000; done",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        log.info(f"Copy {fs2} data to another local directory")
        commands = [
            "mkdir /home/cephuser/cephfs_ec_backup",
            f"cp {fs2_kernel_mount_dir}/* -R /home/cephuser/cephfs_ec_backup/",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        log.info(f"Verifying data consistency in {fs1} after IO's on {fs2}")
        command = f"diff -qr {fs1_kernel_mount_dir} /home/cephuser/cephfs_backup/"
        rc = client1.exec_command(sudo=True, cmd=command, long_running=True)
        if rc == 0:
            log.info(f"Data is consistent in {fs1}")
        else:
            log.error(f"Data is inconsistent in {fs1}")
            return 1
        log.info(f"Run IO's on {fs1}")
        commands = [
            f"mkdir {fs1_kernel_mount_dir}/dir3",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
            f"--files 1000 --files-per-dir 10 --top {fs1_kernel_mount_dir}/dir3",
            f"dd if=/dev/urandom of={fs1_kernel_mount_dir}/file3 bs=8M count=1000; done",
        ]
        log.info(f"Verifying data consistency in {fs2} after IO's on {fs1}")
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        command = f"diff -qr {fs2_kernel_mount_dir} /home/cephuser/cephfs_ec_backup/"
        rc = client1.exec_command(sudo=True, cmd=command, long_running=True)
        if rc == 0:
            log.info(f"Data is consistent in {fs2}")
        else:
            log.error(f"Data is inconsistent in {fs2}")
            return 1
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up the system")
        client1.exec_command(sudo=True, cmd=f"rm -rf {mount_points[0]}/*")
        client1.exec_command(sudo=True, cmd=f"rm -rf {mount_points[2]}/*")
        client1.exec_command(sudo=True,
                             cmd="rm -rf /home/cephuser/*backup/",
                             check_ec=False)
        for client in clients:
            for mount_point in mount_points:
                client.exec_command(sudo=True,
                                    cmd=f"umount {mount_point}",
                                    check_ec=False)
Example #16
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573521	Remove a subvolume group by retaining the snapshot : ceph fs subvolume rm <vol_n...

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the fs volume (ceph fs volume create cephfs) if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_retain_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_retain_snapshot_1
    7. Collect the data from the mounted volume to local disk for verification

    Retain the snapshots and verify the data after cloning:
    1. ceph fs snapshot rm <vol_name> <subvol_name> snap_name [--group_name <subvol_group_name>
        --force --retain-snapshots]
    2. Remove the sub volume.
    3. Clone the new volume from the retained snapshots
    4. Check the contents of the cloned volume with the copy present locally

    Clean Up:
    1. Del all the snapshots created
    2. Del Subvolumes
    3. Del SubvolumeGroups
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_retain_snapshot_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "group_name": "subvolgroup_retain_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_retain_snapshot subvolgroup_retain_snapshot_1",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvolgroup_retain_snapshot_1",
        }
        fs_util.create_snapshot(client1, **snapshot)
        client1.exec_command(sudo=True, cmd=f"mkdir -p /tmp/{mounting_dir}")
        client1.exec_command(
            sudo=True,
            cmd=f"cp -r {kernel_mounting_dir_1}/* /tmp/{mounting_dir}")

        fs_util.remove_subvolume(client1,
                                 **subvolume,
                                 retain_snapshots=True,
                                 force=True,
                                 validate=False)
        log.info(
            "Verifying Get the path of sub volume as subvolume will still be listed in filesystem"
        )
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_retain_snapshot subvolgroup_retain_snapshot_1",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed(
                "Remove subvolume with --retain-snapshots has not succeeded. "
                "We are still able to fetch the path of the subvolume after deletion")
        log.info("Clone a subvolume from snapshot")
        retain_snapshot_1 = {
            "vol_name": default_fs,
            "subvol_name": "subvol_retain_snapshot",
            "snap_name": "snap_1",
            "target_subvol_name": "retain_snapshot_1",
            "group_name": "subvolgroup_retain_snapshot_1",
        }
        fs_util.create_clone(client1, **retain_snapshot_1)
        fs_util.validate_clone_state(client1, retain_snapshot_1)
        clonevol_path, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} {retain_snapshot_1['target_subvol_name']}",
        )
        fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_2,
            extra_params=f" -r {clonevol_path.read().decode().strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=f"diff -qr /tmp/{mounting_dir} {fuse_mounting_dir_2}")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        rmclone_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "retain_snapshot_1"
            },
        ]
        for clone_vol in rmclone_list:
            fs_util.remove_subvolume(client1, **clone_vol)
        fs_util.remove_snapshot(client1,
                                **snapshot,
                                validate=False,
                                force=True)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
Example #17
def run(ceph_cluster, **kw):
    try:
        log.info(f"MetaData Information {log.metadata} in {__name__}")
        tc = "nfs-ganesha"
        nfs_mounting_dir = "/mnt/nfs/"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        rhbuild = config.get("rhbuild")
        if "5." in rhbuild:
            from tests.cephfs.cephfs_utilsV1 import FsUtils

            fs_util = FsUtils(ceph_cluster)
            nfs_server = ceph_cluster.get_ceph_objects("nfs")
            nfs_client = ceph_cluster.get_ceph_objects("client")
            fs_util.auth_list(nfs_client)
            nfs_name = "cephfs-nfs"
            out, rc = nfs_client[0].exec_command(
                sudo=True, cmd="ceph fs ls | awk {' print $2'} ")
            fs_name = out.rstrip()
            fs_name = fs_name.strip(",")
            nfs_export_name = "/export1"
            path = "/"
            nfs_server_name = nfs_server[0].node.hostname
            # Create ceph nfs cluster
            nfs_client[0].exec_command(sudo=True,
                                       cmd="ceph mgr module enable nfs")
            out, rc = nfs_client[0].exec_command(
                sudo=True,
                cmd=f"ceph nfs cluster create {nfs_name} {nfs_server_name}")
            # Verify ceph nfs cluster is created
            if wait_for_process(client=nfs_client[0],
                                process_name=nfs_name,
                                ispresent=True):
                log.info("ceph nfs cluster created successfully")
            else:
                raise CommandFailed("Failed to create nfs cluster")
            # Create cephfs nfs export
            if "5.0" in rhbuild:
                nfs_client[0].exec_command(
                    sudo=True,
                    cmd=f"ceph nfs export create cephfs {fs_name} {nfs_name} "
                    f"{nfs_export_name} path={path}",
                )
            else:
                nfs_client[0].exec_command(
                    sudo=True,
                    cmd=f"ceph nfs export create cephfs {nfs_name} "
                    f"{nfs_export_name} {fs_name} path={path}",
                )
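            # NOTE: the two branches above reflect the change in the argument
            # order of `ceph nfs export create cephfs` after RHCS 5.0 (the
            # filesystem name moved relative to the cluster id and pseudo
            # path), which is why the command is built differently per build.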

            # Verify ceph nfs export is created
            out, rc = nfs_client[0].exec_command(
                sudo=True, cmd=f"ceph nfs export ls {nfs_name}")
            if nfs_export_name in out:
                log.info("ceph nfs export created successfully")
            else:
                raise CommandFailed("Failed to create nfs export")
            # Mount ceph nfs exports
            nfs_client[0].exec_command(sudo=True,
                                       cmd=f"mkdir -p {nfs_mounting_dir}")
            assert wait_for_cmd_to_succeed(
                nfs_client[0],
                cmd=
                f"mount -t nfs -o port=2049 {nfs_server_name}:{nfs_export_name} {nfs_mounting_dir}",
            )
            nfs_client[0].exec_command(
                sudo=True,
                cmd=
                f"mount -t nfs -o port=2049 {nfs_server_name}:{nfs_export_name} {nfs_mounting_dir}",
            )
            out, rc = nfs_client[0].exec_command(cmd="mount")
            mount_output = out.split()
            log.info("Checking if nfs mount is is passed of failed:")
            assert nfs_mounting_dir.rstrip("/") in mount_output
            log.info("Creating Directory")
            out, rc = nfs_client[0].exec_command(
                sudo=True, cmd=f"mkdir {nfs_mounting_dir}{dir_name}")
            nfs_client[0].exec_command(
                sudo=True,
                cmd=
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                f"{nfs_mounting_dir}{dir_name}",
                long_running=True,
            )
            nfs_client[0].exec_command(
                sudo=True,
                cmd=
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation read --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                f"{nfs_mounting_dir}{dir_name}",
                long_running=True,
            )
            # Unmount nfs
            nfs_client[0].exec_command(sudo=True,
                                       cmd=f"umount {nfs_mounting_dir}")
            # Delete cephfs nfs export
            nfs_client[0].exec_command(
                sudo=True,
                cmd=f"ceph nfs export delete {nfs_name} {nfs_export_name}")
            # Verify cephfs nfs export is deleted
            out, rc = nfs_client[0].exec_command(
                sudo=True, cmd=f"ceph nfs export ls {nfs_name}")

            if nfs_export_name not in out:
                log.info("cephf nfs export deleted successfully")
            else:
                raise CommandFailed("Failed to delete cephfs nfs export")
            # Delete nfs cluster
            nfs_client[0].exec_command(
                sudo=True, cmd=f"ceph nfs cluster delete {nfs_name}")
            # Adding Delay to reflect in cluster list
            time.sleep(5)
            if not wait_for_process(client=nfs_client[0],
                                    process_name=nfs_name,
                                    ispresent=False):
                raise CommandFailed("Cluster has not been deleted")
            # Verify nfs cluster is deleted
            out, rc = nfs_client[0].exec_command(sudo=True,
                                                 cmd="ceph nfs cluster ls")
            if nfs_name not in out:
                log.info("ceph nfs cluster deleted successfully")
            else:
                raise CommandFailed("Failed to delete nfs cluster")

        else:
            from tests.cephfs.cephfs_utils import FsUtils

            fs_util = FsUtils(ceph_cluster)
            client_info, rc = fs_util.get_clients(build)
            if rc == 0:
                log.info("Got client info")
            else:
                raise CommandFailed("fetching client info failed")
            nfs_server = [client_info["kernel_clients"][0]]
            nfs_client = [client_info["kernel_clients"][1]]
            rc1 = fs_util.auth_list(nfs_server)
            rc2 = fs_util.auth_list(nfs_client)
            print(rc1, rc2)
            if rc1 == 0 and rc2 == 0:
                log.info("got auth keys")
            else:
                raise CommandFailed("auth list failed")
            rc = fs_util.nfs_ganesha_install(nfs_server[0])
            if rc == 0:
                log.info("NFS ganesha installed successfully")
            else:
                raise CommandFailed("NFS ganesha installation failed")
            rc = fs_util.nfs_ganesha_conf(nfs_server[0], "admin")
            if rc == 0:
                log.info("NFS ganesha config added successfully")
            else:
                raise CommandFailed("NFS ganesha config adding failed")
            rc = fs_util.nfs_ganesha_mount(nfs_client[0], nfs_mounting_dir,
                                           nfs_server[0].node.hostname)
            if rc == 0:
                log.info("NFS-ganesha mount passed")
            else:
                raise CommandFailed("NFS ganesha mount failed")

            mounting_dir = nfs_mounting_dir + "ceph/"
            out, rc = nfs_client[0].exec_command(
                sudo=True, cmd=f"mkdir {mounting_dir}{dir_name}")
            nfs_client[0].exec_command(
                sudo=True,
                cmd=
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                f"{mounting_dir}{dir_name}",
                long_running=True,
            )
            nfs_client[0].exec_command(
                sudo=True,
                cmd=
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation read --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                f"{mounting_dir}{dir_name}",
                long_running=True,
            )
            log.info("Cleaning up")
            nfs_client[0].exec_command(sudo=True,
                                       cmd=f"rm -rf {mounting_dir}*")
            log.info("Unmounting nfs-ganesha mount on client:")
            nfs_client[0].exec_command(sudo=True,
                                       cmd=" umount %s -l" %
                                       (nfs_mounting_dir))
            log.info("Removing nfs-ganesha mount dir on client:")
            nfs_client[0].exec_command(sudo=True,
                                       cmd="rm -rf  %s" % (nfs_mounting_dir))

            log.info("Cleaning up successfull")
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #18
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573520	Validate the max snapshot that can be created under a root FS sub volume level.
                    Increase by 50 at a time until it reaches the max limit.

    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the fs volume (ceph fs volume create cephfs) if the volume is not there
    3. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_max_snap --size 5368706371 --group_name subvolgroup_1
    4. Create Data on the subvolume
        Ex:  python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
            100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/

    Test Script Flow :
    1. We will create snapshots in batches of 50, up to 1000.
    2. We will break out whenever the maximum allowed number of snapshots is reached.

    Clean up:
    1. Deletes all the snapshots created
    2. Deletes snapshot and subvolume created.
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_max_snap",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_max_snap",
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )
        client1.exec_command(
            sudo=True,
            cmd=
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 "
            f"--files 100 --files-per-dir 10 --dirs-per-dir 2 --top "
            f"{kernel_mounting_dir_1}",
            long_running=True,
        )
        snapshot_list = [{
            "vol_name": default_fs,
            "subvol_name": "subvol_max_snap",
            "snap_name": f"snap_limit_{x}",
        } for x in range(1, 1000)]
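        # Assumption about defaults: the break below is expected once the
        # cluster's per-directory snapshot limit is reached; on recent builds
        # this is governed by the MDS option mds_max_snaps_per_dir (default 100).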
        # Assume every snapshot in the list gets created unless the limit is hit.
        max_snapshots_allowed = len(snapshot_list)
        for i in range(0, 1000, 50):
            for snapshot in snapshot_list[i:i + 50]:
                try:
                    fs_util.create_snapshot(clients[0],
                                            **snapshot,
                                            validate=False)
                except CommandFailed:
                    log.info(
                        f"Max Snapshots allowed under a root FS sub volume level is {i}"
                    )
                    max_snapshots_allowed = i
                    break
            else:
                log.info(f"Snapshot creation is successful from {i} to {i + 50}")
                continue
            break
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        for snapshot in snapshot_list[0:max_sanpshots_allowed]:
            fs_util.remove_snapshot(client1, **snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
Example #19
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573409    Test to validate the removal of quota_max_bytes
                     Create a FS and create 10 directories and mount them on kernel and fuse client(5 mounts each)
                     Set max bytes quota to a number(say 1Gb) and fill data until it reaches the limit and
                     verify if the set quota limit is working fine.
                     Remove the quota once it reaches the max bytes and try adding more
                     data, verify if set quota is removed. Repeat the procedure for few more times.


    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create the fs volume (ceph fs volume create cephfs) if the volume is not there
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_1 --size 5368706371 --group_name subvolgroup_1
    5. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1

    Test Case Flow:
    1. Mount the subvolume_1 on the client using fuse
    2. Mount the subvolume_2 on the client using kernel
    3. Set the max_bytes quota attribute to 1GB on both mount points
    4. Create 3GB of data and check that it fails
    5. Perform the same on the kernel mount
    6. Remove the bytes quota (i.e., set max_bytes to 0) and try creating files in the same directory used in step 3
    7. Perform the same on the kernel mount
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_quota_byte_remove_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_remove_fuse",
                "group_name": "subvolgroup_quota_byte_remove_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_byte_remove_kernel",
                "group_name": "subvolgroup_quota_byte_remove_1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_byte_remove_kernel subvolgroup_quota_byte_remove_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_byte_remove_fuse subvolgroup_quota_byte_remove_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.read().decode().strip()}",
        )
        fs_util.set_quota_attrs(clients[0], 100, 1073741824,
                                fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        log.info("Removing the quota to 0 and validating file quota attr")
        fs_util.set_quota_attrs(clients[0], 100, "0", fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        fs_util.set_quota_attrs(clients[0], 100, 1073741824,
                                kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        log.info("Increasing the quota to 0 and validating file quota attr")
        fs_util.set_quota_attrs(clients[0], 100, "0", kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.byte_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
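
The byte quota that fs_util.set_quota_attrs applies corresponds to the ceph.quota.max_bytes extended attribute on the mounted directory, and setting it to 0 clears the limit. A minimal standalone sketch of the same set/remove cycle, assuming a CephFS mount at /mnt/cephfs_fuse_demo and the attr utilities installed on the client (path and limit are illustrative):

import subprocess

MOUNT_DIR = "/mnt/cephfs_fuse_demo"  # assumed CephFS mount point

def set_max_bytes(path: str, limit: int) -> None:
    # On CephFS, ceph.quota.max_bytes=0 removes the byte quota on a directory
    subprocess.run(
        ["setfattr", "-n", "ceph.quota.max_bytes", "-v", str(limit), path],
        check=True,
    )

def get_max_bytes(path: str) -> int:
    out = subprocess.run(
        ["getfattr", "--only-values", "-n", "ceph.quota.max_bytes", path],
        capture_output=True, text=True, check=True,
    )
    return int(out.stdout.strip())

set_max_bytes(MOUNT_DIR, 1073741824)           # 1 GiB limit
assert get_max_bytes(MOUNT_DIR) == 1073741824  # quota is visible via the xattr
set_max_bytes(MOUNT_DIR, 0)                    # remove the quota
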
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. create fs volume create cephfs and cephfs-ec

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name> --mode <octal_value>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount subvolume on both fuse and kernel clients and run IO's
    4. Validate the octal mode on the subvolumegroup path via stat (see the sketch after this function)

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create cephfs subvolumegroup with different octal modes")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1",
                "mode": "777",
            },
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_2",
                "mode": "700",
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1",
                "mode": "755",
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info("Create 2 Sub volumes on each of the subvolume group of Size 5GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_2",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_2",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_5",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_6",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernel and 1 subvolume on Fuse → Client1"
        )
        if build.startswith("5"):
            kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_1,
                ",".join(mon_node_ips),
                extra_params=f",fs={default_fs}",
            )
            fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_1,
                extra_params=f" --client_fs {default_fs}",
            )

            kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_3 subvolgroup_2",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_2,
                ",".join(mon_node_ips),
                extra_params=f",fs={default_fs}",
            )
            fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_4 subvolgroup_2",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_2,
                extra_params=f" --client_fs {default_fs}",
            )

            kernel_mounting_dir_3 = f"/mnt/cephfs_kernel{mounting_dir}_EC_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")
            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_5 subvolgroup_ec1",
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_3,
                ",".join(mon_node_ips),
                extra_params=",fs=cephfs-ec",
            )
            fuse_mounting_dir_3 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_6 subvolgroup_ec1",
            )
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_3,
                extra_params=" --client_fs cephfs-ec",
            )
        else:
            kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_1,
                ",".join(mon_node_ips),
            )
            fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_1,
            )

            kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_2/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_3 subvolgroup_2",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_2,
                ",".join(mon_node_ips),
            )
            fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_2/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume getpath {default_fs} subvol_4 subvolgroup_2",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_2,
            )

            kernel_mounting_dir_3 = f"/mnt/cephfs_kernel{mounting_dir}_EC_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")
            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_5 subvolgroup_ec1",
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_3,
                ",".join(mon_node_ips),
                extra_params=",fs=cephfs-ec",
            )
            fuse_mounting_dir_3 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_6 subvolgroup_ec1",
            )
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_3,
                extra_params=" --client_fs cephfs-ec",
            )

        log.info("Get the path of subvolume groups")
        subvolgroup1_getpath, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolumegroup getpath {default_fs} subvolgroup_1",
        )
        subvolgroup1_getpath = subvolgroup1_getpath.strip()

        subvolgroup2_getpath, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolumegroup getpath {default_fs} subvolgroup_2",
        )
        subvolgroup2_getpath = subvolgroup2_getpath.strip()

        subvolgroup_ec_getpath, rc = clients[0].exec_command(
            sudo=True,
            cmd="ceph fs subvolumegroup getpath cephfs-ec subvolgroup_ec1",
        )
        subvolgroup_ec_getpath = subvolgroup_ec_getpath.strip()

        def get_defined_mode(group_name, subvolumegroup_list):
            for subvolumegroup in subvolumegroup_list:
                if group_name == subvolumegroup["group_name"]:
                    return subvolumegroup.get("mode")

        log.info("Validate the octal mode set on the subgroup")
        subgroup_1_mode = get_defined_mode(
            "subvolgroup_1", subvolumegroup_list=subvolumegroup_list
        )
        subgroup_2_mode = get_defined_mode(
            "subvolgroup_2", subvolumegroup_list=subvolumegroup_list
        )
        subgroup_ec_mode = get_defined_mode(
            "subvolgroup_ec1", subvolumegroup_list=subvolumegroup_list
        )
        stat_of_octal_mode_on_kernel_dir1 = fs_util.get_stats(
            client=clients[0],
            file_path=kernel_mounting_dir_1.rstrip("/") + subvolgroup1_getpath,
            format="%a",
        )
        stat_of_octal_mode_on_kernel_dir2 = fs_util.get_stats(
            client=clients[0],
            file_path=kernel_mounting_dir_2.rstrip("/") + subvolgroup2_getpath,
            format="%a",
        )
        stat_of_octal_mode_on_kernel_dir3 = fs_util.get_stats(
            client=clients[1],
            file_path=kernel_mounting_dir_3.rstrip("/") + subvolgroup_ec_getpath,
            format="%a",
        )
        stat_of_octal_mode_on_fuse_dir1 = fs_util.get_stats(
            client=clients[0],
            file_path=fuse_mounting_dir_1.rstrip("/") + subvolgroup1_getpath,
            format="%a",
        )
        stat_of_octal_mode_on_fuse_dir2 = fs_util.get_stats(
            client=clients[0],
            file_path=fuse_mounting_dir_2.rstrip("/") + subvolgroup2_getpath,
            format="%a",
        )
        stat_of_octal_mode_on_fuse_dir3 = fs_util.get_stats(
            client=clients[1],
            file_path=fuse_mounting_dir_3.rstrip("/") + subvolgroup_ec_getpath,
            format="%a",
        )

        if int(subgroup_1_mode) != int(stat_of_octal_mode_on_kernel_dir1) and int(
            subgroup_1_mode
        ) != int(stat_of_octal_mode_on_fuse_dir1):
            log.error("Octal values are mismatching on subvolgroup_1")
            return 1
        if int(subgroup_2_mode) != int(stat_of_octal_mode_on_kernel_dir2) and int(
            subgroup_2_mode
        ) != int(stat_of_octal_mode_on_fuse_dir2):
            log.error("Octal values are mismatching on subvolgroup_2")
            return 1
        if int(subgroup_ec_mode) != int(stat_of_octal_mode_on_kernel_dir3) and int(
            subgroup_ec_mode
        ) != int(stat_of_octal_mode_on_fuse_dir3):
            log.error("Octal values are mismatching on subvolgroup_ec1")
            return 1

        log.info("Run IO's")
        with parallel() as p:
            for i in [
                kernel_mounting_dir_1,
                fuse_mounting_dir_1,
                kernel_mounting_dir_2,
                fuse_mounting_dir_2,
            ]:
                p.spawn(fs_util.run_ios, clients[0], i)

            for i in [kernel_mounting_dir_3, fuse_mounting_dir_3]:
                p.spawn(fs_util.run_ios, clients[1], i)

        log.info("Clean up the system")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)

        for i in [kernel_mounting_dir_1, kernel_mounting_dir_2]:
            fs_util.client_clean_up(
                "umount", kernel_clients=[clients[0]], mounting_dir=i
            )

        for i in [fuse_mounting_dir_1, fuse_mounting_dir_2]:
            fs_util.client_clean_up("umount", fuse_clients=[clients[0]], mounting_dir=i)

        fs_util.client_clean_up(
            "umount", kernel_clients=[clients[1]], mounting_dir=kernel_mounting_dir_3
        )
        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[1]], mounting_dir=fuse_mounting_dir_3
        )

        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true"
        )

        return 0
    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
        return 1
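
The mode check in the test above reduces to comparing the --mode passed at subvolumegroup creation with stat's octal output on the group path under a mount of the filesystem root. A minimal sketch of that comparison, assuming the ceph CLI is usable on the node and the filesystem root is mounted at /mnt/cephfs_demo (volume, group and mode are illustrative):

import subprocess

VOL, GROUP, MODE = "cephfs", "subvolgroup_demo", "755"   # illustrative names
MOUNT_ROOT = "/mnt/cephfs_demo"                          # assumed mount of the FS root

subprocess.run(
    ["ceph", "fs", "subvolumegroup", "create", VOL, GROUP, "--mode", MODE],
    check=True,
)
group_path = subprocess.run(
    ["ceph", "fs", "subvolumegroup", "getpath", VOL, GROUP],
    capture_output=True, text=True, check=True,
).stdout.strip()                                         # e.g. /volumes/subvolgroup_demo
observed = subprocess.run(
    ["stat", "-c", "%a", MOUNT_ROOT + group_path],
    capture_output=True, text=True, check=True,
).stdout.strip()
assert observed == MODE, f"expected mode {MODE}, found {observed}"
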
Example #21
def run(ceph_cluster, **kw):
    """
    CEPH-83574024 - Ensure Snapshot and cloning works on nfs exports
    Pre-requisites:
    1. Create cephfs volume
       ceph fs volume create <vol_name>
    2. Create nfs cluster
       ceph nfs cluster create <nfs_name> <nfs_server>

    Test operation:
    1. Create cephfs nfs export
       ceph nfs export create cephfs <fs_name> <nfs_name> <nfs_export_name> path=<export_path>
    2. Create cephfs subvolume group
    3. Create cephfs subvolume in cephfs subvolume group
    4. Create cephfs subvolume in the default cephfs subvolume group
    5. Mount nfs mount with cephfs export
       "mount -t nfs -o port=2049 <nfs_server>:<nfs_export> <nfs_mounting_dir>
    6. Run IOs on both cephfs subvolumes
    7. Create snapshots of both cephfs subvolumes
    8. Create clone of both cephfs subvolumes from snapshots
    9. Verify data is consistent across subvolumes, snapshots & clones (see the snapshot/clone sketch after this function)

    Clean-up:
    1. Remove all the data in Cephfs file system
    2. Remove all the cephfs mounts
    3. Delete cephfs nfs export
    """
    try:
        tc = "CEPH-83574024"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        rhbuild = config.get("rhbuild")
        nfs_servers = ceph_cluster.get_ceph_objects("nfs")
        nfs_server = nfs_servers[0].node.hostname
        nfs_name = "cephfs-nfs"
        nfs_export_name = "/export_" + "".join(
            secrets.choice(string.digits) for i in range(3))
        export_path = "/"
        fs_name = "cephfs"
        nfs_mounting_dir = "/mnt/nfs_" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        if "5.0" in rhbuild:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {fs_name} {nfs_name} "
                f"{nfs_export_name} path={export_path}",
            )
        else:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {nfs_name} "
                f"{nfs_export_name} {fs_name} path={export_path}",
            )
        subvolumegroup = {
            "vol_name": fs_name,
            "group_name": "subvolume_group1",
        }
        fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": fs_name,
                "subvol_name": "subvolume1",
                "group_name": "subvolume_group1",
            },
            {
                "vol_name": fs_name,
                "subvol_name": "subvolume2",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(client1, **subvolume)
        commands = [
            f"mkdir -p {nfs_mounting_dir}",
            f"mount -t nfs -o port=2049 {nfs_server}:{nfs_export_name} {nfs_mounting_dir}",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        out, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {fs_name} subvolume1 --group_name subvolume_group1",
        )
        subvolume1_path = out.read().decode().rstrip()
        out, rc = client1.exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {fs_name} subvolume2")
        subvolume2_path = out.read().decode().rstrip()
        commands = [
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --file-size 4 "
            f"--files 1000 --top {nfs_mounting_dir}{subvolume1_path}",
            f"for n in {{1..20}}; do     dd if=/dev/urandom of={nfs_mounting_dir}{subvolume2_path}"
            f"/file$(printf %03d "
            "$n"
            ") bs=500k count=1000; done",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command, long_running=True)
        commands = [
            f"ceph fs subvolume snapshot create {fs_name} subvolume1 snap1 --group_name subvolume_group1",
            f"ceph fs subvolume snapshot create {fs_name} subvolume2 snap2",
        ]
        for command in commands:
            out, err = client1.exec_command(sudo=True, cmd=command)
        clone_status_1 = {
            "vol_name": fs_name,
            "subvol_name": "subvolume1",
            "snap_name": "snap1",
            "target_subvol_name": "clone1",
            "group_name": "subvolume_group1",
            "target_group_name": "subvolume_group1",
        }
        fs_util.create_clone(client1, **clone_status_1)
        fs_util.validate_clone_state(client1, clone_status_1, timeout=6000)
        clone_status_2 = {
            "vol_name": fs_name,
            "subvol_name": "subvolume2",
            "snap_name": "snap2",
            "target_subvol_name": "clone2",
        }
        fs_util.create_clone(client1, **clone_status_2)
        fs_util.validate_clone_state(client1, clone_status_2, timeout=6000)
        out, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {fs_name} clone1 --group_name subvolume_group1",
        )
        clone1_path = out.read().decode().rstrip()
        out, rc = client1.exec_command(
            sudo=True, cmd=f"ceph fs subvolume getpath {fs_name} clone2")
        clone2_path = out.read().decode().rstrip()
        commands = [
            f"diff -r {nfs_mounting_dir}{subvolume1_path} {nfs_mounting_dir}{subvolume1_path}/.snap/_snap1*",
            f"diff -r {nfs_mounting_dir}{subvolume2_path} {nfs_mounting_dir}{subvolume2_path}/.snap/_snap2*",
            f"diff -r {nfs_mounting_dir}{subvolume1_path} {nfs_mounting_dir}{clone1_path}",
            f"diff -r {nfs_mounting_dir}{subvolume2_path} {nfs_mounting_dir}{clone2_path}",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
        log.info("Test completed successfully")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up")
        client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume snapshot rm {fs_name} subvolume1 snap1 --group_name subvolume_group1",
            check_ec=False,
        )
        client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume snapshot rm {fs_name} subvolume2 snap2",
            check_ec=False,
        )
        client1.exec_command(sudo=True, cmd=f"rm -rf {nfs_mounting_dir}/*")
        client1.exec_command(sudo=True, cmd=f"umount {nfs_mounting_dir}")
        client1.exec_command(sudo=True,
                             cmd=f"rm -rf {nfs_mounting_dir}/",
                             check_ec=False)
        client1.exec_command(
            sudo=True,
            cmd=f"ceph nfs export delete {nfs_name} {nfs_export_name}",
            check_ec=False,
        )
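
The snapshot and clone steps above use the standard subvolume snapshot/clone commands, after which the clone has to be polled until it reaches the complete state before its data can be compared. A minimal sketch of that sequence, assuming a cephfs volume with an existing subvolume named subvolume_demo (all names are illustrative):

import json
import subprocess
import time

FS, SUBVOL, SNAP, CLONE = "cephfs", "subvolume_demo", "snap_demo", "clone_demo"

def ceph(*args):
    # Thin wrapper around the ceph CLI that returns stdout
    return subprocess.run(["ceph", *args], capture_output=True, text=True, check=True).stdout

# Snapshot the subvolume, then clone the snapshot into a new subvolume.
ceph("fs", "subvolume", "snapshot", "create", FS, SUBVOL, SNAP)
ceph("fs", "subvolume", "snapshot", "clone", FS, SUBVOL, SNAP, CLONE)

# Poll the clone until it is complete before mounting or diffing its contents.
while True:
    status = json.loads(ceph("fs", "clone", "status", FS, CLONE, "--format", "json"))
    if status["status"]["state"] == "complete":
        break
    time.sleep(5)
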
Example #22
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Fill 60% of the cluster with data
    Test operation:
    1. Create a volume
    2. Mount the cephfs on both fuse and kernel clients
    3. Create few directory from the both clients
    4. Execute the command "ceph fs set <fs_name> max_mds n [where n is the number]"
    5. Check if the number of MDS daemons increases and decreases properly (see the sketch after this function)
    """
    try:
        tc = "CEPH-83573462"
        log.info(f"Running CephFS tests for BZ-{tc}")
        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        client2 = clients[1]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        mon_node_ips = fs_util.get_mon_node_ips()
        kernel_dir_generate = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5))
        )
        kernel_mounting_dir = f"/mnt/cephfs_kernel{kernel_dir_generate}/"
        fs_util.auth_list([client1])
        fs_util.kernel_mount([client1], kernel_mounting_dir, ",".join(mon_node_ips))
        client1.exec_command(
            sudo=True,
            cmd=f"dd if=/dev/zero of={kernel_mounting_dir}" + ".txt bs=5M count=1000",
            long_running=True,
        )
        for i in range(10):
            dir_name_generate = "".join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in list(range(5))
            )
            client1.exec_command(
                sudo=True, cmd=f"mkdir {kernel_mounting_dir}dir_{dir_name_generate}"
            )
        fuse_dir_generate = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5))
        )
        fuse_mounting_dir = f"/mnt/cephfs_fuse{fuse_dir_generate}/"
        client2.exec_command(sudo=True, cmd="dnf install ceph-fuse")
        fs_util.auth_list([client2])
        fs_util.fuse_mount([client2], fuse_mounting_dir)
        for i in range(10):
            dir_name_generate = "".join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in list(range(5))
            )
            client2.exec_command(
                sudo=True, cmd=f"mkdir {fuse_mounting_dir}dir_{dir_name_generate}"
            )
        c1_out, c1_result = client1.exec_command(
            sudo=True, cmd="ceph fs get cephfs -f json"
        )
        decoded_out = json.loads(c1_out.read().decode())
        number_of_up_temp = decoded_out["mdsmap"]["up"]
        number_of_up = len(number_of_up_temp)
        number_of_mds_max = decoded_out["mdsmap"]["max_mds"]
        c1_out2, result2 = client1.exec_command(sudo=True, cmd="ceph -s -f json")
        decoded_out2 = json.loads(c1_out2.read().decode())
        number_of_standby = decoded_out2["fsmap"]["up:standby"]
        log.info(number_of_standby)
        counts = number_of_standby
        for i in range(counts):
            number_of_mds_max = number_of_mds_max + 1
            client1.exec_command(
                sudo=True, cmd=f"ceph fs set cephfs max_mds {str(number_of_mds_max)}"
            )
            number_of_standby = number_of_standby - 1
            number_of_up = number_of_up + 1
            time.sleep(50)
            kernel_output, kernel_result = client1.exec_command(
                sudo=True, cmd="ceph fs get cephfs -f json"
            )
            kernel_decoded = json.loads(kernel_output.read().decode())
            current_max_mds = kernel_decoded["mdsmap"]["max_mds"]
            kernel_output2, kernel_result2 = client1.exec_command(
                sudo=True, cmd="ceph -s -f json"
            )
            kernel_decoded2 = json.loads(kernel_output2.read().decode())
            current_standby = kernel_decoded2["fsmap"]["up:standby"]
            if current_max_mds != number_of_mds_max:
                return 1
            if number_of_up != number_of_mds_max:
                return 1
            if number_of_standby != current_standby:
                return 1
        for i in range(counts):
            number_of_mds_max = number_of_mds_max - 1
            client1.exec_command(
                sudo=True, cmd=f"ceph fs set cephfs max_mds {str(number_of_mds_max)}"
            )
            number_of_standby = number_of_standby + 1
            number_of_up = number_of_up - 1
            time.sleep(50)
            kernel_output, kernel_result = client1.exec_command(
                sudo=True, cmd="ceph fs get cephfs -f json"
            )
            kernel_decoded = json.loads(kernel_output.read().decode())
            current_max_mds = kernel_decoded["mdsmap"]["max_mds"]
            kernel_output2, kernel_result2 = client1.exec_command(
                sudo=True, cmd="ceph -s -f json"
            )
            kernel_decoded2 = json.loads(kernel_output2.read().decode())
            current_standby = kernel_decoded2["fsmap"]["up:standby"]
            if current_max_mds != number_of_mds_max:
                return 1
            if number_of_up != number_of_mds_max:
                return 1
            if number_of_standby != current_standby:
                return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
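
The invariant the loops above verify after each max_mds change can be read directly from ceph fs get and ceph status: the number of up MDS daemons should track max_mds, while the standby pool shrinks or grows by the same amount. A minimal sketch of sampling those counters around one max_mds bump, assuming the ceph CLI is available and at least one standby MDS exists (the 50 s settle time mirrors the test above):

import json
import subprocess
import time

def ceph_json(*args):
    out = subprocess.run(["ceph", *args, "-f", "json"],
                         capture_output=True, text=True, check=True).stdout
    return json.loads(out)

def mds_counts(fs_name="cephfs"):
    fs_map = ceph_json("fs", "get", fs_name)
    status = ceph_json("-s")
    return {
        "max_mds": fs_map["mdsmap"]["max_mds"],
        "up": len(fs_map["mdsmap"]["up"]),
        "standby": status["fsmap"]["up:standby"],
    }

before = mds_counts()
subprocess.run(["ceph", "fs", "set", "cephfs", "max_mds", str(before["max_mds"] + 1)],
               check=True)
time.sleep(50)  # give a standby MDS time to be promoted to active
after = mds_counts()
assert after["max_mds"] == before["max_mds"] + 1
assert after["up"] == before["up"] + 1 and after["standby"] == before["standby"] - 1
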
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. create fs volume create cephfs and cephfs-ec

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name> --gid <num> --uid <num>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount subvolume on both fuse and kernel clients and run IO's
    4. Validate the uid/gid on the subvolumegroup path via stat (see the sketch after this function)

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create cephfs subvolumegroup with customized uid and gid ")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1",
                "uid": "20",
                "gid": "30",
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1",
                "uid": "40",
                "gid": "50",
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info("Create 2 Sub volumes on each of the subvolume group Size 5 GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernel and 1 subvloume on Fuse → Client1"
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
        )

        log.info(
            "On EC,Mount 1 subvolumegroup/subvolume on kernal and 1 subvloume on Fuse → Client2"
        )
        if build.startswith("5"):
            kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_EC_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_3 subvolgroup_ec1",
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_2,
                ",".join(mon_node_ips),
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd="ceph fs subvolume getpath cephfs-ec subvol_4 subvolgroup_ec1",
            )
            fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_2,
                extra_params=" --client_fs cephfs-ec",
            )

        log.info("Get the path of subvolume group")
        subvolgroup_default, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph fs subvolumegroup getpath {default_fs} subvolgroup_1",
        )
        subvolgroup_default_path = subvolgroup_default.read().decode().strip()
        subvolgroup_ec, rc = clients[0].exec_command(
            sudo=True,
            cmd="ceph fs subvolumegroup getpath cephfs-ec subvolgroup_ec1",
        )
        subvolgroup_ec_path = subvolgroup_ec.read().decode().strip()

        def get_defined_uid(group_name, subvolumegroup_list):
            for subvolumegroup in subvolumegroup_list:
                if group_name == subvolumegroup["group_name"]:
                    return subvolumegroup.get("uid")

        log.info("Validate the uid of the subgroup")
        subgroup_1_uid = get_defined_uid(
            "subvolgroup_1", subvolumegroup_list=subvolumegroup_list
        )
        subgroup_2_uid = get_defined_uid(
            "subvolgroup_ec1", subvolumegroup_list=subvolumegroup_list
        )
        stat_of_uid_on_kernel_default_fs = fs_util.get_stats(
            client=clients[0],
            file_path=kernel_mounting_dir_1.rstrip("/") + subvolgroup_default_path,
            format="%u",
        )
        stat_of_uid_on_kernel_default_ec = fs_util.get_stats(
            client=clients[1],
            file_path=kernel_mounting_dir_2.rstrip("/") + subvolgroup_ec_path,
            format="%u",
        )
        stat_of_uid_on_fuse_default_fs = fs_util.get_stats(
            client=clients[0],
            file_path=fuse_mounting_dir_1.rstrip("/") + subvolgroup_default_path,
            format="%u",
        )
        stat_of_uid_on_fuse_default_ec = fs_util.get_stats(
            client=clients[1],
            file_path=fuse_mounting_dir_2.rstrip("/") + subvolgroup_ec_path,
            format="%u",
        )
        if int(subgroup_1_uid) != int(stat_of_uid_on_kernel_default_fs) and int(
            subgroup_1_uid
        ) != int(stat_of_uid_on_fuse_default_fs):
            log.error("UID is mismatching on sunvolgroup_1")
            return 1
        if int(subgroup_2_uid) != int(stat_of_uid_on_fuse_default_ec) and int(
            subgroup_2_uid
        ) != int(stat_of_uid_on_kernel_default_ec):
            log.error("UID is mismatching on subvolgroup_ec1")
            return 1

        def get_defined_gid(group_name, subvolumegroup_list):
            for subvolumegroup in subvolumegroup_list:
                if group_name == subvolumegroup["group_name"]:
                    return subvolumegroup.get("gid")

        log.info("Validate the gid of the subgroup")
        subgroup_1_gid = get_defined_gid(
            "subvolgroup_1", subvolumegroup_list=subvolumegroup_list
        )
        subgroup_2_gid = get_defined_gid(
            "subvolgroup_ec1", subvolumegroup_list=subvolumegroup_list
        )
        stat_of_gid_on_kernel_default_fs = fs_util.get_stats(
            client=clients[0],
            file_path=kernel_mounting_dir_1.rstrip("/") + subvolgroup_default_path,
            format="%g",
        )
        stat_of_gid_on_kernel_default_ec = fs_util.get_stats(
            client=clients[1],
            file_path=kernel_mounting_dir_2.rstrip("/") + subvolgroup_ec_path,
            format="%g",
        )
        stat_of_gid_on_fuse_default_fs = fs_util.get_stats(
            client=clients[0],
            file_path=fuse_mounting_dir_1.rstrip("/") + subvolgroup_default_path,
            format="%g",
        )
        stat_of_gid_on_fuse_default_ec = fs_util.get_stats(
            client=clients[1],
            file_path=fuse_mounting_dir_2.rstrip("/") + subvolgroup_ec_path,
            format="%g",
        )
        if int(subgroup_1_gid) != int(stat_of_gid_on_kernel_default_fs) and int(
            subgroup_1_gid
        ) != int(stat_of_gid_on_fuse_default_fs):
            log.error("GID is mismatching on sunvolgroup_1")
            return 1
        if int(subgroup_2_gid) != int(stat_of_gid_on_kernel_default_ec) and int(
            subgroup_2_gid
        ) != int(stat_of_gid_on_fuse_default_ec):
            log.error("GID is mismatching on subvolgroup_ec1")
            return 1

        run_ios(clients[0], kernel_mounting_dir_1)
        run_ios(clients[0], fuse_mounting_dir_1)
        run_ios(clients[1], kernel_mounting_dir_2)
        run_ios(clients[1], fuse_mounting_dir_2)

        log.info("Clean up the system")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)

        fs_util.client_clean_up(
            "umount", kernel_clients=[clients[0]], mounting_dir=kernel_mounting_dir_1
        )
        fs_util.client_clean_up(
            "umount", kernel_clients=[clients[1]], mounting_dir=kernel_mounting_dir_2
        )

        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir_1
        )
        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[1]], mounting_dir=fuse_mounting_dir_2
        )

        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true"
        )

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
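
The uid/gid validation differs from the octal-mode sketch shown earlier only in the stat format string: %u and %g report the numeric owner and group instead of %a for permissions. A minimal sketch of the comparison on an already-resolved group path, assuming the group was created with --uid 20 --gid 30 and the filesystem root is mounted at /mnt/cephfs_demo (all values are illustrative):

import subprocess

MOUNT_ROOT = "/mnt/cephfs_demo"        # assumed mount of the FS root
GROUP_PATH = "/volumes/subvolgroup_1"  # as returned by `ceph fs subvolumegroup getpath`
EXPECTED_UID, EXPECTED_GID = "20", "30"

owner = subprocess.run(
    ["stat", "-c", "%u %g", MOUNT_ROOT + GROUP_PATH],
    capture_output=True, text=True, check=True,
).stdout.strip()
assert owner == f"{EXPECTED_UID} {EXPECTED_GID}", f"ownership mismatch: {owner}"
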
Example #24
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. create fs volume create cephfs and cephfs-ec

    Subvolume Group Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount subvolume on both fuse and kernel clients and run IO's
    4. Verify that the IO lands in the data pool attached via pool_layout (see the usage-check sketch after this function)

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. Remove the pools added as part of pool_layout
    3. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    4. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create 2 pools, 1 - Replicated , 1 - EC Data Pool")
        create_pools = [
            "ceph osd pool create cephfs-data-pool-layout",
            "ceph osd pool create cephfs-data-pool-layout-ec 64 erasure",
            "ceph osd pool set cephfs-data-pool-layout-ec allow_ec_overwrites true",
        ]
        for cmd in create_pools:
            clients[0].exec_command(sudo=True, cmd=cmd)
        log.info("Add created data pools to each of the filesystem")
        add_pool_to_FS = [
            "ceph fs add_data_pool cephfs cephfs-data-pool-layout",
            "ceph fs add_data_pool cephfs-ec cephfs-data-pool-layout-ec",
        ]
        for cmd in add_pool_to_FS:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create cephfs subvolumegroup with desired data pool_layout")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1",
                "pool_layout": "cephfs-data-pool-layout",
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1",
                "pool_layout": "cephfs-data-pool-layout-ec",
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info("Create 2 subvolumes on each of the subvolume groups, size 5 GB")
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_3",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_4",
                "group_name": "subvolgroup_ec1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        log.info(
            "Mount 1 subvolumegroup/subvolume on kernel and 1 subvloume on Fuse → Client1"
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_2 subvolgroup_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.strip()}",
        )

        log.info(
            "On EC,Mount 1 subvolumegroup/subvolume on kernal and 1 subvloume on Fuse → Client2"
        )
        if build.startswith("5"):
            kernel_mounting_dir_2 = f"/mnt/cephfs_kernel{mounting_dir}_EC_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of sub volume")

            subvol_path, rc = clients[1].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_3 subvolgroup_ec1",
            )
            fs_util.kernel_mount(
                [clients[1]],
                kernel_mounting_dir_2,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.strip()}",
                extra_params=",fs=cephfs-ec",
            )

            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_4 subvolgroup_ec1",
            )
            fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            fs_util.fuse_mount(
                [clients[1]],
                fuse_mounting_dir_2,
                extra_params=f" -r {subvol_path.strip()} --client_fs cephfs-ec",
            )

        log.info(
            "Check the Pool status before the IO's to confirm if no IO's are going on on the pool attached"
        )
        get_pool_status_before = fs_util.get_pool_df(
            client=clients[0],
            pool_name="cephfs-data-pool-layout",
            vol_name=default_fs)
        get_pool_status_before_EC = fs_util.get_pool_df(
            client=clients[1],
            pool_name="cephfs-data-pool-layout-ec",
            vol_name="cephfs-ec",
        )

        run_ios(clients[0], kernel_mounting_dir_1)
        run_ios(clients[0], fuse_mounting_dir_1)
        run_ios(clients[1], kernel_mounting_dir_2)
        run_ios(clients[1], fuse_mounting_dir_2)

        log.info(
            "Check the Pool status and verify the IO's are going only to the Pool attached"
        )
        get_pool_status_after = fs_util.get_pool_df(
            client=clients[0],
            pool_name="cephfs-data-pool-layout",
            vol_name=default_fs)
        get_pool_status_after_EC = fs_util.get_pool_df(
            client=clients[1],
            pool_name="cephfs-data-pool-layout-ec",
            vol_name="cephfs-ec",
        )

        if get_pool_status_after["used"] < get_pool_status_before["used"]:
            log.error("Pool attached is unused")
            return 1
        if get_pool_status_after_EC["used"] < get_pool_status_before_EC["used"]:
            log.info("EC Pool attached is unused")
            return 1

        log.info("Clean up the system")
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[0]],
                                mounting_dir=kernel_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                kernel_clients=[clients[1]],
                                mounting_dir=kernel_mounting_dir_2)

        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[0]],
                                mounting_dir=fuse_mounting_dir_1)
        fs_util.client_clean_up("umount",
                                fuse_clients=[clients[1]],
                                mounting_dir=fuse_mounting_dir_2)

        clients[1].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true")

        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)
        log.info(
            "Remove the data pools from the filesystem and delete the created pools."
        )
        rm_pool_from_FS = [
            "ceph fs rm_data_pool cephfs cephfs-data-pool-layout",
            "ceph fs rm_data_pool cephfs-ec cephfs-data-pool-layout-ec",
            "ceph osd pool delete cephfs-data-pool-layout "
            "cephfs-data-pool-layout --yes-i-really-really-mean-it-not-faking",
            "ceph osd pool delete cephfs-data-pool-layout-ec "
            "cephfs-data-pool-layout-ec --yes-i-really-really-mean-it-not-faking",
        ]
        for cmd in rm_pool_from_FS:
            clients[0].exec_command(sudo=True, cmd=cmd)

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
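
The pool_layout check above relies on the attached data pool's usage growing once IO is written through subvolumes in the group. A minimal way to sample a pool's used bytes before and after the writes, assuming ceph df reports the pool under pools[].stats with a bytes_used field (field names as in recent Ceph releases; treat them as an assumption):

import json
import subprocess

def pool_used_bytes(pool_name: str) -> int:
    # Sample the pool's used bytes from `ceph df --format json`
    out = subprocess.run(["ceph", "df", "--format", "json"],
                         capture_output=True, text=True, check=True).stdout
    for pool in json.loads(out)["pools"]:
        if pool["name"] == pool_name:
            return pool["stats"]["bytes_used"]
    raise ValueError(f"pool {pool_name} not found")

before = pool_used_bytes("cephfs-data-pool-layout")
# ... run IO through a mount backed by the subvolume group ...
after = pool_used_bytes("cephfs-data-pool-layout")
assert after > before, "no data landed in the attached pool"
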
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. create fs volume create cephfs and cephfs-ec

    Operations :
    1. ceph fs subvolumegroup create <vol_name> <group_name> --mode <octal_value>
    2. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
    3. Mount subvolume on both fuse and kernel clients and run IO's
    4. Add additional Data Pool to the existing FS
    5. Run IO's again.

    Clean-up:
    1. Remove files from mountpoint, Unmount subvolumes.
    2. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
    3. ceph fs subvolumegroup rm <vol_name> <group_name>
    """

    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        if len(clients) < 2:
            log.info(
                f"This test requires minimum 2 client nodes.This has only {len(clients)} clients"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        default_fs = "cephfs"
        if build.startswith("4"):
            # create EC pool
            list_cmds = [
                "ceph fs flag set enable_multiple true",
                "ceph osd pool create cephfs-data-ec 64 erasure",
                "ceph osd pool create cephfs-metadata 64",
                "ceph osd pool set cephfs-data-ec allow_ec_overwrites true",
                "ceph fs new cephfs-ec cephfs-metadata cephfs-data-ec --force",
            ]
            if fs_util.get_fs_info(clients[0], "cephfs_new"):
                default_fs = "cephfs_new"
                list_cmds.append("ceph fs volume create cephfs")
            for cmd in list_cmds:
                clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Create SubVolumeGroups on each filesystem")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_1"
            },
            {
                "vol_name": "cephfs-ec",
                "group_name": "subvolgroup_ec1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(clients[0], **subvolumegroup)

        log.info(
            "Create a subvolume on each of the subvolume groups (on different data pools), size 5 GB"
        )
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_1",
                "group_name": "subvolgroup_1",
                "size": "5368709120",
            },
            {
                "vol_name": "cephfs-ec",
                "subvol_name": "subvol_2",
                "group_name": "subvolgroup_ec1",
                "size": "5368709120",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)

        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        log.info(
            "Mount 1 subvolume on kernel and 1 subvloume on Fuse → Client1")
        if build.startswith("5"):
            kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of subvolume on default filesystem")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_1,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.strip()}",
                extra_params=f",fs={default_fs}",
            )
            log.info("Get the path of subvolume on EC filesystem")
            fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_2 subvolgroup_ec1",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_1,
                extra_params=f" -r {subvol_path.strip()} --client_fs cephfs-ec",
            )
        else:
            kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
            mon_node_ips = fs_util.get_mon_node_ips()
            log.info("Get the path of subvolume on default filesystem")
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                f"ceph fs subvolume getpath {default_fs} subvol_1 subvolgroup_1",
            )
            fs_util.kernel_mount(
                [clients[0]],
                kernel_mounting_dir_1,
                ",".join(mon_node_ips),
                sub_dir=f"{subvol_path.strip()}",
            )
            log.info("Get the path of subvolume on EC filesystem")
            fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_EC_1/"
            subvol_path, rc = clients[0].exec_command(
                sudo=True,
                cmd=
                "ceph fs subvolume getpath cephfs-ec subvol_2 subvolgroup_ec1",
            )
            fs_util.fuse_mount(
                [clients[0]],
                fuse_mounting_dir_1,
                extra_params=f" -r {subvol_path.strip()}",
            )

        log.info("Run IO's")
        with parallel() as p:
            for i in [
                    kernel_mounting_dir_1,
                    fuse_mounting_dir_1,
            ]:
                p.spawn(fs_util.run_ios, clients[0], i)

        log.info("Create 2 Data Pools, 1 - Replicated , 1 - EC Data Pool")
        create_pools = [
            "ceph osd pool create cephfs-new-data-pool",
            "ceph osd pool create cephfs-new-data-pool-ec erasure",
            "ceph osd pool set cephfs-new-data-pool-ec allow_ec_overwrites true",
        ]
        for cmd in create_pools:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Add created data pools to each of the filesystem")
        add_pool_to_FS = [
            "ceph fs add_data_pool cephfs cephfs-new-data-pool",
            "ceph fs add_data_pool cephfs-ec cephfs-new-data-pool-ec",
        ]
        for cmd in add_pool_to_FS:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Run IO's")
        with parallel() as p:
            for i in [
                    kernel_mounting_dir_1,
                    fuse_mounting_dir_1,
            ]:
                p.spawn(fs_util.run_ios, clients[0], i)

        log.info("Clean up the system")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(clients[0], **subvolume)

        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)

        clients[0].exec_command(
            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true")
        return 0

    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Create cephfs volume
       ceph fs volume create <vol_name>
    2. Disable pg_autoscale_mode for cephfs pools if set
       ceph osd pool set cephfs_data pg_autoscale_mode off
       ceph osd pool set cephfs_metadata pg_autoscale_mode off
    3. Configure 2 clients with Fuse client and another 1 client with kernel client

    Test operation:
    1. Run IO's on both clients
    2. Verify there are no "heartbeat_map" timeout issues in the logs
    3. Fill up the cluster to about 20% capacity
    4. Change cephfs data and metadata pool pg_num and pgp_num to the existing value "-1" with client IO running
    5. Wait for the cluster to reach active+clean state while running IO's
    6. Write some more data to the cluster
    7. Change cephfs data and metadata pool pg_num and pgp_num to the existing value "+1" with client IO running
    8. Wait for the cluster to reach active+clean state while running IO's

    Clean-up:
    1. Remove all the data in Cephfs file system
    2. Remove all the cephfs mounts
    3. Reset pg_autoscale_mode for cephfs pools to on
    """
    try:
        tc = "CEPH-83574596"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        rhbuild = config.get("rhbuild")
        build = config.get("build", config.get("rhbuild"))
        mdss = ceph_cluster.get_ceph_objects("mds")

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        mount_points = []
        kernel_mount_dir = "/mnt/" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        fs_util.kernel_mount(clients,
                             kernel_mount_dir,
                             mon_node_ip,
                             new_client_hostname="admin")
        fuse_mount_dir = "/mnt/" + "".join(
            secrets.choice(string.ascii_uppercase + string.digits)
            for i in range(5))
        fs_util.fuse_mount(clients,
                           fuse_mount_dir,
                           new_client_hostname="admin")
        mount_points.extend([kernel_mount_dir, fuse_mount_dir])
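        # Default pool names differ across releases: RHCS 4.x ships cephfs_data /
        # cephfs_metadata, while newer builds use cephfs.<fs_name>.data / .meta.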
        if "4." in rhbuild:
            data_pool = "cephfs_data"
            metadata_pool = "cephfs_metadata"
        else:
            data_pool = "cephfs.cephfs.data"
            metadata_pool = "cephfs.cephfs.meta"
        commands = [
            f"ceph osd pool set {data_pool} pg_autoscale_mode off",
            f"ceph osd pool set {metadata_pool} pg_autoscale_mode off",
        ]
        for command in commands:
            clients[0].exec_command(sudo=True, cmd=command, long_running=True)
        data_pool_pg_num, rc = clients[0].exec_command(
            sudo=True,
            cmd=f"ceph osd pool get {data_pool} pg_num | awk '{{print $2}}'")
        metadata_pool_pg_num, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph osd pool get {metadata_pool} pg_num | awk '{{print $2}}'",
        )
        for num in range(1, 7):
            log.info("Creating Directories")
            out, rc = clients[0].exec_command(sudo=True,
                                              cmd="mkdir %s/%s%d" %
                                              (kernel_mount_dir, "dir", num))
        log.info("Running IO's to get cluster upto 20% capacity")
        no_of_files = "{1..10}"
        commands = [
            f'for n in {no_of_files}; do dd if=/dev/urandom of={kernel_mount_dir}/dir1/file$( printf %03d "$n" )'
            f" bs=1M count=1000; done",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
            f"--files 1000 --files-per-dir 10 --top {kernel_mount_dir}/dir2",
        ]
        for command in commands:
            clients[0].exec_command(sudo=True, cmd=command, long_running=True)
        log.info("Checking for heartbeat map timeout issue")
        rc = fs_util.heartbeat_map(mdss[0])
        if rc == 1:
            log.error("heartbeat map timeout issue found")
            return 1
        log.info(
            "Changing cephfs data and metadata pool pg_num and pgp_num to existing size '-1' with client IO running"
        )
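        # ~10 GB of additional data (1000 files x 10 MB via dd, plus smallfile IO) is
        # written while the pg_num/pgp_num changes are applied in parallel below.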
        no_of_files = "{1..1000}"
        data_pool_pg_num = str(int(data_pool_pg_num) - 1)
        metadata_pool_pg_num = str(int(metadata_pool_pg_num) - 1)
        commands = [
            f'for n in {no_of_files}; do dd if=/dev/urandom of={kernel_mount_dir}/dir3/file$( printf %03d "$n" )'
            f" bs=1M count=10; done",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
            f"--files 1000 --files-per-dir 10 --top {kernel_mount_dir}/dir4",
            f"ceph osd pool set {data_pool} pg_num {data_pool_pg_num}",
            f"ceph osd pool set {data_pool} pgp_num {data_pool_pg_num}",
            f"ceph osd pool set {metadata_pool} pg_num {metadata_pool_pg_num}",
            f"ceph osd pool set {metadata_pool} pgp_num {metadata_pool_pg_num}",
        ]
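        # IO commands and pool resize commands run concurrently; the short sleep only
        # staggers the spawn order.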
        with parallel() as p:
            for num in range(0, 6):
                p.spawn(clients[0].exec_command, sudo=True, cmd=commands[num])
                time.sleep(1)
        log.info("Verifying pgs are in active+clean state")
        rc = check_clean_pgs(clients)
        if rc == 1:
            return 1
        log.info(
            "Change cephfs data and metadata pool pg_num and pgp_num to existing size '+1' with client IO running"
        )
        data_pool_pg_num = str(int(data_pool_pg_num) + 1)
        metadata_pool_pg_num = str(int(metadata_pool_pg_num) + 1)
        commands = [
            f'for n in {no_of_files}; do     dd if=/dev/urandom of={kernel_mount_dir}/dir5/file$( printf %03d "$n" )'
            f" bs=1M count=10; done",
            f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
            f"--files 1000 --files-per-dir 10 --top {kernel_mount_dir}/dir6",
            f"ceph osd pool set {data_pool} pg_num {data_pool_pg_num}",
            f"ceph osd pool set {data_pool} pgp_num {data_pool_pg_num}",
            f"ceph osd pool set {metadata_pool} pg_num {metadata_pool_pg_num}",
            f"ceph osd pool set {metadata_pool} pgp_num {metadata_pool_pg_num}",
        ]
        with parallel() as p:
            for num in range(0, 6):
                p.spawn(clients[0].exec_command, sudo=True, cmd=commands[num])
                time.sleep(1)
        log.info("Verifying pgs are in active+clean state")
        rc = check_clean_pgs(clients)
        if rc == 1:
            return 1
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up the system")
        out, rc = clients[0].exec_command(sudo=True,
                                          cmd=f"rm -rf {mount_points[1]}/*")
        for mount_point in mount_points:
            clients[0].exec_command(sudo=True, cmd=f"umount {mount_point}")
        # Reset pg_autoscale_mode once for both pools instead of once per mount point
        if "4." in rhbuild:
            commands = [
                f"ceph osd pool set {data_pool} pg_autoscale_mode warn",
                f"ceph osd pool set {metadata_pool} pg_autoscale_mode warn",
            ]
        else:
            commands = [
                f"ceph osd pool set {data_pool} pg_autoscale_mode on",
                f"ceph osd pool set {metadata_pool} pg_autoscale_mode on",
            ]
        for command in commands:
            clients[0].exec_command(sudo=True,
                                    cmd=command,
                                    long_running=True)
        for mount_point in mount_points:
            clients[0].exec_command(sudo=True, cmd=f"rm -rf {mount_point}")
def run(ceph_cluster, **kw):
    """
    Test Cases Covered :
    CEPH-83573400   Test to validate the increase in quota limit once it reaches the max limit. (files)
                     Create a FS, create 10 directories and mount them on kernel and fuse clients (5 mounts
                     each). Set the max file quota to a number (say 50) and add up to that number of files to the
                     directory and verify if the set quota limit is working fine.
                     Increase the set quota limit to more than what was set
                     earlier, add more files and verify.
                     Similarly set different limits on different directories, increase
                     the limit and verify its functionality and verify quota
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create fs volume cephfs if the volume is not there (ceph fs volume create cephfs)
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvolgroup_clone_attr_vol_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_1 --size 5368706371 --group_name subvolgroup_1
    5. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1

    Test Case Flow:
    1. Mount the subvolume_1 on the client using fuse
    2. Mount the subvolume_2 on the client using kernel
    3. set file attribute 10 on both mount points
    4. Create 11 files and check it fails at the 11th iteration
    5. Perform same on kernel mount
    6. Create a directory inside fuse mount and set file attribute and verify
    7. Create a directory inside kernel mount and set file attribute and verify
    8. Increase the quota of files to 20 and try creating the files on the same directory used in step 3
    9. Perform same on kernel mount
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {
                "vol_name": default_fs,
                "group_name": "subvolgroup_quota_file_increase_1"
            },
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume_list = [
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_file_incr_fuse",
                "group_name": "subvolgroup_quota_file_increase_1",
                "size": "5368706371",
            },
            {
                "vol_name": default_fs,
                "subvol_name": "subvol_file_incr_kernel",
                "group_name": "subvolgroup_quota_file_increase_1",
                "size": "5368706371",
            },
        ]
        for subvolume in subvolume_list:
            fs_util.create_subvolume(clients[0], **subvolume)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        log.info("Get the path of sub volume")
        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_file_incr_kernel subvolgroup_quota_file_increase_1",
        )
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.strip()}",
        )

        subvol_path, rc = clients[0].exec_command(
            sudo=True,
            cmd=
            f"ceph fs subvolume getpath {default_fs} subvol_file_incr_fuse subvolgroup_quota_file_increase_1",
        )
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path.strip()}",
        )
        fs_util.set_quota_attrs(clients[0], 50, 10000000, fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.file_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        log.info("Increasing the quota to 100 and validating file quota attr")
        fs_util.set_quota_attrs(clients[0], 100, 10000000, fuse_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0], fuse_mounting_dir_1)
        fs_util.file_quota_test(clients[0], fuse_mounting_dir_1, quota_attrs)

        fs_util.set_quota_attrs(clients[0], 50, 10000000,
                                kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.file_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        log.info("Increasing the quota to 100 and validating file quota attr")
        fs_util.set_quota_attrs(clients[0], 100, 10000000,
                                kernel_mounting_dir_1)
        quota_attrs = fs_util.get_quota_attrs(clients[0],
                                              kernel_mounting_dir_1)
        fs_util.file_quota_test(clients[0], kernel_mounting_dir_1, quota_attrs)

        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1

    finally:
        log.info("Clean Up in progess")
        for subvolume in subvolume_list:
            fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1,
                                          **subvolumegroup,
                                          force=True)
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-11319  Create first snap, add more data to the original, then create a second snap.
                Rollback 1st snap and do data validation.
                Rollback 2nd snap and do data validation. Perform cross platform rollback
                i.e. take snap on kernel mount and perform rollback using fuse mount
    Pre-requisites :
    1. We need at least one client node to execute this test case
    2. Create fs volume cephfs if the volume is not there (ceph fs volume create cephfs)
    3. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
        Ex : ceph fs subvolumegroup create cephfs subvol_cross_platform_snapshot_1
    4. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
       [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]  [--namespace-isolated]
       Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_
    5. Create data on the subvolume. We will add known data as we are going to verify the files
        Ex: create_file_data()
    6. Create snapshot of the subvolume
        Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvol_cross_platform_snapshot_1

    Script Flow:
    1. Mount the subvolume on the client using Kernel and fuse mount
    2. Write data into the fuse mount point i.e., data_from_fuse_mount
    3. Collect the checksum of the files
    4. Take snapshot at this point i.e., snap_1
    5. Write data into the kernel mount point i.e., data_from_kernel_mount
    6. Collect the checksum of the files
    7. Take snapshot at this point i.e., snap_2
    8. On Kernel mount revert the snapshot to snap_1 and compare the checksum of the files collected in step 3
    9. On Fuse mount revert the snapshot to snap_2 and compare the checksum of the files collected in step 6

    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if len(clients) < 1:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        default_fs = "cephfs"
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        subvolumegroup_list = [
            {"vol_name": default_fs, "group_name": "subvol_cross_platform_snapshot_1"},
        ]
        for subvolumegroup in subvolumegroup_list:
            fs_util.create_subvolumegroup(client1, **subvolumegroup)
        subvolume = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "group_name": "subvol_cross_platform_snapshot_1",
            "size": "5368706371",
        }
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath {default_fs} subvol_cross_platform_snapshot"
            f" subvol_cross_platform_snapshot_1",
        )
        subvol_path = subvol_path.strip()
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
        fs_util.fuse_mount(
            [client1],
            fuse_mounting_dir_1,
            extra_params=f" -r {subvol_path}",
        )
        fs_util.create_file_data(
            client1, fuse_mounting_dir_1, 3, "snap1", "data_from_fuse_mount "
        )
        fuse_files_checksum = fs_util.get_files_and_checksum(
            client1, fuse_mounting_dir_1
        )
        fuse_snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "snap_name": "snap_1",
            "group_name": "subvol_cross_platform_snapshot_1",
        }
        fs_util.create_snapshot(client1, **fuse_snapshot)
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path}",
        )
        fs_util.create_file_data(
            client1, kernel_mounting_dir_1, 3, "snap1", "data_from_kernel_mount "
        )
        kernel_snapshot = {
            "vol_name": default_fs,
            "subvol_name": "subvol_cross_platform_snapshot",
            "snap_name": "snap_2",
            "group_name": "subvol_cross_platform_snapshot_1",
        }
        fs_util.create_snapshot(client1, **kernel_snapshot)
        kernel_files_checksum = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_1
        )
        client1.exec_command(
            sudo=True, cmd=f"cd {kernel_mounting_dir_1};cp .snap/_snap_1_*/* ."
        )
        kernel_mount_revert_snap_fuse = fs_util.get_files_and_checksum(
            client1, kernel_mounting_dir_1
        )
        if fuse_files_checksum != kernel_mount_revert_snap_fuse:
            log.error(
                "checksum is not when reverted to snap1 i.e., from fuse mount snapshot revert"
            )
            return 1
        client1.exec_command(
            sudo=True, cmd=f"cd {fuse_mounting_dir_1};cp .snap/_snap_2_*/* ."
        )
        fuse_mount_revert_snap_kernel = fs_util.get_files_and_checksum(
            client1, fuse_mounting_dir_1
        )
        if kernel_files_checksum != fuse_mount_revert_snap_kernel:
            log.error(
                "checksum is not when reverted to snap2 i.e., from kernel mount snapshot revert"
            )
            return 1
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Clean Up in progess")
        fs_util.remove_snapshot(client1, **kernel_snapshot)
        fs_util.remove_snapshot(client1, **fuse_snapshot)
        fs_util.remove_subvolume(client1, **subvolume)
        for subvolumegroup in subvolumegroup_list:
            fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True)
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Create 2 cephfs volumes
       ceph fs volume create <vol_name>

    Test operation:
    1. Mount both cephfs with same auth_key(client)
    2. Run IO's on both cephfs mounts

    Clean-up:
    1. Remove all the data in Cephfs file system
    2. Remove all the cephfs mounts
    """
    try:
        tc = "CEPH-83573877"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        mount_points = []
        multiple_cephfs = ["cephfs", "cephfs-ec"]
        for fs_name in multiple_cephfs:
            log.info(f"Mounting {fs_name} on fuse & kernel client with same auth_key")
            kernel_mount_dir = "/mnt/kernel_" + "".join(
                secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
            )
            fs_util.kernel_mount(
                clients,
                kernel_mount_dir,
                mon_node_ip,
                new_client_hostname="admin",
                extra_params=f",fs={fs_name}",
            )
            fuse_mount_dir = "/mnt/fuse_" + "".join(
                secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
            )
            mount_points.extend([kernel_mount_dir, fuse_mount_dir])
            fs_util.fuse_mount(
                clients,
                fuse_mount_dir,
                new_client_hostname="admin",
                extra_params=f" --client_fs {fs_name}",
            )
            log.info(f"Running IO's on {fs_name}")
            commands = [
                f"mkdir {kernel_mount_dir}/dir1 {fuse_mount_dir}/dir2",
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --top {kernel_mount_dir}/dir1",
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation read --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --top {kernel_mount_dir}/dir1",
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --top {fuse_mount_dir}/dir2",
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation read --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --top {fuse_mount_dir}/dir2",
            ]
            for command in commands:
                _, err = client1.exec_command(sudo=True, cmd=command, long_running=True)
                if err:
                    return 1
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up the system")
        client1.exec_command(sudo=True, cmd=f"rm -rf {mount_points[0]}/*")
        client1.exec_command(sudo=True, cmd=f"rm -rf {mount_points[2]}/*")
        for client in clients:
            for mount_point in mount_points:
                client.exec_command(sudo=True, cmd=f"umount {mount_point}")
def run(ceph_cluster, **kw):
    """
    Pre-requisites :
    1. Create a subvolume with sufficient data (around 500 files of 1 MB each)
    2. Create a snapshot of the above subvolume
    3. Create multiple clones from the above snapshot

    Test operation:
    1. While the clones are in the 'in-progress' state, try to delete all the clone subvolumes with the force option.
    2. Check if the clone operation status is in the 'in-progress' state
    3. Writing sufficient data in step 1 provides enough time to achieve that
    4. Try to delete the subvolume of the clone in the 'in-progress' state
    5. The subvolume deletion should fail while the clone is in progress
    6. Try to cancel the cloning
    7. After cancelling the cloning, it should be possible to delete the subvolume
    """
    try:
        bz = "1980920"
        tc = "CEPH-83574681"
        fs_util = FsUtils(ceph_cluster)
        log.info(f"Running CephFS tests for BZ-{bz}")
        log.info(f"Running CephFS tests for BZ-{tc}")
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        create_cephfs = "ceph fs volume create cephfs"
        client1.exec_command(sudo=True, cmd=create_cephfs)
        subvolume_name_generate = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(5)))
        subvolume = {
            "vol_name": "cephfs",
            "subvol_name": f"subvol_{subvolume_name_generate}",
            "size": "5368706371",
        }
        subvolume_name = subvolume["subvol_name"]
        fs_util.create_subvolume(client1, **subvolume)
        log.info("Get the path of sub volume")
        subvol_path, rcc = client1.exec_command(
            sudo=True,
            cmd=f"ceph fs subvolume getpath cephfs {subvolume_name}")
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10)))
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.auth_list([client1])
        fs_util.kernel_mount(
            [client1],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            sub_dir=f"{subvol_path.read().decode().strip()}",
        )
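        # Each dd call below creates a 5 MB sparse file (count=0 with seek=5M); 250
        # such files give the clones enough to copy so they stay "in-progress" for a
        # while.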
        for i in range(250):
            client1.exec_command(
                sudo=True,
                cmd=f"dd if=/dev/zero of={kernel_mounting_dir_1}" + str(i) +
                ".txt bs=1 count=0 seek=5M",
                long_running=True,
            )
        log.info("Checking Pre-requisites")
        fs_util.create_snapshot(client1, "cephfs", subvolume_name,
                                f"subvol_1_snap{subvolume_name}")
        for i in range(1, 4):
            new_subvolume_name = f"subvol_1_snap_clone{subvolume_name}{str(i)}"
            fs_util.create_clone(
                client1,
                "cephfs",
                subvolume_name,
                f"subvol_1_snap{subvolume_name}",
                new_subvolume_name,
            )
            out1, err1 = client1.exec_command(
                sudo=True,
                cmd=f"ceph fs clone status cephfs {new_subvolume_name}")
            output1 = json.loads(out1.read().decode())
            output2 = output1["status"]["state"]
            log.info(new_subvolume_name + " status: " + str(output2))
            result, error = client1.exec_command(
                sudo=True,
                cmd=f"ceph fs subvolume rm cephfs {new_subvolume_name} --force",
                check_ec=False,
            )
            log.info("Subvolume Remove Executed")
            error_result = error.read().decode()
            if "clone in-progress" in error_result:
                log.info("Clone is in-progress as expected")
            if output2 == "in-progress":
                client1.exec_command(
                    sudo=True,
                    cmd=f"ceph fs clone cancel cephfs {new_subvolume_name}")
            result2, error2 = client1.exec_command(
                sudo=True,
                cmd=f"ceph fs clone status cephfs {new_subvolume_name}")
            out1 = json.loads(result2.read().decode())
            out2 = out1["status"]["state"]
            if out2 == "canceled":
                fs_util.remove_subvolume(client1,
                                         "cephfs",
                                         new_subvolume_name,
                                         force=True)
        fs_util.remove_subvolume(client1, "cephfs", subvolume_name)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1