Example #1
def run(ceph_cluster, **kw):
    try:
        log.info(f"MetaData Information {log.metadata} in {__name__}")
        tc = "nfs-ganesha"
        nfs_mounting_dir = "/mnt/nfs/"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        rhbuild = config.get("rhbuild")
        if "5." in rhbuild:
            from tests.cephfs.cephfs_utilsV1 import FsUtils

            fs_util = FsUtils(ceph_cluster)
            nfs_server = ceph_cluster.get_ceph_objects("nfs")
            nfs_client = ceph_cluster.get_ceph_objects("client")
            fs_util.auth_list(nfs_client)
            nfs_name = "cephfs-nfs"
            out, rc = nfs_client[0].exec_command(
                sudo=True, cmd="ceph fs ls | awk {' print $2'} ")
            fs_name = out.rstrip()
            fs_name = fs_name.strip(",")
            nfs_export_name = "/export1"
            path = "/"
            nfs_server_name = nfs_server[0].node.hostname
            # Create ceph nfs cluster
            nfs_client[0].exec_command(sudo=True,
                                       cmd="ceph mgr module enable nfs")
            out, rc = nfs_client[0].exec_command(
                sudo=True,
                cmd=f"ceph nfs cluster create {nfs_name} {nfs_server_name}")
            # Verify ceph nfs cluster is created
            if wait_for_process(client=nfs_client[0],
                                process_name=nfs_name,
                                ispresent=True):
                log.info("ceph nfs cluster created successfully")
            else:
                raise CommandFailed("Failed to create nfs cluster")
            # Create cephfs nfs export
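            # Note: the export create argument order changed after RHCS 5.0.
            # 5.0 expects "cephfs <fs_name> <cluster_id> <pseudo_path>", while
            # later builds expect "cephfs <cluster_id> <pseudo_path> <fs_name>",
            # hence the version branch below.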
            if "5.0" in rhbuild:
                nfs_client[0].exec_command(
                    sudo=True,
                    cmd=f"ceph nfs export create cephfs {fs_name} {nfs_name} "
                    f"{nfs_export_name} path={path}",
                )
            else:
                nfs_client[0].exec_command(
                    sudo=True,
                    cmd=f"ceph nfs export create cephfs {nfs_name} "
                    f"{nfs_export_name} {fs_name} path={path}",
                )

            # Verify ceph nfs export is created
            out, rc = nfs_client[0].exec_command(
                sudo=True, cmd=f"ceph nfs export ls {nfs_name}")
            if nfs_export_name in out:
                log.info("ceph nfs export created successfully")
            else:
                raise CommandFailed("Failed to create nfs export")
            # Mount ceph nfs exports
            nfs_client[0].exec_command(sudo=True,
                                       cmd=f"mkdir -p {nfs_mounting_dir}")
            assert wait_for_cmd_to_succeed(
                nfs_client[0],
                cmd=
                f"mount -t nfs -o port=2049 {nfs_server_name}:{nfs_export_name} {nfs_mounting_dir}",
            )
            nfs_client[0].exec_command(
                sudo=True,
                cmd=
                f"mount -t nfs -o port=2049 {nfs_server_name}:{nfs_export_name} {nfs_mounting_dir}",
            )
            out, rc = nfs_client[0].exec_command(cmd="mount")
            mount_output = out.split()
            log.info("Checking if nfs mount is is passed of failed:")
            assert nfs_mounting_dir.rstrip("/") in mount_output
            log.info("Creating Directory")
            out, rc = nfs_client[0].exec_command(
                sudo=True, cmd=f"mkdir {nfs_mounting_dir}{dir_name}")
            nfs_client[0].exec_command(
                sudo=True,
                cmd=
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                f"{nfs_mounting_dir}{dir_name}",
                long_running=True,
            )
            nfs_client[0].exec_command(
                sudo=True,
                cmd=
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation read --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                f"{nfs_mounting_dir}{dir_name}",
                long_running=True,
            )
            # Unmount nfs
            nfs_client[0].exec_command(sudo=True,
                                       cmd=f"umount {nfs_mounting_dir}")
            # Delete cephfs nfs export
            nfs_client[0].exec_command(
                sudo=True,
                cmd=f"ceph nfs export delete {nfs_name} {nfs_export_name}")
            # Verify cephfs nfs export is deleted
            out, rc = nfs_client[0].exec_command(
                sudo=True, cmd=f"ceph nfs export ls {nfs_name}")

            if nfs_export_name not in out:
                log.info("cephf nfs export deleted successfully")
            else:
                raise CommandFailed("Failed to delete cephfs nfs export")
            # Delete nfs cluster
            nfs_client[0].exec_command(
                sudo=True, cmd=f"ceph nfs cluster delete {nfs_name}")
            # Adding Delay to reflect in cluster list
            time.sleep(5)
            if not wait_for_process(client=nfs_client[0],
                                    process_name=nfs_name,
                                    ispresent=False):
                raise CommandFailed("Cluster has not been deleted")
            # Verify nfs cluster is deleted
            out, rc = nfs_client[0].exec_command(sudo=True,
                                                 cmd="ceph nfs cluster ls")
            if nfs_name not in out:
                log.info("ceph nfs cluster deleted successfully")
            else:
                raise CommandFailed("Failed to delete nfs cluster")

        else:
            from tests.cephfs.cephfs_utils import FsUtils

            fs_util = FsUtils(ceph_cluster)
            client_info, rc = fs_util.get_clients(build)
            if rc == 0:
                log.info("Got client info")
            else:
                raise CommandFailed("fetching client info failed")
            nfs_server = [client_info["kernel_clients"][0]]
            nfs_client = [client_info["kernel_clients"][1]]
            rc1 = fs_util.auth_list(nfs_server)
            rc2 = fs_util.auth_list(nfs_client)
            log.info(f"auth_list return codes: {rc1}, {rc2}")
            if rc1 == 0 and rc2 == 0:
                log.info("got auth keys")
            else:
                raise CommandFailed("auth list failed")
            rc = fs_util.nfs_ganesha_install(nfs_server[0])
            if rc == 0:
                log.info("NFS ganesha installed successfully")
            else:
                raise CommandFailed("NFS ganesha installation failed")
            rc = fs_util.nfs_ganesha_conf(nfs_server[0], "admin")
            if rc == 0:
                log.info("NFS ganesha config added successfully")
            else:
                raise CommandFailed("NFS ganesha config adding failed")
            rc = fs_util.nfs_ganesha_mount(nfs_client[0], nfs_mounting_dir,
                                           nfs_server[0].node.hostname)
            if rc == 0:
                log.info("NFS-ganesha mount passed")
            else:
                raise CommandFailed("NFS ganesha mount failed")

            mounting_dir = nfs_mounting_dir + "ceph/"
            out, rc = nfs_client[0].exec_command(
                sudo=True, cmd=f"mkdir {mounting_dir}{dir_name}")
            nfs_client[0].exec_command(
                sudo=True,
                cmd=
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                f"{mounting_dir}{dir_name}",
                long_running=True,
            )
            nfs_client[0].exec_command(
                sudo=True,
                cmd=
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation read --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                f"{mounting_dir}{dir_name}",
                long_running=True,
            )
            log.info("Cleaning up")
            nfs_client[0].exec_command(sudo=True,
                                       cmd=f"rm -rf {mounting_dir}*")
            log.info("Unmounting nfs-ganesha mount on client:")
            nfs_client[0].exec_command(sudo=True,
                                       cmd=f"umount {nfs_mounting_dir} -l")
            log.info("Removing nfs-ganesha mount dir on client:")
            nfs_client[0].exec_command(sudo=True,
                                       cmd=f"rm -rf {nfs_mounting_dir}")

            log.info("Cleaning up successfull")
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
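Example #1 leans on two polling helpers, `wait_for_process` and `wait_for_cmd_to_succeed`, that are imported from the test framework's utilities and not shown in this listing. A minimal sketch of what such a retry helper could look like is below; the name and signature mirror the call in the example, but this is an illustrative assumption, not the framework's actual implementation.

import time


def wait_for_cmd_to_succeed(client, cmd, timeout=180, interval=5):
    """Hedged sketch: retry `cmd` on `client` until it exits cleanly or times out.

    Mirrors how example #1 waits for the NFS mount to become available.
    The real helper is imported from the framework utilities and may differ.
    """
    end_time = time.time() + timeout
    while time.time() < end_time:
        try:
            client.exec_command(sudo=True, cmd=cmd)
            return True
        except Exception:
            time.sleep(interval)
    return False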
Example #2
def run(ceph_cluster, **kw):
    """
    CEPH-83574028 - Ensure the path of the nfs export is displayed properly.
    Pre-requisites:
    1. Create cephfs volume
       ceph fs volume create <vol_name>
    2. Create nfs cluster
       ceph nfs cluster create <nfs_name> <nfs_server>

    Test operation:
    1. Create cephfs nfs export
       ceph nfs export create cephfs <fs_name> <nfs_name> <nfs_export_name> path=<export_path>
    2. Verify path of cephfs nfs export
       ceph nfs export get <nfs_name> <nfs_export_name>

    Clean-up:
    1. Remove cephfs nfs export
    """
    try:
        tc = "CEPH-83574028"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        rhbuild = config.get("rhbuild")
        nfs_servers = ceph_cluster.get_ceph_objects("nfs")
        nfs_server = nfs_servers[0].node.hostname
        nfs_name = "cephfs-nfs"
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        client1.exec_command(sudo=True, cmd="ceph mgr module enable nfs")
        out, rc = client1.exec_command(
            sudo=True, cmd=f"ceph nfs cluster create {nfs_name} {nfs_server}")
        if wait_for_process(client=client1,
                            process_name=nfs_name,
                            ispresent=True):
            log.info("ceph nfs cluster created successfully")
        else:
            raise CommandFailed("Failed to create nfs cluster")
        nfs_export_name = "/export_" + "".join(
            secrets.choice(string.digits) for i in range(3))
        export_path = "/"
        fs_name = "cephfs"
        if "5.0" in rhbuild:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {fs_name} {nfs_name} "
                f"{nfs_export_name} path={export_path}",
            )
        else:
            client1.exec_command(
                sudo=True,
                cmd=f"ceph nfs export create cephfs {nfs_name} "
                f"{nfs_export_name} {fs_name} path={export_path}",
            )
        out, rc = client1.exec_command(sudo=True,
                                       cmd=f"ceph nfs export ls {nfs_name}")

        if nfs_export_name not in out:
            raise CommandFailed("Failed to create nfs export")

        log.info("ceph nfs export created successfully")
        out, rc = client1.exec_command(
            sudo=True, cmd=f"ceph nfs export get {nfs_name} {nfs_export_name}")
        output = json.loads(out)
        export_get_path = output["path"]
        if export_get_path != export_path:
            log.error("Export path is not correct")
            return 1

        log.info("Test completed successfully")
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up")
        client1.exec_command(
            sudo=True,
            cmd=f"ceph nfs export delete {nfs_name} {nfs_export_name}",
            check_ec=False,
        )
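The assertion at the heart of example #2 is that `ceph nfs export get` reports the same `path` the export was created with. A minimal sketch of that check factored into a reusable helper follows; the helper name and signature are assumptions for illustration and are not part of the test framework.

import json


def export_path_matches(client, nfs_name, export_name, expected_path):
    """Hedged sketch of the CEPH-83574028 check from example #2.

    Fetches the export definition as JSON and compares its "path" field
    against the path the export was created with.
    """
    out, _ = client.exec_command(
        sudo=True, cmd=f"ceph nfs export get {nfs_name} {export_name}"
    )
    return json.loads(out).get("path") == expected_path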
Example #3
def run(ceph_cluster, **kw):
    """
    Pre-requisites:
    1. Create 3 cephfs volume
       ceph fs volume create <vol_name>
       ceph orch apply mds <fs_name> --placement='<no. of mds> <mds_nodes...>'

    Test operation:
    1. Create client1 restricted to first cephfs
       ceph fs authorize <fs_name> client.<client_id> <path-in-cephfs> rw
    2. Create client2 restricted to second cephfs
    3. Create client3 restricted to third cephfs
    4. Get filesystem information using client1
    5. Ensure only first cephfs info is shown
    6. Get filesystem information using client2
    7. Ensure only second cephfs info is shown
    8. Get filesystem information using client3
    9. Ensure only third cephfs info is shown

    Clean-up:
    1. Remove third cephfs
    2. Remove all the cephfs mounts
    3. Remove all the clients
    """
    try:
        tc = "CEPH-83573875"
        log.info(f"Running cephfs {tc} test case")

        config = kw["config"]
        build = config.get("build", config.get("rhbuild"))
        mdss = ceph_cluster.get_ceph_objects("mds")

        fs_util = FsUtils(ceph_cluster)
        clients = ceph_cluster.get_ceph_objects("client")
        client1 = clients[0]
        mds1 = mdss[0].node.hostname
        mds2 = mdss[1].node.hostname
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mon_node_ip = fs_util.get_mon_node_ips()
        mon_node_ip = ",".join(mon_node_ip)
        fs1 = "cephfs"
        fs2 = "cephfs-ec"
        fs3 = "Ceph_fs_new"
        commands = [
            f"ceph fs volume create {fs3}",
            f"ceph orch apply mds {fs3} --placement='2 {mds1} {mds2}'",
        ]
        for command in commands:
            _, err = clients[0].exec_command(sudo=True,
                                             cmd=command,
                                             long_running=True)
            if err:
                return 1
            wait_for_process(client=clients[0],
                             process_name=fs3,
                             ispresent=True)
        log.info(f"Creating client authorized to {fs1}")
        fs_util.fs_client_authorize(client1, fs1, "client1", "/", "rw")
        log.info(f"Creating client authorized to {fs2}")
        fs_util.fs_client_authorize(client1, fs2, "client2", "/", "rw")
        log.info(f"Creating client authorized to {fs3}")
        fs_util.fs_client_authorize(client1, fs3, "client3", "/", "rw")
        log.info("Verifying file system information for client1")
        command = (
            "ceph auth get client.client1 -o /etc/ceph/ceph.client.client1.keyring"
        )
        client1.exec_command(sudo=True, cmd=command)
        command = "ceph fs ls -n client.client1 -k /etc/ceph/ceph.client.client1.keyring --format json"
        out, rc = client1.exec_command(sudo=True, cmd=command)
        output = json.loads(out)
        validate_fs_info(fs1, output)
        log.info("Verifying file system information for client2")
        command = (
            "ceph auth get client.client2 -o /etc/ceph/ceph.client.client2.keyring"
        )
        client1.exec_command(sudo=True, cmd=command)
        command = "ceph fs ls -n client.client2 -k /etc/ceph/ceph.client.client2.keyring --format json"
        out, rc = client1.exec_command(sudo=True, cmd=command)
        output = json.loads(out)
        validate_fs_info(fs2, output)
        log.info("Verifying file system information for client3")
        command = (
            "ceph auth get client.client3 -o /etc/ceph/ceph.client.client3.keyring"
        )
        client1.exec_command(sudo=True, cmd=command)
        command = "ceph fs ls -n client.client3 -k /etc/ceph/ceph.client.client3.keyring --format json"
        out, rc = client1.exec_command(sudo=True, cmd=command)
        output = json.loads(out)
        validate_fs_info(fs3, output)
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        log.info("Cleaning up the system")
        commands = [
            "ceph config set mon mon_allow_pool_delete true",
            f"ceph fs volume rm {fs3} --yes-i-really-mean-it",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)

        for num in range(1, 4):
            client1.exec_command(sudo=True,
                                 cmd=f"ceph auth rm client.client{num}")
Example #4
def run(ceph_cluster, results=None, **kw):
    """
    An arbitrary pool added to the volume is removed successfully on volume removal.
    Pre-requisites :
    1 Create a Pool
    2 Create fs volume using the created pool.
    3 Remove the volume and verify if pool associated with the volume is also removed.
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        log.info("checking Pre-requisites")
        results = []
        if len(clients) < 1:
            log.info(
                f"This test requires at least 1 client node, but only {len(clients)} clients are available"
            )
            return 1
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        if build.startswith("4"):
            clients[0].exec_command(
                sudo=True,
                cmd="ceph fs flag set enable_multiple true",
            )
        log.info("Create FS's and add arbitrary data pool")
        create_fs_pools = [
            "ceph fs volume create cephfs_new",
            "ceph osd pool create cephfs_new-data-ec 64 erasure",
            "ceph osd pool create cephfs_new-metadata 64",
            "ceph osd pool set cephfs_new-data-ec allow_ec_overwrites true",
            "ceph fs new cephfs_new-ec cephfs_new-metadata cephfs_new-data-ec --force",
            "ceph osd pool create cephfs-data-pool-arbitrary",
            "ceph osd pool create cephfs-data-pool-arbitrary-ec 64 erasure",
            "ceph osd pool set cephfs-data-pool-arbitrary-ec allow_ec_overwrites true",
        ]
        for cmd in create_fs_pools:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Add created data pools to each of the filesystem")
        add_pool_to_fs = [
            "ceph fs add_data_pool cephfs_new cephfs-data-pool-arbitrary",
            "ceph fs add_data_pool cephfs_new-ec cephfs-data-pool-arbitrary-ec",
        ]
        for cmd in add_pool_to_fs:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info("Remove the FS")
        rm_fs = [
            "ceph config set mon mon_allow_pool_delete true",
            "ceph fs volume rm cephfs_new --yes-i-really-mean-it",
            "ceph fs volume rm cephfs_new-ec --yes-i-really-mean-it",
        ]
        for cmd in rm_fs:
            clients[0].exec_command(sudo=True, cmd=cmd)

        log.info(
            "Verify if arbitrary pool is also removed along with removal of FS Volume"
        )
        verify_fs_removal = [
            "ceph fs ls | grep cephfs",
            "ceph fs ls | grep cephfs-ec",
            "ceph osd lspools | grep cephfs.cephfs-data-pool-arbitrary",
            "ceph osd lspools | grep cephfs.cephfs-data-pool-arbitrary-ec",
        ]
        for cmd in verify_fs_removal:
            clients[0].exec_command(sudo=True, cmd=cmd, check_ec=False)
            if clients[0].node.exit_status == 1:
                results.append(f"{cmd} successfully executed")
        wait_for_process(clients[0], "cephfs_new", ispresent=False)
        wait_for_process(clients[0], "cephfs_new-ec", ispresent=False)
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
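The verification loop above relies on grep's non-zero exit status to infer that a filesystem or pool is gone. A more direct check is sketched below, assuming `ceph osd lspools --format json` returns a list of objects with a "poolname" key; the helper name is an illustration, not framework API.

import json


def pool_absent(client, pool_name):
    """Hedged sketch: confirm a pool no longer exists after the volume removal."""
    out, _ = client.exec_command(
        sudo=True, cmd="ceph osd lspools --format json"
    )
    return pool_name not in [pool["poolname"] for pool in json.loads(out)]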