Example #1
def run(ceph_cluster, **kw):
    try:
        log.info(f"MetaData Information {log.metadata} in {__name__}")
        fs_util = FsUtils(ceph_cluster)

        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        clients = ceph_cluster.get_ceph_objects("client")
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10)
        )
        fuse_mounting_dir = f"/mnt/cephfs_fuse{mounting_dir}/"
        fs_util.fuse_mount(clients, fuse_mounting_dir)

        mount_test_case(clients, fuse_mounting_dir)

        kernel_mounting_dir = f"/mnt/cephfs_kernel{mounting_dir}/"
        mon_node_ips = fs_util.get_mon_node_ips()
        fs_util.kernel_mount(clients, kernel_mounting_dir, ",".join(mon_node_ips))

        mount_test_case(clients, kernel_mounting_dir)

        log.info("Cleaning up!-----")
        rc = fs_util.client_clean_up(
            [],
            clients,
            kernel_mounting_dir,
            "umount",
        )
        if rc != 0:
            raise CommandFailed("fuse clients cleanup failed")
        log.info("Fuse clients cleaned up successfully")

        rc = fs_util.client_clean_up(
            clients,
            [],
            fuse_mounting_dir,
            "umount",
        )
        if rc != 0:
            raise CommandFailed("kernel clients cleanup failed")
        log.info("kernel clients cleaned up successfully")
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
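The mount_test_case callback used above is not defined in this example. The sketch below is only an assumption about its contract, namely that it takes the client list and a mount path and exercises the mount; the body and file name are illustrative, not the real helper.

def mount_test_case(clients, mounting_dir):
    # Hypothetical sanity check, not the real helper: create, stat and remove a
    # file on the mounted path from the first client to confirm the mount works.
    client = clients[0]
    client.exec_command(sudo=True, cmd=f"touch {mounting_dir}sanity_testfile")
    client.exec_command(sudo=True, cmd=f"stat {mounting_dir}sanity_testfile")
    client.exec_command(sudo=True, cmd=f"rm -f {mounting_dir}sanity_testfile")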
Example #2
    def remove_subvolumegroup(
        self, client, vol_name, group_name, validate=True, **kwargs
    ):
        """
        Removes the subvolume group group_name from the volume vol_name.
        It also supports the optional arguments below.
        Args:
            client:
            vol_name:
            group_name:
            validate:
            **kwargs:
                force
                check_ec : boolean

        Returns:
            Returns the cmd_out and cmd_rc for remove cmd
        """
        rmsubvolumegroup_cmd = f"ceph fs subvolumegroup rm {vol_name} {group_name}"
        if kwargs.get("force"):
            rmsubvolumegroup_cmd += " --force"
        cmd_out, cmd_rc = client.exec_command(
            sudo=True, cmd=rmsubvolumegroup_cmd, check_ec=kwargs.get("check_ec", True)
        )
        if validate:
            out, rc = client.exec_command(
                sudo=True, cmd=f"ceph fs subvolumegroup ls {vol_name} --format json"
            )
            subvolumegroup_ls = json.loads(out.read().decode())
            if group_name in [i["name"] for i in subvolumegroup_ls]:
                raise CommandFailed(f"Deletion of subvolume group: {group_name} failed")
        return cmd_out, cmd_rc
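A usage sketch for this helper; the fs_util and client1 handles and the names below are assumptions made for illustration:

# Force-remove the group and inspect the exit status ourselves instead of
# letting exec_command raise on a failure.
cmd_out, cmd_rc = fs_util.remove_subvolumegroup(
    client1, "cephfs", "subvolgroup_1", force=True, check_ec=False
)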
Example #3
    def remove_subvolume(self, client, vol_name, subvol_name, validate=True, **kwargs):
        """

        Args:
            client:
            vol_name:
            subvol_name:
            validate:
            **kwargs:
                group_name : str
                retain-snapshots : boolean
                force : boolean

        Returns:

        """
        rmsubvolume_cmd = f"ceph fs subvolume rm {vol_name} {subvol_name}"
        if kwargs.get("group_name"):
            rmsubvolume_cmd += f" --group_name {kwargs.get('group_name')}"
        if kwargs.get("retain-snapshots"):
            rmsubvolume_cmd += " --retain-snapshots"
        if kwargs.get("force"):
            rmsubvolume_cmd += " --force"
        client.exec_command(sudo=True, cmd=rmsubvolume_cmd)
        if validate:
            listsubvolumes_cmd = f"ceph fs subvolume ls {vol_name}"
            if kwargs.get("group_name"):
                listsubvolumes_cmd += f" --group_name {kwargs.get('group_name')}"
            out, rc = client.exec_command(
                sudo=True, cmd=f"{listsubvolumes_cmd} --format json"
            )
            subvolume_ls = json.loads(out.read().decode())
            if subvol_name in [i["name"] for i in subvolume_ls]:
                raise CommandFailed(f"Deletion of clone : {subvol_name} failed")
Example #4
    def create_snapshot(
        self, client, vol_name, subvol_name, snap_name, validate=True, **kwargs
    ):
        """

        Args:
            client:
            vol_name:
            subvol_name:
            snap_name:
            validate:
            **kwargs:
                group_name : str
        Returns:

        """
        snapshot_cmd = (
            f"ceph fs subvolume snapshot create {vol_name} {subvol_name} {snap_name}"
        )
        if kwargs.get("group_name"):
            snapshot_cmd += f" --group_name {kwargs.get('group_name')}"
        client.exec_command(sudo=True, cmd=snapshot_cmd)
        if validate:
            listsnapshot_cmd = f"ceph fs subvolume snapshot ls {vol_name} {subvol_name}"
            if kwargs.get("group_name"):
                listsnapshot_cmd += f" --group_name {kwargs.get('group_name')}"
            out, rc = client.exec_command(
                sudo=True, cmd=f"{listsnapshot_cmd} --format json"
            )
            snapshot_ls = json.loads(out.read().decode())
            if snap_name not in [i["name"] for i in snapshot_ls]:
                raise CommandFailed(f"Creation of subvolume : {snap_name} failed")
Example #5
    def remove_subvolumegroup(
        self, client, vol_name, group_name, validate=True, **kwargs
    ):
        """

        Args:
            client:
            vol_name:
            group_name:
            validate:
            **kwargs:
                --force

        Returns:

        """
        rmsubvolumegroup_cmd = f"ceph fs subvolumegroup rm {vol_name} {group_name}"
        if kwargs.get("force"):
            rmsubvolumegroup_cmd += " --force"
        client.exec_command(sudo=True, cmd=rmsubvolumegroup_cmd)
        if validate:
            out, rc = client.exec_command(
                sudo=True, cmd=f"ceph fs subvolumegroup ls {vol_name} --format json"
            )
            subvolumegroup_ls = json.loads(out.read().decode())
            if group_name in [i["name"] for i in subvolumegroup_ls]:
                raise CommandFailed(f"Deletion of subvolume group: {group_name} failed")
Example #6
    def create_subvolumegroup(
        self, client, vol_name, group_name, validate=True, **kwargs
    ):
        """
        Args:
            vol_name:
            group_name:
            **kwargs:
                pool_layout
                uid
                gid
                mode
                validate = True
        Returns:
        """
        subvolumegroup_cmd = f"ceph fs subvolumegroup create {vol_name} {group_name}"
        if kwargs.get("pool_layout"):
            subvolumegroup_cmd += f" --pool_layout {kwargs.get('pool_layout')}"
        if kwargs.get("uid"):
            subvolumegroup_cmd += f" --uid {kwargs.get('uid')}"
        if kwargs.get("gid"):
            subvolumegroup_cmd += f" --gid {kwargs.get('gid')}"
        if kwargs.get("mode"):
            subvolumegroup_cmd += f" --mode {kwargs.get('mode')}"
        client.exec_command(sudo=True, cmd=subvolumegroup_cmd)
        if validate:
            out, rc = client.exec_command(
                sudo=True, cmd=f"ceph fs subvolumegroup ls {vol_name} --format json"
            )
            subvolumegroup_ls = json.loads(out.read().decode())
            if group_name not in [i["name"] for i in subvolumegroup_ls]:
                raise CommandFailed(f"Creation of subvolume group: {group_name} failed")
Example #7
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573878   Verify the option to enable/disable multiFS support
    Pre-requisites :
    1. We need at least one client node to execute this test case

    Test Case Flow:
    1. check the enable_multiple flag value
    2. Get total number of filesystems present
    3. Disable enable_multiple if enabled and try creating filesystem
    4. Enable enable_multiple and try creating filesystem
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        client1 = clients[0]
        total_fs = fs_util.get_fs_details(client1)
        if len(total_fs) == 1:
            client1.exec_command(sudo=True,
                                 cmd="ceph fs flag set enable_multiple false")
        out, rc = client1.exec_command(sudo=True,
                                       cmd="ceph fs volume create cephfs_new",
                                       check_ec=False)
        if rc == 0:
            raise CommandFailed(
                "We are able to create multipe filesystems even after setting enable_multiple to false"
            )
        log.info(
            "We are not able to create multipe filesystems after setting enable_multiple to false as expected"
        )
        client1.exec_command(sudo=True,
                             cmd="ceph fs flag set enable_multiple true")
        client1.exec_command(sudo=True, cmd="ceph fs volume create cephfs_new")
        log.info(
            "We are able to create multipe filesystems after setting enable_multiple to True as expected"
        )
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    finally:
        commands = [
            "ceph config set mon mon_allow_pool_delete true",
            "ceph fs volume rm cephfs_new --yes-i-really-mean-it",
        ]
        for command in commands:
            client1.exec_command(sudo=True, cmd=command)
Example #8
def validate_fs_info(expected_fs, output):
    """
    Validate fs information restriction for clients
    :param expected_fs - only fs expected to show in output
    :param output - output of 'ceph fs ls'
    """
    if len(output) != 1 or output[0]["name"] != expected_fs:
        raise CommandFailed("fs is not matching with authorized FS")
    log.info(f"File systems Information is restricted to {expected_fs} only")
Example #9
def execute(cmd, fail_ok=False, merge_stderr=False):
    """Executes specified command for the given action."""
    cmdlist = shlex.split(cmd)
    stdout = subprocess.PIPE
    stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
    proc = subprocess.Popen(cmdlist, stdout=stdout, stderr=stderr)
    result, result_err = proc.communicate()
    result = result.decode("utf-8")
    if not fail_ok and proc.returncode != 0:
        raise CommandFailed(proc.returncode, cmd, result, result_err)
    return json.loads(result)
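Because execute() returns json.loads(result), the command passed to it has to emit JSON; a minimal usage sketch, with the command chosen only as an example:

# Run a read-only command on the local node and work with the decoded structure.
volumes = execute("ceph fs ls --format json")
for vol in volumes:
    log.info(f"Found filesystem: {vol['name']}")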
Example #10
    def create_clone(
        self,
        client,
        vol_name,
        subvol_name,
        snap_name,
        target_subvol_name,
        validate=True,
        **kwargs,
    ):
        """
        Creates a clone from the given vol_name, subvol_name, snap_name and target_subvol_name.
        It also supports the optional arguments below.
        Args:
            client:
            vol_name:
            subvol_name:
            snap_name:
            target_subvol_name:
            validate:
            **kwargs:
                group_name
                target_group_name
                pool_layout
                check_ec = True
        Returns:
            Returns the cmd_out and cmd_rc for Create cmd
        """
        clone_cmd = f"ceph fs subvolume snapshot clone {vol_name} {subvol_name} {snap_name} {target_subvol_name}"
        if kwargs.get("group_name"):
            clone_cmd += f" --group_name {kwargs.get('group_name')}"
        if kwargs.get("target_group_name"):
            clone_cmd += f" --target_group_name {kwargs.get('target_group_name')}"
        if kwargs.get("pool_layout"):
            clone_cmd += f" --pool_layout {kwargs.get('pool_layout')}"
        cmd_out, cmd_rc = client.exec_command(
            sudo=True, cmd=clone_cmd, check_ec=kwargs.get("check_ec", True)
        )
        if validate:
            listsubvolumes_cmd = f"ceph fs subvolume ls {vol_name}"
            if kwargs.get("target_group_name"):
                listsubvolumes_cmd += f" --group_name {kwargs.get('target_group_name')}"
            out, rc = client.exec_command(
                sudo=True, cmd=f"{listsubvolumes_cmd} --format json"
            )
            subvolume_ls = json.loads(out.read().decode())
            if target_subvol_name not in [i["name"] for i in subvolume_ls]:
                raise CommandFailed(f"Creation of clone : {target_subvol_name} failed")
        return cmd_out, cmd_rc
Example #11
    def create_subvolume(self, client, vol_name, subvol_name, validate=True, **kwargs):
        """
        Creates a subvolume with the given arguments.
        It also supports the optional arguments below.
        Args:
            client:
            vol_name:
            subvol_name:
            validate:
            **kwargs:
                size : str in mb
                group_name : str
                pool_layout : str
                uid : str
                gid : str
                mode : str
                namespace-isolated : boolean
                check_ec = True
        Returns:
            Returns the cmd_out and cmd_rc for Create cmd
        """
        subvolume_cmd = f"ceph fs subvolume create {vol_name} {subvol_name}"
        if kwargs.get("size"):
            subvolume_cmd += f" --size {kwargs.get('size')}"
        if kwargs.get("group_name"):
            subvolume_cmd += f" --group_name {kwargs.get('group_name')}"
        if kwargs.get("pool_layout"):
            subvolume_cmd += f" --pool_layout {kwargs.get('pool_layout')}"
        if kwargs.get("uid"):
            subvolume_cmd += f" --uid {kwargs.get('uid')}"
        if kwargs.get("gid"):
            subvolume_cmd += f" --gid {kwargs.get('gid')}"
        if kwargs.get("mode"):
            subvolume_cmd += f" --mode {kwargs.get('mode')}"
        if kwargs.get("namespace-isolated"):
            subvolume_cmd += " --namespace-isolated"
        cmd_out, cmd_rc = client.exec_command(
            sudo=True, cmd=subvolume_cmd, check_ec=kwargs.get("check_ec", True)
        )
        if validate:
            listsubvolumes_cmd = f"ceph fs subvolume ls {vol_name}"
            if kwargs.get("group_name"):
                listsubvolumes_cmd += f" --group_name {kwargs.get('group_name')}"
            out, rc = client.exec_command(
                sudo=True, cmd=f"{listsubvolumes_cmd} --format json"
            )
            subvolume_ls = json.loads(out.read().decode())
            if subvol_name not in [i["name"] for i in subvolume_ls]:
                raise CommandFailed(f"Creation of subvolume : {subvol_name} failed")
        return cmd_out, cmd_rc
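Taken together, these helpers cover the usual subvolume, snapshot, clone sequence; a sketch of that flow, with every name assumed for illustration:

# Subvolume -> snapshot -> clone, using the helpers defined above.
fs_util.create_subvolume(client1, "cephfs", "subvol_1")
fs_util.create_snapshot(client1, "cephfs", "subvol_1", "snap_1")
fs_util.create_clone(client1, "cephfs", "subvol_1", "snap_1", "clone_1")

Cloning is asynchronous in Ceph, so tests that use the clone right away typically poll ceph fs clone status until it reports the clone as complete.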
Example #12
    def create_clone(
        self,
        client,
        vol_name,
        subvol_name,
        snap_name,
        target_subvol_name,
        validate=True,
        **kwargs,
    ):
        """

        Args:
            client:
            vol_name:
            subvol_name:
            snap_name:
            target_subvol_name:
            validate:
            **kwargs:
                group_name
                target_group_name
                pool_layout

        Returns:

        """
        clone_cmd = f"ceph fs subvolume snapshot clone {vol_name} {subvol_name} {snap_name} {target_subvol_name}"
        if kwargs.get("group_name"):
            clone_cmd += f" --group_name {kwargs.get('group_name')}"
        if kwargs.get("target_group_name"):
            clone_cmd += f" --target_group_name {kwargs.get('target_group_name')}"
        if kwargs.get("pool_layout"):
            clone_cmd += f" --pool_layout {kwargs.get('pool_layout')}"
        client.exec_command(sudo=True, cmd=clone_cmd)
        if validate:
            listsubvolumes_cmd = f"ceph fs subvolume ls {vol_name}"
            if kwargs.get("target_group_name"):
                listsubvolumes_cmd += (
                    f" --target_group_name {kwargs.get('target_group_name')}"
                )
            out, rc = client.exec_command(
                sudo=True, cmd=f"{listsubvolumes_cmd} --format json"
            )
            subvolume_ls = json.loads(out.read().decode())
            if target_subvol_name not in [i["name"] for i in subvolume_ls]:
                raise CommandFailed(f"Creation of clone : {target_subvol_name} failed")
Example #13
    def create_fs(self, client, vol_name, validate=True, **kwargs):
        """

        Args:
            client:
            vol_name:

        Returns:

        """
        fs_cmd = f"ceph fs create {vol_name}"
        client.exec_command(sudo=True, cmd=fs_cmd)
        if validate:
            out, rc = client.exec_command(sudo=True, cmd="ceph fs ls --format json")
            volname_ls = json.loads(out.read().decode())
            if vol_name not in [i["name"] for i in volname_ls]:
                raise CommandFailed(f"Creation of filesystem: {vol_name} failed")
Example #14
def run(ceph_cluster, **kw):
    """
    Test Cases Covered:
    CEPH-83573873   Try creating 2 Filesystem using same Pool(negative)
    Pre-requisites :
    1. We need at least one client node to execute this test case

    Test Case Flow:
    1. Check if cephfs filesystem is present, if not create cephfs
    2. collect data pool and meta datapool info of cephfs
    3. try creating cephfs1 with data pool and meta datapool of cephfs
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        if not clients:
            log.info(
                f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
            )
            return 1
        client1 = clients[0]
        fs_details = fs_util.get_fs_info(client1)
        if not fs_details:
            fs_util.create_fs(client1, "cephfs")
        fs_details = fs_util.get_fs_info(client1)
        out, rc = client1.exec_command(
            sudo=True,
            cmd=
            f"ceph fs new cephfs1 {fs_details['metadata_pool_name']} {fs_details['data_pool_name']}",
            check_ec=False,
        )
        if rc == 0:
            raise CommandFailed(
                "We are able to create filesystems with same pool used by other filesystem"
            )
        return 0
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #15
    def create_subvolume(self, client, vol_name, subvol_name, validate=True, **kwargs):
        """

        Args:
            client:
            vol_name:
            subvol_name:
            validate:
            **kwargs:
                size : str in mb
                group_name : str
                pool_layout : str
                uid : str
                gid : str
                mode : str
                namespace-isolated : boolean
        Returns:

        """
        subvolume_cmd = f"ceph fs subvolume create {vol_name} {subvol_name}"
        if kwargs.get("size"):
            subvolume_cmd += f" --size {kwargs.get('size')}"
        if kwargs.get("group_name"):
            subvolume_cmd += f" --group_name {kwargs.get('group_name')}"
        if kwargs.get("pool_layout"):
            subvolume_cmd += f" --pool_layout {kwargs.get('pool_layout')}"
        if kwargs.get("uid"):
            subvolume_cmd += f" --uid {kwargs.get('uid')}"
        if kwargs.get("gid"):
            subvolume_cmd += f" --gid {kwargs.get('gid')}"
        if kwargs.get("mode"):
            subvolume_cmd += f" --mode {kwargs.get('mode')}"
        if kwargs.get("namespace-isolated"):
            subvolume_cmd += " --namespace-isolated"
        client.exec_command(sudo=True, cmd=subvolume_cmd)
        if validate:
            listsubvolumes_cmd = f"ceph fs subvolume ls {vol_name}"
            if kwargs.get("group_name"):
                listsubvolumes_cmd += f" --group_name {kwargs.get('group_name')}"
            out, rc = client.exec_command(
                sudo=True, cmd=f"{listsubvolumes_cmd} --format json"
            )
            subvolume_ls = json.loads(out.read().decode())
            if subvol_name not in [i["name"] for i in subvolume_ls]:
                raise CommandFailed(f"Creation of subvolume : {subvol_name} failed")
Example #16
    def remove_fs(self, client, vol_name, validate=True, **kwargs):
        """

        Args:
            client:
            vol_name:
            validate:
            **kwargs:

        Returns:

        """
        rmvolume_cmd = f"ceph fs volume rm {vol_name} --yes-i-really-mean-it"
        client.exec_command(sudo=True, cmd=rmvolume_cmd)
        if validate:
            out, rc = client.exec_command(sudo=True, cmd="ceph fs ls --format json")
            volname_ls = json.loads(out.read().decode())
            if vol_name in [i["name"] for i in volname_ls]:
                raise CommandFailed(f"Creation of filesystem: {vol_name} failed")
Example #17
    def remove_snapshot(
        self, client, vol_name, subvol_name, snap_name, validate=True, **kwargs
    ):
        """
        Removes the snapshot snap_name from subvolume subvol_name in volume vol_name.
        It also supports the optional arguments below.
        Args:
            client:
            vol_name:
            subvol_name:
            snap_name:
            validate:
            **kwargs:
                group_name : str
                force : boolean
                check_ec : boolean

        Returns:
            Returns the cmd_out and cmd_rc for remove cmd
        """
        rmsnapshot_cmd = (
            f"ceph fs subvolume snapshot rm {vol_name} {subvol_name} {snap_name}"
        )
        if kwargs.get("group_name"):
            rmsnapshot_cmd += f" --group_name {kwargs.get('group_name')}"
        if kwargs.get("force"):
            rmsnapshot_cmd += " --force"
        cmd_out, cmd_rc = client.exec_command(
            sudo=True, cmd=rmsnapshot_cmd, check_ec=kwargs.get("check_ec", True)
        )
        if validate:
            listsnapshot_cmd = f"ceph fs subvolume snapshot ls {vol_name} {subvol_name}"
            if kwargs.get("group_name"):
                listsnapshot_cmd += f" --group_name {kwargs.get('group_name')}"
            out, rc = client.exec_command(
                sudo=True, cmd=f"{listsnapshot_cmd} --format json"
            )
            snapshot_ls = json.loads(out.read().decode())
            if snap_name in [i["name"] for i in snapshot_ls]:
                raise CommandFailed(f"Remove of snapshot : {snap_name} failed")
        return cmd_out, cmd_rc
Example #18
    def create_subvolumegroup(
        self, client, vol_name, group_name, validate=True, **kwargs
    ):
        """
        Creates a subvolume group with vol_name and group_name.
        It also supports the optional arguments below.
        Args:
            vol_name:
            group_name:
            **kwargs:
                pool_layout
                uid
                gid
                mode
                validate = True
                check_ec = True
        Returns:
            Returns the cmd_out and cmd_rc for Create cmd
        """
        subvolumegroup_cmd = f"ceph fs subvolumegroup create {vol_name} {group_name}"
        if kwargs.get("pool_layout"):
            subvolumegroup_cmd += f" --pool_layout {kwargs.get('pool_layout')}"
        if kwargs.get("uid"):
            subvolumegroup_cmd += f" --uid {kwargs.get('uid')}"
        if kwargs.get("gid"):
            subvolumegroup_cmd += f" --gid {kwargs.get('gid')}"
        if kwargs.get("mode"):
            subvolumegroup_cmd += f" --mode {kwargs.get('mode')}"
        cmd_out, cmd_rc = client.exec_command(
            sudo=True, cmd=subvolumegroup_cmd, check_ec=kwargs.get("check_ec", True)
        )
        if validate:
            out, rc = client.exec_command(
                sudo=True, cmd=f"ceph fs subvolumegroup ls {vol_name} --format json"
            )
            subvolumegroup_ls = json.loads(out.read().decode())
            if group_name not in [i["name"] for i in subvolumegroup_ls]:
                raise CommandFailed(f"Creation of subvolume group: {group_name} failed")
        return cmd_out, cmd_rc
Example #19
    def create_snapshot(
        self, client, vol_name, subvol_name, snap_name, validate=True, **kwargs
    ):
        """
        Creates a snapshot with vol_name, subvol_name and snap_name.
        It also supports the optional arguments below.
        Args:
            client:
            vol_name:
            subvol_name:
            snap_name:
            validate:
            **kwargs:
                group_name : str
                check_ec = True
        Returns:
            Returns the cmd_out and cmd_rc for Create cmd
        """
        snapshot_cmd = (
            f"ceph fs subvolume snapshot create {vol_name} {subvol_name} {snap_name}"
        )
        if kwargs.get("group_name"):
            snapshot_cmd += f" --group_name {kwargs.get('group_name')}"
        cmd_out, cmd_rc = client.exec_command(
            sudo=True, cmd=snapshot_cmd, check_ec=kwargs.get("check_ec", True)
        )
        if validate:
            listsnapshot_cmd = f"ceph fs subvolume snapshot ls {vol_name} {subvol_name}"
            if kwargs.get("group_name"):
                listsnapshot_cmd += f" --group_name {kwargs.get('group_name')}"
            out, rc = client.exec_command(
                sudo=True, cmd=f"{listsnapshot_cmd} --format json"
            )
            snapshot_ls = json.loads(out.read().decode())
            if snap_name not in [i["name"] for i in snapshot_ls]:
                raise CommandFailed(f"Creation of snapshot: {snap_name} failed")
        return cmd_out, cmd_rc
Example #20
    def create_fs(self, client, vol_name, validate=True, **kwargs):
        """
        Creates a CephFS volume with the given vol_name.
        It validates the creation operation by default.
        It also supports the optional arguments below.
        Args:
            client:
            vol_name:
            validate:
            **kwargs:
                check_ec = True
        Returns:

        """
        fs_cmd = f"ceph fs create {vol_name}"
        cmd_out, cmd_rc = client.exec_command(
            sudo=True, cmd=fs_cmd, check_ec=kwargs.get("check_ec", True)
        )
        if validate:
            out, rc = client.exec_command(sudo=True, cmd="ceph fs ls --format json")
            volname_ls = json.loads(out.read().decode())
            if vol_name not in [i["name"] for i in volname_ls]:
                raise CommandFailed(f"Creation of filesystem: {vol_name} failed")
        return cmd_out, cmd_rc
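A usage sketch; the volume name and the fs_util and client1 handles are assumptions. Passing check_ec=False instead lets a test tolerate a failing command and judge the exit status itself, the way Example #14 does for its negative scenario:

# Create a volume and rely on the built-in validation against "ceph fs ls".
cmd_out, cmd_rc = fs_util.create_fs(client1, "cephfs_new")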
Example #21
    def remove_snapshot(
        self, client, vol_name, subvol_name, snap_name, validate=True, **kwargs
    ):
        """

        Args:
            client:
            vol_name:
            subvol_name:
            snap_name:
            validate:
            **kwargs:
                group_name : str
                force : boolean

        Returns:

        """
        rmsnapshot_cmd = (
            f"ceph fs subvolume snapshot rm {vol_name} {subvol_name} {snap_name}"
        )
        if kwargs.get("group_name"):
            rmsnapshot_cmd += f" --group_name {kwargs.get('group_name')}"
        if kwargs.get("force"):
            rmsnapshot_cmd += " --force"
        client.exec_command(sudo=True, cmd=rmsnapshot_cmd)
        if validate:
            listsnapshot_cmd = f"ceph fs subvolume snapshot ls {vol_name} {subvol_name}"
            if kwargs.get("group_name"):
                listsnapshot_cmd += f" --group_name {kwargs.get('group_name')}"
            out, rc = client.exec_command(
                sudo=True, cmd=f"{listsnapshot_cmd} --format json"
            )
            snapshot_ls = json.loads(out.read().decode())
            if snap_name in [i["name"] for i in snapshot_ls]:
                raise CommandFailed(f"Remove of snapshot : {snap_name} failed")
Example #22
    def remove_fs(self, client, vol_name, validate=True, **kwargs):
        """
        Removes the filesystem vol_name.
        It also supports the optional arguments below.
        Args:
            client:
            vol_name:
            validate:
            **kwargs:
                check_ec : boolean

        Returns:
            Returns the cmd_out and cmd_rc for remove cmd
        """
        rmvolume_cmd = f"ceph fs volume rm {vol_name} --yes-i-really-mean-it"
        cmd_out, cmd_rc = client.exec_command(
            sudo=True, cmd=rmvolume_cmd, check_ec=kwargs.get("check_ec", True)
        )
        if validate:
            out, rc = client.exec_command(sudo=True, cmd="ceph fs ls --format json")
            volname_ls = json.loads(out.read().decode())
            if vol_name in [i["name"] for i in volname_ls]:
                raise CommandFailed(f"Creation of filesystem: {vol_name} failed")
        return cmd_out, cmd_rc
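When a test tears down what it created, these remove helpers are generally called in the reverse order of creation, snapshots first, then subvolumes, groups and finally the volume; a cleanup sketch with assumed names:

# Reverse-order cleanup of the objects a test created earlier.
fs_util.remove_snapshot(client1, "cephfs", "subvol_1", "snap_1")
fs_util.remove_subvolume(client1, "cephfs", "subvol_1")
fs_util.remove_subvolumegroup(client1, "cephfs", "subvolgroup_1", force=True)
fs_util.remove_fs(client1, "cephfs")

Removing the volume also assumes mon_allow_pool_delete is enabled, as the cleanup block in Example #7 shows.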
Example #23
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11298"
        source_dir = "/mnt/source"
        target_dir = "target"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        for client in client_info["clients"]:
            client.exec_command(cmd="sudo rm -rf  %s" % source_dir)
            client.exec_command(cmd="sudo mkdir %s" % source_dir)

        for client in client_info["clients"]:
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], target_dir))
            break
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    source_dir,
                    "",
                    0,
                    100,
                    iotype="touch")
            p.spawn(fs_util.read_write_IO, client1, source_dir, "g", "write")
            p.spawn(fs_util.stress_io,
                    client2,
                    source_dir,
                    "",
                    0,
                    10,
                    iotype="dd")
            p.spawn(fs_util.stress_io,
                    client3,
                    source_dir,
                    "",
                    0,
                    10,
                    iotype="smallfile")
            p.spawn(fs_util.stress_io,
                    client4,
                    source_dir,
                    "",
                    0,
                    1,
                    iotype="fio")
            for op in p:
                return_counts1, rc = op

        with parallel() as p:
            p.spawn(
                fs_util.rsync,
                client1,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )
            p.spawn(
                fs_util.rsync,
                client2,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )
            p.spawn(
                fs_util.rsync,
                client3,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )

            p.spawn(
                fs_util.rsync,
                client4,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )
            for op in p:
                return_counts2, rc = op

        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                target_dir,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                target_dir,
                0,
                11,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                target_dir,
                0,
                3,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                target_dir,
                0,
                1,
                iotype="fio",
            )
            for op in p:
                return_counts3, rc = op
        with parallel() as p:
            p.spawn(
                fs_util.rsync,
                client1,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            p.spawn(
                fs_util.rsync,
                client2,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            p.spawn(
                fs_util.rsync,
                client3,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            p.spawn(
                fs_util.rsync,
                client4,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            for op in p:
                return_counts4, rc = op

        rc = (list(return_counts1.values()) + list(return_counts2.values()) +
              list(return_counts3.values()) + list(return_counts4.values()))
        rc_set = set(rc)
        if len(rc_set) == 1:
            print("Test case CEPH-%s passed" % (tc))
        else:
            print(("Test case CEPH-%s failed" % (tc)))
        log.info("Test completed for CEPH-%s" % (tc))
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        else:
            return 1
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #24
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        config = kw.get("config")
        num_of_dirs = config.get("num_of_dirs")
        num_of_dirs = num_of_dirs // 5
        tc = "11228"
        dir_name = "dir"
        test_dir = "testdir/"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)

        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="crefi",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="crefi",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info["mds_nodes"])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            for client in client1:
                client.exec_command(cmd="sudo mkdir %s%s" %
                                    (client_info["mounting_dir"], test_dir))

            with parallel() as p:
                p.spawn(
                    fs_util.mkdir_bulk,
                    client1,
                    0,
                    num_of_dirs * 2,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 2 + 1,
                    num_of_dirs * 4,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 4 + 1,
                    num_of_dirs * 6,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 6 + 1,
                    num_of_dirs * 8,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 8 + 1,
                    num_of_dirs * 10,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                for op in p:
                    rc = op
            if rc == 0:
                log.info("Directories created successfully")
            else:
                raise CommandFailed("Directory creation failed")

            with parallel() as p:
                p.spawn(
                    fs_util.pinning,
                    client2,
                    0,
                    num_of_dirs * 1,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client3,
                    num_of_dirs * 1,
                    num_of_dirs * 2,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client4,
                    num_of_dirs * 2,
                    num_of_dirs * 3,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client1,
                    num_of_dirs * 3,
                    num_of_dirs * 4,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    0,
                )
                p.spawn(
                    fs_util.pinning,
                    client3,
                    num_of_dirs * 4,
                    num_of_dirs * 5,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    0,
                )

            with parallel() as p:
                p.spawn(
                    fs_util.pinned_dir_io_mdsfailover,
                    client1,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    num_of_dirs * 1,
                    num_of_dirs * 5,
                    10,
                    fs_util.mds_fail_over,
                    client_info["mds_nodes"],
                )
                for op in p:
                    return_counts, rc = op
            with parallel() as p:
                p.spawn(
                    fs_util.pinned_dir_io_mdsfailover,
                    client3,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    num_of_dirs * 7,
                    num_of_dirs * 8,
                    20,
                    fs_util.mds_fail_over,
                    client_info["mds_nodes"],
                )
                for op in p:
                    return_counts, rc = op
            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            print(result)
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
                rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

            else:
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
                rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

            if rc_client == 0 and rc_mds == 0:
                log.info("Cleaning up successfull")
            else:
                return 1
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc_client = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
            rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

        else:
            rc_client = fs_util.client_clean_up(client_info["fuse_clients"],
                                                "",
                                                client_info["mounting_dir"],
                                                "umount")
            rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)

        if rc_client == 0 and rc_mds == 0:
            log.info("Cleaning up successfull")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #25
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "10625,11225"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            log.error("Activate multiple mdss failed")
            return 1
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            dirs, rc = fs_util.mkdir(client1, 0, 6,
                                     client_info["mounting_dir"], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split("\n")
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_rename",
                    fnum=1000,
                    fsize=10,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_delete-renamed",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=1000,
                    fsize=10,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[4],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[5],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[6],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_rename",
                    fnum=1,
                    fsize=1000000,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_delete-renamed",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[4],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=1,
                    fsize=1000000,
                )
        dir_name = "!@#$%^&*()-_=+[]{};:,.<>?"
        out, rc = client1[0].exec_command(
            cmd="sudo mkdir '%s%s'" % (client_info["mounting_dir"], dir_name))
        if client1[0].node.exit_status == 0:
            log.info("Directory created")
        else:
            raise CommandFailed("Directory creation failed")
        for client in client_info["fuse_clients"]:
            file_name = "".join(
                random.choice(string.ascii_lowercase + string.digits)
                for _ in range(255))
            client.exec_command(
                cmd="sudo touch '%s%s/%s'" %
                (client_info["mounting_dir"], dir_name, file_name))
        for client in client_info["kernel_clients"]:
            if client.pkg_type == "rpm":
                file_name = "".join(
                    random.choice(string.ascii_lowercase + string.digits)
                    for _ in range(255))
                client.exec_command(
                    cmd="sudo touch '%s%s/%s'" %
                    (client_info["mounting_dir"], dir_name, file_name))
        for num in range(0, 5):
            for client in client_info["fuse_clients"]:
                ops = ["create", "setxattr", "getxattr", "chmod", "rename"]
                for op in ops:
                    client.exec_command(
                        sudo=True,
                        cmd=
                        f"python3 smallfile/smallfile_cli.py --operation {op} --threads 10 --file-size 4 "
                        f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                        f"{client_info['mounting_dir']}{dir_name}",
                        long_running=True,
                        timeout=300,
                    )
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        else:
            return 1
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #26
0
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = '11298'
        source_dir = '/mnt/source'
        target_dir = 'target'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        for client in client_info['clients']:
            client.exec_command(cmd='sudo rm -rf  %s' % source_dir)
            client.exec_command(cmd='sudo mkdir %s' % source_dir)

        for client in client_info['clients']:
            client.exec_command(cmd='sudo mkdir %s%s' %
                                (client_info['mounting_dir'], target_dir))
            break
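        # Populate the local source directory from all clients in parallel
        # (touch, dd, crefi, fio and a large write) before rsync-ing it into
        # the CephFS target directory.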
        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    source_dir,
                    '',
                    0,
                    100,
                    iotype='touch')
            p.spawn(fs_util.read_write_IO, client1, source_dir, 'g', 'write')
            p.spawn(fs_util.stress_io,
                    client2,
                    source_dir,
                    '',
                    0,
                    10,
                    iotype='dd')
            p.spawn(fs_util.stress_io,
                    client3,
                    source_dir,
                    '',
                    0,
                    10,
                    iotype='crefi')
            p.spawn(fs_util.stress_io,
                    client4,
                    source_dir,
                    '',
                    0,
                    1,
                    iotype='fio')
            for op in p:
                return_counts1, rc = op

        with parallel() as p:
            p.spawn(fs_util.rsync, client1, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            p.spawn(fs_util.rsync, client2, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            p.spawn(fs_util.rsync, client3, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            p.spawn(fs_util.rsync, client4, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            for op in p:
                return_counts2, rc = op

        with parallel() as p:
            p.spawn(fs_util.stress_io,
                    client1,
                    client_info['mounting_dir'],
                    target_dir,
                    0,
                    100,
                    iotype='touch')
            p.spawn(fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    target_dir,
                    0,
                    11,
                    iotype='dd')
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    target_dir,
                    0,
                    3,
                    iotype='fio')
            p.spawn(fs_util.stress_io,
                    client4,
                    client_info['mounting_dir'],
                    target_dir,
                    0,
                    1,
                    iotype='fio')
            for op in p:
                return_counts3, rc = op
        with parallel() as p:
            p.spawn(fs_util.rsync, client1,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            p.spawn(fs_util.rsync, client2,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            p.spawn(fs_util.rsync, client3,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            p.spawn(fs_util.rsync, client4,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            for op in p:
                return_counts4, rc = op

        rc = list(return_counts1.values()) + list(return_counts2.values()) + \
            list(return_counts3.values()) + list(return_counts4.values())
        rc_set = set(rc)
        if len(rc_set) == 1:
            print("Test case CEPH-%s passed" % (tc))
        else:
            print(("Test case CEPH-%s failed" % (tc)))
        log.info("Test completed for CEPH-%s" % (tc))
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        else:
            return 1
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            fs_util.client_clean_up(client_info['fuse_clients'],
                                    client_info['kernel_clients'],
                                    client_info['mounting_dir'], 'umount')
        else:
            fs_util.client_clean_up(client_info['fuse_clients'], '',
                                    client_info['mounting_dir'], 'umount')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #27
0
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        config = kw.get('config')
        num_of_dirs = config.get('num_of_dirs')
        tc = '11227'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])

        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                '',
                0,
                2,
                iotype='crefi',
            )
            p.spawn(fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    '',
                    0,
                    2,
                    iotype='crefi')
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify('', return_counts)

        client1[0].exec_command(cmd='sudo mkdir %s%s' %
                                (client_info['mounting_dir'], 'testdir'))

        if result == 'Data validation success':
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info['mds_nodes'])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            num_of_dirs = int(num_of_dirs / 5)
            with parallel() as p:
                p.spawn(fs_util.mkdir_bulk, client1, 0, num_of_dirs * 1,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client2, num_of_dirs * 1 + 1,
                        num_of_dirs * 2,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client1, num_of_dirs * 2 + 1,
                        num_of_dirs * 3,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client2, num_of_dirs * 3 + 1,
                        num_of_dirs * 4,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client1, num_of_dirs * 4 + 1,
                        num_of_dirs * 5,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                for op in p:
                    rc = op
            if rc == 0:
                log.info('Directories created successfully')
            else:
                raise CommandFailed('Directory creation failed')
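            # Drive IO into the new directory ranges from all clients while pinning
            # the first ten directories from client1 (the trailing argument to
            # pinning() appears to be the target MDS rank).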

            with parallel() as p:
                p.spawn(fs_util.max_dir_io, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        num_of_dirs * 1, 10)
                p.spawn(fs_util.max_dir_io, client2,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 1, num_of_dirs * 2, 10)
                rc = fs_util.check_mount_exists(client1[0])
                if rc == 0:
                    fs_util.pinning(client1, 0, 10,
                                    client_info['mounting_dir'] + 'testdir/',
                                    dir_name, 0)

                p.spawn(fs_util.max_dir_io, client3,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 3, num_of_dirs * 4, 10)
                p.spawn(fs_util.max_dir_io, client4,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 4, num_of_dirs * 5, 10)
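            # Keep IO running on the pinned directories while repeatedly failing over
            # the active MDS, then pin further directory ranges and repeat the cycle.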

            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])

            with parallel() as p:
                p.spawn(fs_util.pinning, client2, 10, num_of_dirs * 1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client3, num_of_dirs * 1,
                        num_of_dirs * 2,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client4, num_of_dirs * 2,
                        num_of_dirs * 3,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client1, num_of_dirs * 3,
                        num_of_dirs * 4,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)
                p.spawn(fs_util.pinning, client3, num_of_dirs * 4,
                        num_of_dirs * 5,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 1)

            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client2,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client3,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client4,
                        client_info['mounting_dir'] + '/testdir/', dir_name, 0,
                        10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])

            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            log.info('Cleaning up!-----')
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc_client = fs_util.client_clean_up(
                    client_info['fuse_clients'], client_info['kernel_clients'],
                    client_info['mounting_dir'], 'umount')
                rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

            else:
                rc_client = fs_util.client_clean_up(
                    client_info['fuse_clients'], '',
                    client_info['mounting_dir'], 'umount')
                rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

            if rc_client == 0 and rc_mds == 0:
                log.info('Cleaning up successful')
            else:
                return 1
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                client_info['kernel_clients'],
                                                client_info['mounting_dir'],
                                                'umount')
            rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)

        else:
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                '',
                                                client_info['mounting_dir'],
                                                'umount')
            rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)
        if rc_client == 0 and rc_mds == 0:
            log.info('Cleaning up successful')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #28
0
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11222"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
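        # The cluster health captured above is compared against a post-IO health
        # check near the end of the test to confirm the workload left the cluster clean.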

        dir1 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        for client in client_info["clients"]:
            log.info("Creating directory:")
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir1))
            log.info("Creating directories with breadth and depth:")
            client.exec_command(
                sudo=True,
                cmd=
                f"python3 smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 --files 1000 "
                f"--files-per-dir 10 --dirs-per-dir 2 --top {client_info['mounting_dir']}{dir1}",
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)

            break

        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result1 = fs_util.rc_verify("", return_counts)
        print(result1)

        for client in client_info["clients"]:
            client.exec_command(cmd="sudo rm -rf %s%s" %
                                (client_info["mounting_dir"], dir1))
            break

        for client in client_info["clients"]:
            log.info("Creating directories with breadth and depth:")
            client.exec_command(
                sudo=True,
                cmd="python3 smallfile/smallfile_cli.py "
                "--operation create --threads 10 "
                " --file-size 4 --files 1000 "
                "--files-per-dir 10 --dirs-per-dir 2"
                " --top %s%s" % (client_info["mounting_dir"], dir1),
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)
            log.info("Renaming the dirs:")
            client.exec_command(
                sudo=True,
                cmd="python3 smallfile/smallfile_cli.py "
                "--operation rename --threads 10 --file-size 4"
                " --file-size 4 --files 1000 "
                "--files-per-dir 10 --dirs-per-dir 2"
                " --top %s%s" % (client_info["mounting_dir"], dir1),
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)

            break
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                dir1,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                dir1,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result2 = fs_util.rc_verify("", return_counts)
        print(result2)
        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        client1[0].exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir_name))
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "read",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client3,
                client_info["mounting_dir"],
                "write",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client2,
                client_info["mounting_dir"],
                "read",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "write",
                "m",
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.read_write_IO,
                client3,
                client_info["mounting_dir"],
                dir_name=dir_name,
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                dir_name,
                0,
                5,
                iotype="smallfile",
            )
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("11223", return_counts)
        print(result)

        if cluster_health_beforeIO == cluster_health_afterIO:
            print("Testcase %s passed" % (tc))
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
            else:
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
            if rc == 0:
                log.info("Cleaning up successfull")
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)

        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #29
0
def run(ceph_cluster, **kw):
    try:
        tc = 'nfs-ganesha'
        nfs_mounting_dir = '/mnt/nfs_mount/'
        log.info("Running cephfs %s test case" % (tc))

        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        nfs_server = client_info['kernel_clients'][0]
        nfs_client = [client_info['kernel_clients'][1]]
        client1 = [client_info['fuse_clients'][0]]
        client2 = [client_info['fuse_clients'][1]]
        client3 = [client_info['kernel_clients'][0]]
        client4 = [client_info['kernel_clients'][1]]
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")

        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")

        rc3 = fs_util.kernel_mount(
            client3,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4,
            client_info['mounting_dir'],
            client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        dirs, rc = fs_util.mkdir(
            client1, 0, 4, client_info['mounting_dir'], 'dir')
        if rc == 0:
            log.info("Directories created")
        else:
            raise CommandFailed("Directory creation failed")
        dirs = dirs.split('\n')
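        # Install NFS-Ganesha on one kernel client, configure a CephFS export (the
        # 'admin' argument is presumably the cephx user), and mount that export on
        # the other kernel client; the IO below targets the 'ceph/' pseudo path
        # inside the NFS mount.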

        rc = fs_util.nfs_ganesha_install(nfs_server)
        if rc == 0:
            log.info('NFS ganesha installed successfully')
        else:
            raise CommandFailed('NFS ganesha installation failed')
        rc = fs_util.nfs_ganesha_conf(nfs_server, 'admin')
        if rc == 0:
            log.info('NFS ganesha config added successfully')
        else:
            raise CommandFailed('Adding NFS ganesha config failed')
        rc = fs_util.nfs_ganesha_mount(
            nfs_client[0],
            nfs_mounting_dir,
            nfs_server.node.hostname)
        if rc == 0:
            log.info('NFS-ganesha mount passed')
        else:
            raise CommandFailed('NFS ganesha mount failed')
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[0],
                0,
                5,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[2],
                0,
                5,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[1],
                0,
                1,
                iotype='crefi')
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[3],
                0,
                1,
                iotype='smallfile_create', fnum=1000, fsize=1024)

        for client in nfs_client:
            log.info('Unmounting nfs-ganesha mount on client:')
            client.exec_command(cmd='sudo umount %s -l' % (nfs_mounting_dir))
            log.info('Removing nfs-ganesha mount dir on client:')
            client.exec_command(cmd='sudo rm -rf  %s' % (nfs_mounting_dir))

        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                client_info['kernel_clients'],
                client_info['mounting_dir'],
                'umount')
        else:
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                '',
                client_info['mounting_dir'],
                'umount')
        if rc == 0:
            log.info('Cleaning up successful')
        return 0

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
Example #30
0
def run(ceph_cluster, **kw):
    try:
        start = timeit.default_timer()
        tc = "11219,11224"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        fs_util = FsUtils(ceph_cluster)
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1

        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])

        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            log.error("Activate multiple mdss failed")
            return 1
        client1[0].exec_command(
            sudo=True,
            cmd=f"mkdir {client_info['mounting_dir']}{dir_name}",
        )
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                2,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op

        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            print("Data validation success")
            dirs, rc = fs_util.mkdir(client1, 0, 3,
                                     client_info["mounting_dir"], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split("\n")
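            # Fill the new directories with mixed IO, then create and delete batches
            # of small files, verifying the collected return codes after each phase.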
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    100,
                    iotype="touch",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile",
                )
                for op in p:
                    return_counts, rc = op

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                for op in p:
                    return_counts, rc = op
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_delete",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=10,
                    fsize=1024,
                )
                for op in p:
                    return_counts, rc = op
            cluster_health_afterIO = check_ceph_healthly(
                client_info["mon_node"][0],
                num_of_osds,
                len(client_info["mon_node"]),
                build,
                None,
                300,
            )

            log.info("Execution of Test case CEPH-%s ended" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            if cluster_health_beforeIO == cluster_health_afterIO:
                print(result)

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    5,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile",
                )

            with parallel() as p:
                p.spawn(
                    fs_util.read_write_IO,
                    client1,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[2],
                )
                p.spawn(
                    fs_util.read_write_IO,
                    client2,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )

            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    5,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile",
                )

            with parallel() as p:
                p.spawn(
                    fs_util.read_write_IO,
                    client1,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )
                p.spawn(
                    fs_util.read_write_IO,
                    client2,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )

            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
                if rc == 0:
                    log.info("Cleaning up successfull")
                else:
                    return 1
            else:
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
                if rc == 0:
                    log.info("Cleaning up successfull")
                else:
                    return 1

        log.info("Execution of Test case CEPH-%s ended" % (tc))
        print("Results:")
        result = fs_util.rc_verify(tc, return_counts)
        print(result)
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0

    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"], "umount")
        return 1

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1