    @classmethod
    def mount_volume(cls, mounts):
        """Mount volume

        Args:
            mounts(list): List of mount_objs

        Returns (bool): True if mounting the volume succeeded on all
            mount objs. False otherwise.
        """
        g.log.info("Starting to mount volume %s", cls.volname)
        for mount_obj in mounts:
            g.log.info("Mounting volume '%s:%s' on '%s:%s'",
                       mount_obj.server_system, mount_obj.volname,
                       mount_obj.client_system, mount_obj.mountpoint)
            ret = mount_obj.mount()
            if not ret:
                g.log.error("Failed to mount volume '%s:%s' on '%s:%s'",
                            mount_obj.server_system, mount_obj.volname,
                            mount_obj.client_system, mount_obj.mountpoint)
                return False
            else:
                g.log.info(
                    "Successful in mounting volume '%s:%s' on "
                    "'%s:%s'", mount_obj.server_system, mount_obj.volname,
                    mount_obj.client_system, mount_obj.mountpoint)
        g.log.info("Successful in mounting all mount objs for the volume %s",
                   cls.volname)

        # Get mounts info
        g.log.info("Get mounts Info:")
        log_mounts_info(mounts)

        return True
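
    # Illustrative usage (a sketch, not part of the framework itself): a
    # test's setUpClass could mount all the class mounts and treat a False
    # return as a setup failure, e.g.:
    #
    #     ret = cls.mount_volume(cls.mounts)
    #     if not ret:
    #         raise ExecutionError("Failed to mount volume %s" % cls.volname)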

    @classmethod
    def tearDownClass(cls):
        """Cleanup data from mount and cleanup volume.
        """
        # Log Mounts info
        g.log.info("Log mounts info")
        log_mounts_info(cls.mounts)

        GlusterVolumeBaseClass.tearDownClass.im_func(cls)

    @classmethod
    def tearDownClass(cls, umount_volume=True, cleanup_volume=True,
                      teardown_nfsganesha_cluster=True):
        """Cleanup data from mount, cleanup volume and delete nfs ganesha
           cluster.
        """
        # Log Mounts info
        g.log.info("Log mounts info")
        log_mounts_info(cls.mounts)

        NfsGaneshaVolumeBaseClass.tearDownClass.im_func(
            cls, umount_vol=umount_volume, cleanup_vol=cleanup_volume,
            teardown_nfs_ganesha_cluster=teardown_nfsganesha_cluster)
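
    # Illustrative usage (a sketch; 'ParentBaseClass' is a hypothetical name
    # standing for the class that defines the tearDownClass above): a derived
    # test class can chain into it and keep the volume and ganesha cluster
    # around by passing the flags explicitly, e.g.:
    #
    #     @classmethod
    #     def tearDownClass(cls):
    #         ParentBaseClass.tearDownClass.im_func(
    #             cls, cleanup_volume=False,
    #             teardown_nfsganesha_cluster=False)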

    @classmethod
    def unmount_volume(cls, mounts):
        """Unmount all mounts for the volume

        Args:
            mounts(list): List of mount_objs

        Returns (bool): True if unmounting the volume succeeded on all
            mount objs. False otherwise.
        """
        # Unmount volume
        g.log.info("Starting to UnMount Volume %s", cls.volname)
        for mount_obj in mounts:
            g.log.info("UnMounting volume '%s:%s' on '%s:%s'",
                       mount_obj.server_system, mount_obj.volname,
                       mount_obj.client_system, mount_obj.mountpoint)
            ret = mount_obj.unmount()
            if not ret:
                g.log.error("Failed to unmount volume '%s:%s' on '%s:%s'",
                            mount_obj.server_system, mount_obj.volname,
                            mount_obj.client_system, mount_obj.mountpoint)

                # Get mounts info
                g.log.info("Get mounts Info:")
                log_mounts_info(cls.mounts)

                return False

            g.log.info("Starting to delete the directory path used for "
                       "mounting")
            cmd = ('rm -rf %s' % mount_obj.mountpoint)
            ret, _, err = g.run(mount_obj.client_system,
                                cmd,
                                user=mount_obj.user)
            if ret:
                g.log.error("failed to delete the directory path used for "
                            "mounting %s: %s" % (mount_obj.mountpoint, err))
                return False

            g.log.info("Successful in deleting the directory path used for "
                       "mounting '%s:%s' on '%s:%s'" %
                       (mount_obj.server_system, mount_obj.volname,
                        mount_obj.client_system, mount_obj.mountpoint))

        # Get mounts info
        g.log.info("Get mounts Info:")
        log_mounts_info(mounts)

        return True
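
    # Illustrative usage (a sketch): a test's teardown can skip mounts that
    # are already gone by checking is_mounted() before unmounting, e.g.:
    #
    #     active_mounts = [m for m in cls.mounts if m.is_mounted()]
    #     if active_mounts and not cls.unmount_volume(active_mounts):
    #         raise ExecutionError("Failed to unmount volume %s" % cls.volname)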

    @classmethod
    def setUpClass(cls):
        """Setup volume, export the volume with nfs-ganesha and
            mount the volume.
        """
        NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)

        # Peer probe servers
        ret = peer_probe_servers(cls.mnode, cls.servers)
        if not ret:
            raise ExecutionError("Failed to peer probe servers")

        g.log.info("All peers are in connected state")

        # Peer Status from mnode
        peer_status(cls.mnode)

        for server in cls.servers:
            mount_info = [{
                'protocol': 'glusterfs',
                'mountpoint': '/run/gluster/shared_storage',
                'server': server,
                'client': {
                    'host': server
                },
                'volname': 'gluster_shared_storage',
                'options': ''
            }]

            mount_obj = create_mount_objs(mount_info)[0]
            if not mount_obj.is_mounted():
                ret = mount_obj.mount()
                if not ret:
                    raise ExecutionError(
                        "Unable to mount volume '%s:%s' on '%s:%s'" %
                        (mount_obj.server_system, mount_obj.volname,
                         mount_obj.client_system, mount_obj.mountpoint))

        # Setup Volume
        ret = setup_volume(mnode=cls.mnode,
                           all_servers_info=cls.all_servers_info,
                           volume_config=cls.volume,
                           force=True)
        if not ret:
            raise ExecutionError("Setup volume %s failed", cls.volume)
        time.sleep(10)

        # Export volume with nfs ganesha, if it is not exported already
        vol_option = get_volume_options(cls.mnode,
                                        cls.volname,
                                        option='ganesha.enable')
        if vol_option is None:
            raise ExecutionError("Failed to get ganesha.enable volume option "
                                 "for %s " % cls.volume)
        if vol_option['ganesha.enable'] != 'on':
            ret, out, err = export_nfs_ganesha_volume(mnode=cls.mnode,
                                                      volname=cls.volname)
            if ret != 0:
                raise ExecutionError(
                    "Failed to export volume %s "
                    "as NFS export", cls.volname)
            time.sleep(5)

        ret = wait_for_nfs_ganesha_volume_to_get_exported(
            cls.mnode, cls.volname)
        if not ret:
            raise ExecutionError("Failed to export volume %s. volume is "
                                 "not listed in showmount" % cls.volname)
        else:
            g.log.info("Volume %s is exported successfully" % cls.volname)

        # Log Volume Info and Status
        ret = log_volume_info_and_status(cls.mnode, cls.volname)
        if not ret:
            raise ExecutionError("Logging volume %s info and status failed",
                                 cls.volname)

        # Create Mounts
        _rc = True
        for mount_obj in cls.mounts:
            ret = mount_obj.mount()
            if not ret:
                g.log.error("Unable to mount volume '%s:%s' on '%s:%s'",
                            mount_obj.server_system, mount_obj.volname,
                            mount_obj.client_system, mount_obj.mountpoint)
                _rc = False
        if not _rc:
            raise ExecutionError("Mounting volume %s on few clients failed",
                                 cls.volname)

        # Get info of mount before the IO
        log_mounts_info(cls.mounts)
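
    # For reference, cls.volume passed to setup_volume() above is a volume
    # config dict whose exact schema comes from the framework config; the
    # shape below is only an illustrative assumption:
    #
    #     cls.volume = {
    #         'name': 'testvol',
    #         'servers': cls.servers,
    #         'voltype': {'type': 'replicated',
    #                     'replica_count': 3,
    #                     'transport': 'tcp'},
    #     }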

    @classmethod
    def setUpClass(cls, mount_vol=True):
        """Setup volume, share/export the volume for cifs/nfs protocols
            and mount the volume.
        """
        GlusterBaseClass.setUpClass.im_func(cls)

        # Validate if peer is connected from all the servers
        for server in cls.servers:
            ret = is_peer_connected(server, cls.servers)
            if not ret:
                raise ExecutionError("Validating Peers to be in Cluster "
                                     "Failed")
        g.log.info("All peers are in connected state")

        # Peer Status from mnode
        peer_status(cls.mnode)

        # Setup Volume
        ret = setup_volume(mnode=cls.mnode,
                           all_servers_info=cls.all_servers_info,
                           volume_config=cls.volume, force=True)
        if not ret:
            raise ExecutionError("Setup volume %s failed", cls.volname)
        time.sleep(10)

        # Export/Share the volume based on mount_type
        if cls.mount_type != "glusterfs":
            if "nfs" in cls.mount_type:
                ret = export_volume_through_nfs(
                    mnode=cls.mnode, volname=cls.volname,
                    enable_ganesha=cls.enable_nfs_ganesha)
                if not ret:
                    raise ExecutionError("Failed to export volume %s "
                                         "as NFS export", cls.volname)

                # Set NFS-Ganesha specific volume options
                if cls.enable_nfs_ganesha and cls.nfs_ganesha_export_options:
                    g.log.info("Setting NFS-Ganesha export specific "
                               "volume options")
                    ret = set_volume_options(
                        mnode=cls.mnode, volname=cls.volname,
                        options=cls.nfs_ganesha_export_options)
                    if not ret:
                        raise ExecutionError("Failed to set NFS-Ganesha "
                                             "export specific options on "
                                             "volume %s", cls.volname)
                    g.log.info("Successful in setting NFS-Ganesha export "
                               "specific volume options")

            if "smb" in cls.mount_type or "cifs" in cls.mount_type:
                ret = share_volume_over_smb(mnode=cls.mnode,
                                            volname=cls.volname,
                                            smb_users_info=cls.smb_users_info)
                if not ret:
                    raise ExecutionError("Failed to export volume %s "
                                         "as SMB Share", cls.volname)

                # Set SMB share specific volume options
                if cls.smb_share_options:
                    g.log.info("Setting SMB share specific volume options")
                    ret = set_volume_options(mnode=cls.mnode,
                                             volname=cls.volname,
                                             options=cls.smb_share_options)
                    if not ret:
                        raise ExecutionError("Failed to set SMB share "
                                             "specific options "
                                             "on volume %s", cls.volname)
                    g.log.info("Successful in setting SMB share specific "
                               "volume options")

        # Log Volume Info and Status
        ret = log_volume_info_and_status(cls.mnode, cls.volname)
        if not ret:
            raise ExecutionError("Logging volume %s info and status failed",
                                 cls.volname)

        # Create Mounts
        if mount_vol:
            _rc = True
            g.log.info("Starting to mount volume")
            for mount_obj in cls.mounts:
                ret = mount_obj.mount()
                if not ret:
                    g.log.error("Unable to mount volume '%s:%s' on '%s:%s'",
                                mount_obj.server_system, mount_obj.volname,
                                mount_obj.client_system, mount_obj.mountpoint)
                    _rc = False
            if not _rc:
                raise ExecutionError("Mounting volume %s on few clients "
                                     "failed", cls.volname)
            else:
                g.log.info("Successful in mounting volume on all clients")

            # Get info of mount before the IO
            g.log.info("Get mounts Info:")
            log_mounts_info(cls.mounts)
        else:
            g.log.info("Not Mounting the volume as 'mount_vol' option is "
                       "set to %s", mount_vol)

    @classmethod
    def setUpClass(cls):
        """Setup volume, share/export the volume for cifs/nfs protocols
            and mount the volume.
        """
        GlusterBaseClass.setUpClass.im_func(cls)

        # Validate if peer is connected from all the servers
        for server in cls.servers:
            ret = is_peer_connected(server, cls.servers)
            if not ret:
                raise ExecutionError("Validating Peers to be in Cluster "
                                     "Failed")
        g.log.info("All peers are in connected state")

        # Peer Status from mnode
        peer_status(cls.mnode)

        # Setup Volume
        ret = setup_volume(mnode=cls.mnode,
                           all_servers_info=cls.all_servers_info,
                           volume_config=cls.volume,
                           force=True)
        if not ret:
            raise ExecutionError("Setup volume %s failed", cls.volname)
        time.sleep(10)

        # Export/Share the volume based on mount_type
        if cls.mount_type != "glusterfs":
            if "nfs" in cls.mount_type:
                ret = export_volume_through_nfs(
                    mnode=cls.mnode,
                    volname=cls.volname,
                    enable_ganesha=cls.enable_nfs_ganesha)
                if not ret:
                    raise ExecutionError(
                        "Failed to export volume %s "
                        "as NFS export", cls.volname)

            if "smb" in cls.mount_type or "cifs" in cls.mount_type:
                ret = share_volume_over_smb(mnode=cls.mnode,
                                            volname=cls.volname,
                                            smb_users_info=cls.smb_users_info)
                if not ret:
                    raise ExecutionError(
                        "Failed to export volume %s "
                        "as SMB Share", cls.volname)

        # Log Volume Info and Status
        ret = log_volume_info_and_status(cls.mnode, cls.volname)
        if not ret:
            raise ExecutionError("Logging volume %s info and status failed",
                                 cls.volname)

        # Create Mounts
        _rc = True
        for mount_obj in cls.mounts:
            ret = mount_obj.mount()
            if not ret:
                g.log.error("Unable to mount volume '%s:%s' on '%s:%s'",
                            mount_obj.server_system, mount_obj.volname,
                            mount_obj.client_system, mount_obj.mountpoint)
                _rc = False
        if not _rc:
            raise ExecutionError("Mounting volume %s on few clients failed",
                                 cls.volname)

        # Get info of mount before the IO
        log_mounts_info(cls.mounts)
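
    # For reference (an assumption about how cls.mounts is built elsewhere):
    # the mount objects iterated above are typically created from a config
    # list with create_mount_objs, similar to the shared-storage example
    # earlier, e.g.:
    #
    #     mounts_config = [{'protocol': cls.mount_type,
    #                       'server': cls.mnode,
    #                       'volname': cls.volname,
    #                       'client': {'host': client},
    #                       'mountpoint': '/mnt/%s' % cls.volname,
    #                       'options': ''}
    #                      for client in cls.clients]
    #     cls.mounts = create_mount_objs(mounts_config)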