def wait_for_nfs_ganesha_volume_to_get_unexported(mnode, volname, timeout=120):
    """Waits for the nfs ganesha volume to get unexported

    Args:
        mnode (str): Node on which command has to be executed.
        volname (str): volume name

    Kwargs:
        timeout (int): timeout value in seconds to wait for volume
            to get unexported

    Returns:
        True on success, False otherwise

    Examples:
        >>> wait_for_nfs_ganesha_volume_to_get_unexported("abc.com", "testvol")
    """
    # Poll every 10 seconds until the volume no longer appears in the
    # nfs export list, or the timeout budget is exhausted.
    elapsed = 0
    while elapsed < timeout:
        if not is_volume_exported(mnode, volname, "nfs"):
            return True
        time.sleep(10)
        elapsed += 10

    g.log.error("Failed to unexport volume %s", volname)
    return False
    def tearDownClass(cls,
                      umount_vol=True,
                      cleanup_vol=True,
                      teardown_nfs_ganesha_cluster=True):
        """Teardown the export, mounts and volume.

        Kwargs:
            umount_vol (bool): Unmount every mount in cls.mounts.
            cleanup_vol (bool): Unexport (if exported) and delete
                cls.volname.
            teardown_nfs_ganesha_cluster (bool): Forwarded to the parent
                class teardown to control cluster deletion.

        Raises:
            ExecutionError: On unmount, volume-option fetch, unexport or
                volume cleanup failure.
        """

        # Unmount volume
        if umount_vol:
            _rc = True
            for mount_obj in cls.mounts:
                ret = mount_obj.unmount()
                if not ret:
                    g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'",
                                mount_obj.server_system, mount_obj.volname,
                                mount_obj.client_system, mount_obj.mountpoint)
                    _rc = False
            if not _rc:
                raise ExecutionError("Unmount of all mounts are not "
                                     "successful")

        # Cleanup volume
        if cleanup_vol:

            volinfo = get_volume_info(cls.mnode, cls.volname)
            if volinfo is None or cls.volname not in volinfo:
                g.log.info("Volume %s does not exist in %s" %
                           (cls.volname, cls.mnode))
            else:
                # Unexport volume, if it is not unexported already
                vol_option = get_volume_options(cls.mnode,
                                                cls.volname,
                                                option='ganesha.enable')
                if vol_option is None:
                    # Fixed: original referenced cls.volume, which is not
                    # defined anywhere in this class (AttributeError).
                    raise ExecutionError("Failed to get ganesha.enable volume "
                                         " option for %s " % cls.volname)
                if vol_option['ganesha.enable'] != 'off':
                    if is_volume_exported(cls.mnode, cls.volname, "nfs"):
                        ret, out, err = unexport_nfs_ganesha_volume(
                            mnode=cls.mnode, volname=cls.volname)
                        if ret != 0:
                            raise ExecutionError(
                                "Failed to unexport volume %s" % cls.volname)
                        # Give the unexport a moment to settle before
                        # querying the export list below.
                        time.sleep(5)
                else:
                    g.log.info("Volume %s is unexported already" % cls.volname)

                # Log the current export list for debugging; output ignored.
                _, _, _ = g.run(cls.mnode, "showmount -e")

            ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
            if not ret:
                # Fixed: original passed volname as a second exception arg
                # instead of %-formatting it into the message.
                raise ExecutionError("cleanup volume %s failed" % cls.volname)

        # All Volume Info
        volume_info(cls.mnode)

        # NOTE(review): im_func is Python 2 only; under Python 3 call the
        # unbound function directly — confirm the project's interpreter.
        (NfsGaneshaClusterSetupClass.tearDownClass.im_func(
            cls, delete_nfs_ganesha_cluster=teardown_nfs_ganesha_cluster))
# Example #3
# 0
def export_volume_through_nfs(mnode, volname, enable_ganesha=False,
                              time_delay=30):
    """Export the volume through nfs

    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): volume name
        enable_ganesha (bool): Enable ganesha for the volume.
        time_delay (int): Time to wait after the volume set operations
            to validate whether the volume is exported or not.

    Returns:
        bool: If volume is successfully exported through nfs returns True.
            False Otherwise.
    """
    # Pick the volume-set operation and failure message for the chosen
    # export path: nfs-ganesha vs. the in-built gluster nfs server.
    if enable_ganesha:
        option_pair = "ganesha.enable on"
        err_msg = "Failed to enable nfs ganesha for volume %s"
    else:
        option_pair = "nfs.disable off"
        err_msg = "Failed to enable nfs for the volume %s"

    set_cmd = ("gluster volume set %s %s --mode=script"
               % (volname, option_pair))
    ret, _, _ = g.run(mnode, set_cmd)
    if ret != 0:
        g.log.error(err_msg, volname)
        return False

    # Allow the export list to refresh before validating.
    time.sleep(time_delay)

    # Verify if volume is exported
    if not is_volume_exported(mnode, volname, "nfs"):
        g.log.info("Volume %s is not exported as 'nfs' export", volname)
        return False

    return True
    def tearDown(self):
        """Unexport and delete the five test volumes (nfsvol0..nfsvol4),
        then delegate to the base class teardown.

        Raises:
            ExecutionError: If fetching ganesha.enable, unexporting, or
                cleaning up any of the volumes fails.
        """

        # Clean up the volumes created specific for this tests.
        for i in range(5):
            volname = "nfsvol" + str(i)
            volinfo = get_volume_info(self.mnode, volname)
            if volinfo is None or volname not in volinfo:
                # Nothing to clean for a volume that was never created.
                g.log.info("Volume %s does not exist in %s", volname,
                           self.mnode)
                continue

            # Unexport volume, if it is not unexported already
            vol_option = get_volume_options(self.mnode,
                                            volname,
                                            option='ganesha.enable')
            if vol_option is None:
                raise ExecutionError("Failed to get ganesha.enable volume "
                                     " option for %s " % volname)
            if vol_option['ganesha.enable'] != 'off':
                if is_volume_exported(self.mnode, volname, "nfs"):
                    ret, _, _ = unexport_nfs_ganesha_volume(mnode=self.mnode,
                                                            volname=volname)
                    if ret != 0:
                        raise ExecutionError("Failed to unexport volume %s " %
                                             volname)
                    # Give the unexport a moment to settle before the
                    # showmount check and cleanup below.
                    time.sleep(5)
            else:
                g.log.info("Volume %s is unexported already", volname)

            # Log the current export list for debugging; output ignored.
            _, _, _ = g.run(self.mnode, "showmount -e")

            ret = cleanup_volume(mnode=self.mnode, volname=volname)
            if not ret:
                raise ExecutionError("cleanup volume %s failed" % volname)

        # NOTE(review): im_func is Python 2 only — confirm interpreter
        # version before porting this module to Python 3.
        NfsGaneshaIOBaseClass.tearDown.im_func(self)
# Example #5
# 0
def share_volume_over_smb(mnode, volname, smb_users_info):
    """Sharing volumes over SMB

    Args:
        mnode (str): Node on which commands has to be executed.
        volname (str): Name of the volume to be shared.
        smb_users_info (dict): Dict containing users info. Example:
            smb_users_info = {
                'root': {'password': '******',
                         'acl': ''
                         },
                'user1': {'password': '******',
                          'acl': ''
                          },
                'user2': {'password': '******',
                          'acl': ''
                          }
                }

    Returns:
        bool : True on successfully sharing the volume over SMB.
            False otherwise
    """
    g.log.info("Start sharing the volume over SMB")

    # Set volume option 'stat-prefetch' to 'on'.
    cmd = "gluster volume set %s stat-prefetch on" % volname
    ret, _, _ = g.run(mnode, cmd)
    if ret != 0:
        g.log.error("Failed to set the volume option stat-prefetch on")
        return False
    g.log.info("Successfully set 'stat-prefetch' to 'on' on %s", volname)

    # Set volume option 'server.allow-insecure' to 'on'.
    cmd = "gluster volume set %s server.allow-insecure on" % volname
    ret, _, _ = g.run(mnode, cmd)
    if ret != 0:
        g.log.error("Failed to set the volume option server-allow-insecure")
        return False
    g.log.info("Successfully set 'server-allow-insecure' to 'on' on %s",
               volname)

    # Set 'storage.batch-fsync-delay-usec' to 0.
    # This is to ensure ping_pong's lock and I/O coherency tests works on CIFS.
    cmd = ("gluster volume set %s storage.batch-fsync-delay-usec 0" % volname)
    ret, _, _ = g.run(mnode, cmd)
    if ret != 0:
        g.log.error(
            "Failed to set the volume option "
            "'storage.batch-fsync-delay-usec' to 0 on %s", volname)
        return False
    g.log.info("Successfully set 'storage.batch-fsync-delay-usec' to 0 on %s",
               volname)

    # Verify if the volume can be accessed from the SMB/CIFS share.
    # NOTE(review): '-U' is followed by the pipe, so smbclient receives no
    # username argument here — confirm this is intentional (anonymous
    # listing) and not a dropped credential.
    cmd = ("smbclient -L localhost -U | grep -i -Fw gluster-%s " % volname)
    ret, _, _ = g.run(mnode, cmd)
    if ret != 0:
        # Fixed log-message typo: "accessable" -> "accessible".
        g.log.error("volume '%s' not accessible via SMB/CIFS share", volname)
        return False
    g.log.info("volume '%s' can be accessed from SMB/CIFS share", volname)

    # To verify if the SMB/CIFS share can be accessed by the root/non-root user
    # TBD

    # Enable mounting volumes over SMB
    ret = enable_mounting_volume_over_smb(mnode, volname, smb_users_info)
    if not ret:
        g.log.error("Failed to enable mounting volumes using SMB")
        return False
    # Fixed log-message typo: "SMV" -> "SMB".
    g.log.info(
        "Successfully enabled mounting volumes using SMB for the "
        "smbusers: %s", str(smb_users_info.keys()))

    # Verify if volume is shared
    ret = is_volume_exported(mnode, volname, "smb")
    if not ret:
        g.log.info("Volume %s is not exported as 'cifs/smb' share", volname)
        return False
    g.log.info("Volume %s is exported as 'cifs/smb' share", volname)

    return True
# Example #6
# 0
    def test_auth_invalid_values(self):
        """
        Verify negative scenario in authentication allow and reject options by
        trying to set invalid values.
        Steps:
        1. Create and start volume.
        2. Try to set the value "a/a", "192.{}.1.2", "/d1(a/a)",
           "/d1(192.{}.1.2)" separately in auth.allow option.
        3. Try to set the value "a/a", "192.{}.1.2", "/d1(a/a)",
           "/d1(192.{}.1.2)" separately in auth.reject option.
        4. Steps 2 and 3 should fail due to error "not a valid
           internet-address-list"
        5. Verify volume is exported as nfs.
        6. Try to set the value "a/a", "192.{}.1.2", "/d1(a/a)",
           "/d1(192.{}.1.2)" separately in nfs.rpc-auth-allow option.
        7. Try to set the value "a/a", "192.{}.1.2", "/d1(a/a)",
           "/d1(192.{}.1.2)" separately in nfs.rpc-auth-reject option.
        8. Steps 6 and 7 should fail due to error "not a valid
           mount-auth-address"
        """
        invalid_values = ["a/a", "192.{}.1.2", "/d1(a/a)", "/d1(192.{}.1.2)"]

        if self.mount_type == "glusterfs":
            # Try to set invalid values in auth.allow option.
            ret = self.set_invalid_auth("auth.allow", invalid_values)
            self.assertTrue(ret, "Failure of command to set auth.allow value "
                                 "is not because of invalid values.")
            g.log.info("Successfully verified auth.allow set command using"
                       " invalid values. Command failed as expected.")

            # Try to set invalid values in auth.reject option.
            ret = self.set_invalid_auth("auth.reject", invalid_values)
            self.assertTrue(ret, "Failure of command to set auth.reject value"
                                 " is not because of invalid values.")
            g.log.info("Successfully verified auth.reject set command using"
                       " invalid values. Command failed as expected.")

        if self.mount_type == "nfs":
            # Check whether volume is exported as gnfs
            ret = is_volume_exported(self.mnode, self.volname,
                                     self.mount_type)
            self.assertTrue(ret, "Volume is not exported as nfs")

            # Enable nfs.addr-namelookup option.
            ret = set_volume_options(self.mnode, self.volname,
                                     {"nfs.addr-namelookup": "enable"})
            self.assertTrue(ret, "Failed to enable nfs.addr-namelookup "
                                 "option.")

            # Try to set invalid values in nfs.rpc-auth-allow option.
            ret = self.set_invalid_auth("nfs.rpc-auth-allow", invalid_values)
            self.assertTrue(ret, "Command failure to set nfs.rpc-auth-allow"
                                 " value is not because of invalid values.")
            g.log.info("Successfully verified nfs.rpc-auth-allow set command"
                       " using invalid values. Command failed as expected.")

            # Try to set invalid values in nfs.rpc-auth-reject option.
            # Fixed: the original discarded this call's return value, so the
            # assertion below re-checked the stale result from the
            # nfs.rpc-auth-allow step and could never fail for reject.
            ret = self.set_invalid_auth("nfs.rpc-auth-reject", invalid_values)
            self.assertTrue(ret, "Command failure to set nfs.rpc-auth-reject"
                                 " value is not because of invalid values.")
            g.log.info("Successfully verified nfs.rpc-auth-reject set command"
                       " using invalid values. Command failed as expected.")