def tearDownClass(cls, umount_vol=True, cleanup_vol=True):
        """Teardown the mounts and volume.
        """
        GlusterBaseClass.tearDownClass.im_func(cls)

        # Unmount volume
        if umount_vol:
            _rc = True
            for mount_obj in cls.mounts:
                ret = mount_obj.unmount()
                if not ret:
                    g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'",
                                mount_obj.server_system, mount_obj.volname,
                                mount_obj.client_system, mount_obj.mountpoint)
                    _rc = False
            if not _rc:
                raise ExecutionError("Unmount of all mounts are not "
                                     "successful")

        # Cleanup volume
        if cleanup_vol:
            ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
            if not ret:
                raise ExecutionError("cleanup volume %s failed", cls.volname)

        # All Volume Info
        volume_info(cls.mnode)
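
Because the teardown takes keyword flags, a derived test class can relax the defaults. A minimal sketch of that pattern, using the Python 2 im_func idiom these examples rely on; GlusterVolumeBaseClass stands in for whichever base class defines the teardown above, and the test class name is hypothetical:

from glusto.core import Glusto as g
# Adjust this import to whichever base class actually defines the teardown.
from glustolibs.gluster.gluster_base_class import GlusterVolumeBaseClass


class TestKeepVolumeForDebug(GlusterVolumeBaseClass):
    """Hypothetical test class that leaves its volume behind after the run."""

    @classmethod
    def tearDownClass(cls):
        # Unmount the clients as usual, but skip cleanup_volume() so the
        # volume survives and can be inspected after the test class finishes.
        GlusterVolumeBaseClass.tearDownClass.im_func(cls, cleanup_vol=False)
        g.log.info("Volume %s intentionally left in place", cls.volname)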
Example #2
    def tearDownClass(cls,
                      umount_vol=True,
                      cleanup_vol=True,
                      teardown_nfs_ganesha_cluster=True):
        """Teardown the export, mounts and volume.
        """

        # Unmount volume
        if umount_vol:
            _rc = True
            for mount_obj in cls.mounts:
                ret = mount_obj.unmount()
                if not ret:
                    g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'",
                                mount_obj.server_system, mount_obj.volname,
                                mount_obj.client_system, mount_obj.mountpoint)
                    _rc = False
            if not _rc:
                raise ExecutionError("Unmount of all mounts are not "
                                     "successful")

        # Cleanup volume
        if cleanup_vol:
            volinfo = get_volume_info(cls.mnode, cls.volname)
            if volinfo is None or cls.volname not in volinfo:
                g.log.info("Volume %s does not exist in %s",
                           cls.volname, cls.mnode)
            else:
                # Unexport volume, if it is not unexported already
                vol_option = get_volume_options(cls.mnode,
                                                cls.volname,
                                                option='ganesha.enable')
                if vol_option is None:
                    raise ExecutionError("Failed to get ganesha.enable volume "
                                         " option for %s " % cls.volume)
                if vol_option['ganesha.enable'] != 'off':
                    if is_volume_exported(cls.mnode, cls.volname, "nfs"):
                        ret, out, err = unexport_nfs_ganesha_volume(
                            mnode=cls.mnode, volname=cls.volname)
                        if ret != 0:
                            raise ExecutionError(
                                "Failed to unexport volume %s" % cls.volname)
                        time.sleep(5)
                else:
                    g.log.info("Volume %s is unexported already" % cls.volname)

                _, _, _ = g.run(cls.mnode, "showmount -e")

            ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
            if not ret:
                raise ExecutionError("cleanup volume %s failed", cls.volname)

        # All Volume Info
        volume_info(cls.mnode)

        (NfsGaneshaClusterSetupClass.tearDownClass.im_func(
            cls, delete_nfs_ganesha_cluster=teardown_nfs_ganesha_cluster))
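
The unexport guard above can be pulled out into a small helper so other teardowns can reuse it. A minimal sketch built from the same calls used in this example; the helper name is illustrative and the import paths are assumptions about the glustolibs layout:

import time

from glusto.core import Glusto as g
from glustolibs.gluster.volume_ops import get_volume_options
from glustolibs.gluster.volume_libs import is_volume_exported
from glustolibs.gluster.nfs_ganesha_ops import unexport_nfs_ganesha_volume


def unexport_volume_if_exported(mnode, volname, settle_time=5):
    """Unexport 'volname' over NFS-Ganesha if it is still exported.

    Returns True if the volume ends up unexported, False otherwise.
    """
    vol_option = get_volume_options(mnode, volname, option='ganesha.enable')
    if vol_option is None:
        g.log.error("Failed to get ganesha.enable option for %s", volname)
        return False

    if vol_option['ganesha.enable'] == 'off':
        g.log.info("Volume %s is unexported already", volname)
        return True

    if is_volume_exported(mnode, volname, "nfs"):
        ret, _, _ = unexport_nfs_ganesha_volume(mnode=mnode, volname=volname)
        if ret != 0:
            g.log.error("Failed to unexport volume %s", volname)
            return False
        # Give ganesha a moment to refresh its export list.
        time.sleep(settle_time)
    return True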
Example #3
    def test_ops_when_one_node_is_down(self):

        # pylint: disable=too-many-statements
        """
        Test Case:
        1) Create a N node gluster cluster.
        2) Stop gluster on one node.
        3) Execute gluster peer status on other node.
        4) Execute gluster v list on other node.
        5) Execute gluster v info on other node.
        """

        # Pick a random server index from the list (index 0 is skipped).
        self.random_server = randint(1, len(self.servers) - 1)

        # Stopping glusterd on one node.
        ret = stop_glusterd(self.servers[self.random_server])
        self.assertTrue(ret, "Failed to stop glusterd on one node.")
        g.log.info("Successfully stopped glusterd on one node.")

        # Running peer status on another node.
        ret, _, err = peer_status(self.mnode)
        self.assertEqual(ret, 0, ("Failed to get peer status from %s with "
                                  "error message %s" % (self.mnode, err)))
        g.log.info("Successfully got peer status from %s.", self.mnode)

        # Running volume list on another node.
        ret, _, _ = volume_list(self.mnode)
        self.assertEqual(ret, 0, "Failed to get volume list.")
        g.log.info("Successfully got volume list from %s.", self.mnode)

        # Running volume info on another node.
        ret, _, _ = volume_info(self.mnode)
        self.assertEqual(ret, 0, "Failed to get volume info.")
        g.log.info("Successfully got volume info from %s.", self.mnode)
Example #4
    def test_default_log_level_of_cli(self):
        """
        Test Case:
        1) Create and start a volume
        2) Run volume info command
        3) Run volume status command
        4) Run volume stop command
        5) Run volume start command
        6) Check the default log level of cli.log
        """
        # Check volume info operation
        ret, _, _ = volume_info(self.mnode)
        self.assertEqual(
            ret, 0, "Failed to execute volume info"
            " command on node: %s" % self.mnode)
        g.log.info(
            "Successfully executed the volume info command on"
            " node: %s", self.mnode)

        # Check volume status operation
        ret, _, _ = volume_status(self.mnode)
        self.assertEqual(
            ret, 0, "Failed to execute volume status command"
            " on node: %s" % self.mnode)
        g.log.info(
            "Successfully executed the volume status command"
            " on node: %s", self.mnode)

        # Check volume stop operation
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to stop the volume %s on node: %s" %
            (self.volname, self.mnode))
        g.log.info("Successfully stopped the volume %s on node: %s",
                   self.volname, self.mnode)

        # Check volume start operation
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to start the volume %s on node: %s" %
            (self.volname, self.mnode))
        g.log.info("Successfully started the volume %s on node: %s",
                   self.volname, self.mnode)

        # Check the default log level of cli.log
        cmd = 'cat /var/log/glusterfs/cli.log | grep -F "] D [" | wc -l'
        ret, out, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0, "Failed to execute the command")
        self.assertEqual(
            int(out), 0, "Unexpected: Default log level of "
            "cli.log is not INFO")
        g.log.info("Default log level of cli.log is INFO as expected")
Example #5
    def tearDownClass(cls, umount_vol=True, cleanup_vol=True):
        """Teardown the mounts and volume.
        """
        # Unmount volume
        if umount_vol:
            _rc = True
            g.log.info("Starting to UnMount Volumes")
            for mount_obj in cls.mounts:
                ret = mount_obj.unmount()
                if not ret:
                    g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'",
                                mount_obj.server_system, mount_obj.volname,
                                mount_obj.client_system, mount_obj.mountpoint)
                    _rc = False
            if not _rc:
                raise ExecutionError("Unmount of all mounts are not "
                                     "successful")
            else:
                g.log.info("Successful in unmounting volume on all clients")
        else:
            g.log.info("Not Unmounting the Volume as 'umount_vol' is set "
                       "to %s", umount_vol)

        # Cleanup volume
        if cleanup_vol:
            ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
            if not ret:
                raise ExecutionError("cleanup volume %s failed", cls.volname)
            else:
                g.log.info("Successfully cleaned-up volume")
        else:
            g.log.info("Not Cleaning-Up volume as 'cleanup_vol' is %s",
                       cleanup_vol)

        # All Volume Info
        volume_info(cls.mnode)

        GlusterBaseClass.tearDownClass.im_func(cls)
Example #6
def log_volume_info_and_status(mnode, volname):
    """Logs volume info and status
    Args:
        mnode (str): Node on which cmd has to be executed.
        volname (str): volume name
    Returns:
        bool: Returns True if getting volume info and status is successful.
            False Otherwise.
    """
    ret, _, _ = volume_info(mnode, volname)
    if ret:
        g.log.error("Failed to get volume info %s", volname)
        return False

    ret, _, _ = volume_status(mnode, volname)
    if ret:
        g.log.error("Failed to get volume status %s", volname)
        return False

    return True
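
A typical call site for this helper, sketched as it might appear inside a glusto test method before a disruptive step (self.mnode and self.volname are the usual test-class attributes; the surrounding test is hypothetical):

        # Snapshot the volume state so failures can be triaged from the logs.
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, "Failed to log info and status of volume %s"
                        % self.volname)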