Example #1
    def test_peer_status(self):
        """Testing peer status command
        """
        # peer status from mnode
        g.log.info("Get peer status from node %s", self.mnode)
        ret, out, err = peer_status(self.mnode)
        self.assertEqual(ret, 0, ("Failed to get peer status from node "
                                  "%s: %s", self.mnode, err))
        g.log.info("Successfully got peer status from node %s:\n%s",
                   self.mnode, out)

        # Get peer status from a randomly chosen node
        random_server = random.choice(self.servers)
        g.log.info("Get peer status from node %s", random_server)
        ret, out, err = peer_status(random_server)
        self.assertEqual(ret, 0, ("Failed to get peer status from node "
                                  "%s: %s" % (random_server, err)))
        g.log.info("Successfully got peer status from node %s:\n%s",
                   random_server, out)

        # Get peer status output from all servers
        for server in self.servers:
            g.log.info("Peer status on node %s", server)
            ret, out, err = peer_status(server)
            self.assertEqual(ret, 0, ("Failed to get peer status from node "
                                      "%s: %s", server, err))
            g.log.info("Successfully got peer status from node %s:\n%s",
                       server, out)
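
For context: peer_status is not defined in these examples, but from the way
it is consumed it evidently returns the (ret, out, err) triple of the
underlying "gluster peer status" command. A minimal sketch of that assumed
contract, not the actual glustolibs implementation:

from glusto.core import Glusto as g

def peer_status_sketch(mnode):
    # Assumed contract: run `gluster peer status` on mnode and return
    # the (returncode, stdout, stderr) triple produced by g.run().
    return g.run(mnode, "gluster peer status")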
Example #2
    def validate_peers_are_connected(cls):
        """Validate whether each server in the cluster is connected to
        all other servers in cluster.

        Returns (bool): True if every peer is in connected state with all
            other peers. False otherwise.
        """
        # Validate if peer is connected from all the servers
        g.log.info(
            "Validating if servers %s are connected from other servers "
            "in the cluster", cls.servers)
        for server in cls.servers:
            g.log.info("Validate servers %s are in connected from  node %s",
                       cls.servers, server)
            ret = is_peer_connected(server, cls.servers)
            if not ret:
                g.log.error(
                    "Some or all servers %s are not in connected "
                    "state from node %s", cls.servers, server)
                return False
            g.log.info(
                "Successfully validated servers %s are all in "
                "connected state from node %s", cls.servers, server)
        g.log.info(
            "Successfully validated all servers %s are in connected "
            "state from other servers in the cluster", cls.servers)

        # Peer Status from mnode
        peer_status(cls.mnode)

        return True
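
A hedged usage sketch for this validator: a test class would typically call
it right after the base-class setup and abort early on failure. The class
name and the classmethod wiring below are assumptions, not part of the
original example:

class TestPeersConnected(GlusterBaseClass):
    @classmethod
    def setUpClass(cls):
        GlusterBaseClass.setUpClass.im_func(cls)
        # Assumes validate_peers_are_connected is exposed as a
        # classmethod on the base class, as its cls signature suggests.
        if not cls.validate_peers_are_connected():
            raise ExecutionError("Peers are not all in connected state")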
Example #3
    def are_peers_in_connected_state(self):
        """Validate if all the peers are in connected state from all servers.
        """
        _rc = True
        # Validate if peer is connected from all the servers
        for server in self.servers:
            ret = is_peer_connected(server, self.servers)
            if not ret:
                _rc = False

        # Peer Status from mnode
        peer_status(self.mnode)

        return _rc
Example #4
    def test_ops_when_one_node_is_down(self):

        # pylint: disable=too-many-statements
        """
        Test Case:
        1) Create a N node gluster cluster.
        2) Stop gluster on one node.
        3) Execute gluster peer status on other node.
        4) Execute gluster v list on other node.
        5) Execute gluster v info on other node.
        """

        # Pick a random server index, excluding index 0 (the mnode).
        self.random_server = randint(1, len(self.servers) - 1)

        # Stopping glusterd on one node.
        ret = stop_glusterd(self.servers[self.random_server])
        self.assertTrue(ret, "Failed to stop glusterd on one node.")
        g.log.info("Successfully stopped glusterd on one node.")

        # Running peer status on another node.
        ret, _, err = peer_status(self.mnode)
        self.assertEqual(ret, 0, ("Failed to get peer status from %s with "
                                  "error message %s" % (self.mnode, err)))
        g.log.info("Successfully got peer status from %s.", self.mnode)

        # Running volume list on another node.
        ret, _, _ = volume_list(self.mnode)
        self.assertEqual(ret, 0, "Failed to get volume list.")
        g.log.info("Successfully got volume list from %s.", self.mnode)

        # Running volume info on another node.
        ret, _, _ = volume_info(self.mnode)
        self.assertEqual(ret, 0, "Failed to get volume info.")
        g.log.info("Successfully got volume info from %s.", self.mnode)
Example #5
    def setUpClass(cls):
        """
        """
        # Read all the cluster config from the g.config and assign it to
        # class variables
        GlusterBaseClass.setUpClass.im_func(cls)

        # Detach all the servers if they are already attached to the cluster
        nodes_in_pool_list = nodes_from_pool_list(cls.mnode)
        if nodes_in_pool_list is None:
            g.log.error(
                "Unable to get nodes from gluster pool list "
                "from node %s", cls.mnode)
        else:
            g.log.info("Nodes in pool: %s", nodes_in_pool_list)

        if nodes_in_pool_list:
            if cls.mnode in nodes_in_pool_list:
                nodes_in_pool_list.remove(cls.mnode)
            g.log.info("Detaching servers '%s' from the cluster from node %s",
                       nodes_in_pool_list, cls.mnode)
            ret = peer_detach_servers(cls.mnode, nodes_in_pool_list)
            if not ret:
                raise ExecutionError(
                    "Failed to detach some or all "
                    "servers %s from the cluster "
                    "from node %s" % (nodes_in_pool_list, cls.mnode))
            g.log.info(
                "Successfully detached all servers '%s' "
                "from the cluster from node %s", nodes_in_pool_list, cls.mnode)

        # Get pool list from mnode
        g.log.info("Pool list on node %s", cls.mnode)
        ret, out, err = pool_list(cls.mnode)
        if ret != 0:
            raise ExecutionError("Failed to get pool list on node %s: %s",
                                 cls.mnode, err)
        g.log.info("Successfully got pool list on node %s:\n%s", cls.mnode,
                   out)

        # Get peer status output from all servers
        for server in cls.servers:
            g.log.info("Peer status on node %s", server)
            ret, out, err = peer_status(server)
            if ret != 0:
                raise ExecutionError(
                    "Failed to get peer status on node %s: "
                    "%s" % (server, err))
            g.log.info("Successfully got peer status on node %s:\n%s", server,
                       out)
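
Because this setUpClass deliberately leaves the pool empty, a test built on
it has to re-form the trusted storage pool before creating volumes. A
minimal fragment sketch, using the same peer_probe_servers helper that
Example #6 calls:

        # Sketch: re-probe all servers after the detach above;
        # peer_probe_servers returns True when every probe succeeds.
        ret = peer_probe_servers(cls.mnode, cls.servers)
        if not ret:
            raise ExecutionError("Failed to peer probe servers %s from "
                                 "node %s" % (cls.servers, cls.mnode))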
Example #6
    def setUpClass(cls):
        """Setup volume exports volume with nfs-ganesha,
            mounts the volume.
        """
        NfsGaneshaClusterSetupClass.setUpClass.im_func(cls)

        # Peer probe servers
        ret = peer_probe_servers(cls.mnode, cls.servers)
        if not ret:
            raise ExecutionError("Failed to peer probe servers")

        g.log.info("All peers are in connected state")

        # Peer Status from mnode
        peer_status(cls.mnode)

        for server in cls.servers:
            mount_info = [{
                'protocol': 'glusterfs',
                'mountpoint': '/run/gluster/shared_storage',
                'server': server,
                'client': {
                    'host': server
                },
                'volname': 'gluster_shared_storage',
                'options': ''
            }]

            mount_obj = create_mount_objs(mount_info)
            if not mount_obj[0].is_mounted():
                ret = mount_obj[0].mount()
                if not ret:
                    raise ExecutionError(
                        "Unable to mount volume '%s:%s' "
                        "on '%s:%s'" %
                        (mount_obj[0].server_system, mount_obj[0].volname,
                         mount_obj[0].client_system, mount_obj[0].mountpoint))

        # Setup Volume
        ret = setup_volume(mnode=cls.mnode,
                           all_servers_info=cls.all_servers_info,
                           volume_config=cls.volume,
                           force=True)
        if not ret:
            raise ExecutionError("Setup volume %s failed", cls.volume)
        time.sleep(10)

        # Export volume with nfs ganesha, if it is not exported already
        vol_option = get_volume_options(cls.mnode,
                                        cls.volname,
                                        option='ganesha.enable')
        if vol_option is None:
            raise ExecutionError("Failed to get ganesha.enable volume option "
                                 "for %s " % cls.volume)
        if vol_option['ganesha.enable'] != 'on':
            ret, out, err = export_nfs_ganesha_volume(mnode=cls.mnode,
                                                      volname=cls.volname)
            if ret != 0:
                raise ExecutionError(
                    "Failed to export volume %s "
                    "as NFS export" % cls.volname)
            time.sleep(5)

        ret = wait_for_nfs_ganesha_volume_to_get_exported(
            cls.mnode, cls.volname)
        if not ret:
            raise ExecutionError("Failed to export volume %s. Volume is "
                                 "not listed in showmount" % cls.volname)
        g.log.info("Volume %s is exported successfully", cls.volname)

        # Log Volume Info and Status
        ret = log_volume_info_and_status(cls.mnode, cls.volname)
        if not ret:
            raise ExecutionError("Logging volume %s info and status failed",
                                 cls.volname)

        # Create Mounts
        _rc = True
        for mount_obj in cls.mounts:
            ret = mount_obj.mount()
            if not ret:
                g.log.error("Unable to mount volume '%s:%s' on '%s:%s'",
                            mount_obj.server_system, mount_obj.volname,
                            mount_obj.client_system, mount_obj.mountpoint)
                _rc = False
        if not _rc:
            raise ExecutionError("Mounting volume %s on few clients failed",
                                 cls.volname)

        # Get info of mount before the IO
        log_mounts_info(cls.mounts)
Example #7
    def test_peer_probe_status(self):

        # get FQDN of node1 and node2
        node1 = socket.getfqdn(self.mnode)
        node2 = socket.getfqdn(self.servers[1])

        # peer probe to a new node, N2 from N1
        ret, _, err = peer_probe(node1, node2)
        self.assertEqual(ret, 0, ("Peer probe failed to %s from %s with "
                                  "error message %s" % (self.servers[1],
                                                        self.mnode, err)))
        g.log.info("Peer probe from %s to %s is success", self.mnode,
                   self.servers[1])

        # check peer status in both the nodes, it should have FQDN
        # from node1
        ret, out, err = peer_status(self.mnode)
        self.assertEqual(ret, 0, ("Failed to get peer status from %s with "
                                  "error message %s" % (self.mnode, err)))
        g.log.info("Successfully got peer status from %s", self.mnode)

        self.assertIn(node2, out, ("FQDN of %s is not present in the "
                                   "output of peer status from %s"
                                   % (self.servers[1], self.mnode)))
        g.log.info("FQDN of %s is present in peer status of %s",
                   self.servers[1], self.mnode)

        # from node2
        ret, out, err = peer_status(self.servers[1])
        self.assertEqual(ret, 0, ("Failed to get peer status from %s with "
                                  "error message %s" % (self.servers[1], err)))
        g.log.info("Successfully got peer status from %s", self.servers[1])

        self.assertIn(node1, out, ("FQDN of %s is not present in the "
                                   "output of peer status from %s"
                                   % (self.mnode, self.servers[1])))
        g.log.info("FQDN of %s is present in peer status of %s",
                   self.mnode, self.servers[1])

        # create a distributed volume with 2 bricks
        servers_info_from_two_node_cluster = {}
        for server in self.servers[0:2]:
            servers_info_from_two_node_cluster[
                server] = self.all_servers_info[server]

        self.volume['servers'] = self.servers[0:2]
        self.volume['voltype']['dist_count'] = 2
        ret = setup_volume(self.mnode, servers_info_from_two_node_cluster,
                           self.volume)
        self.assertTrue(ret, ("Failed to create "
                              "and start volume %s" % self.volname))
        g.log.info("Successfully created and started the volume %s",
                   self.volname)

        # peer probe to a new node, N3
        ret, _, err = peer_probe(self.mnode, self.servers[2])
        self.assertEqual(ret, 0, ("Peer probe failed to %s from %s with "
                                  "error message %s" % (self.servers[2],
                                                        self.mnode, err)))
        g.log.info("Peer probe from %s to %s is success", self.mnode,
                   self.servers[2])

        # add a brick from N3 to the volume
        num_bricks_to_add = 1
        server_info = {}
        server_info[self.servers[2]] = self.all_servers_info[self.servers[2]]
        brick = form_bricks_list(self.mnode, self.volname, num_bricks_to_add,
                                 self.servers[2], server_info)
        ret, _, _ = add_brick(self.mnode, self.volname, brick)
        self.assertEqual(ret, 0, ("Failed to add brick to volume %s"
                                  % self.volname))
        g.log.info("add brick to the volume %s is success", self.volname)

        # get volume info, it should have correct brick information
        ret = get_volume_info(self.mnode, self.volname)
        self.assertIsNotNone(ret, ("Failed to get volume info from %s"
                                   % self.mnode))
        g.log.info("volume info from %s is success", self.mnode)

        brick3 = ret[self.volname]['bricks']['brick'][2]['name']
        self.assertEqual(brick3, str(brick[0]), ("Volume info has incorrect "
                                                 "information"))
        g.log.info("Volume info has correct information")
Example #8
    def setUpClass(cls, mount_vol=True):
        """Sets up the volume, shares/exports it over CIFS/NFS as needed,
            and mounts the volume.
        """
        GlusterBaseClass.setUpClass.im_func(cls)

        # Validate if peer is connected from all the servers
        for server in cls.servers:
            ret = is_peer_connected(server, cls.servers)
            if not ret:
                raise ExecutionError("Validating Peers to be in Cluster "
                                     "Failed")
        g.log.info("All peers are in connected state")

        # Peer Status from mnode
        peer_status(cls.mnode)

        # Setup Volume
        ret = setup_volume(mnode=cls.mnode,
                           all_servers_info=cls.all_servers_info,
                           volume_config=cls.volume, force=True)
        if not ret:
            raise ExecutionError("Setup volume %s failed", cls.volname)
        time.sleep(10)

        # Export/Share the volume based on mount_type
        if cls.mount_type != "glusterfs":
            if "nfs" in cls.mount_type:
                ret = export_volume_through_nfs(
                    mnode=cls.mnode, volname=cls.volname,
                    enable_ganesha=cls.enable_nfs_ganesha)
                if not ret:
                    raise ExecutionError("Failed to export volume %s "
                                         "as NFS export", cls.volname)

                # Set NFS-Ganesha specific volume options
                if cls.enable_nfs_ganesha and cls.nfs_ganesha_export_options:
                    g.log.info("Setting NFS-Ganesha export specific "
                               "volume options")
                    ret = set_volume_options(
                        mnode=cls.mnode, volname=cls.volname,
                        options=cls.nfs_ganesha_export_options)
                    if not ret:
                        raise ExecutionError("Failed to set NFS-Ganesha "
                                             "export specific options on "
                                             "volume %s", cls.volname)
                    g.log.info("Successful in setting NFS-Ganesha export "
                               "specific volume options")

            if "smb" in cls.mount_type or "cifs" in cls.mount_type:
                ret = share_volume_over_smb(mnode=cls.mnode,
                                            volname=cls.volname,
                                            smb_users_info=cls.smb_users_info)
                if not ret:
                    raise ExecutionError("Failed to export volume %s "
                                         "as SMB Share", cls.volname)

                # Set SMB share specific volume options
                if cls.smb_share_options:
                    g.log.info("Setting SMB share specific volume options")
                    ret = set_volume_options(mnode=cls.mnode,
                                             volname=cls.volname,
                                             options=cls.smb_share_options)
                    if not ret:
                        raise ExecutionError("Failed to set SMB share "
                                             "specific options "
                                             "on volume %s", cls.volname)
                    g.log.info("Successful in setting SMB share specific "
                               "volume options")

        # Log Volume Info and Status
        ret = log_volume_info_and_status(cls.mnode, cls.volname)
        if not ret:
            raise ExecutionError("Logging volume %s info and status failed",
                                 cls.volname)

        # Create Mounts
        if mount_vol:
            _rc = True
            g.log.info("Starting to mount volume")
            for mount_obj in cls.mounts:
                ret = mount_obj.mount()
                if not ret:
                    g.log.error("Unable to mount volume '%s:%s' on '%s:%s'",
                                mount_obj.server_system, mount_obj.volname,
                                mount_obj.client_system, mount_obj.mountpoint)
                    _rc = False
            if not _rc:
                raise ExecutionError("Mounting volume %s on some clients "
                                     "failed" % cls.volname)
            g.log.info("Successful in mounting volume on all clients")

            # Get info of mount before the IO
            g.log.info("Get mounts Info:")
            log_mounts_info(cls.mounts)
        else:
            g.log.info("Not Mounting the volume as 'mount_vol' option is "
                       "set to %s", mount_vol)
Example #9
    def setUpClass(cls):
        """Setup volume, shares/exports volume for cifs/nfs protocols,
            mounts the volume.
        """
        GlusterBaseClass.setUpClass.im_func(cls)

        # Validate if peer is connected from all the servers
        for server in cls.servers:
            ret = is_peer_connected(server, cls.servers)
            if not ret:
                raise ExecutionError("Validating Peers to be in Cluster "
                                     "Failed")
        g.log.info("All peers are in connected state")

        # Peer Status from mnode
        peer_status(cls.mnode)

        # Setup Volume
        ret = setup_volume(mnode=cls.mnode,
                           all_servers_info=cls.all_servers_info,
                           volume_config=cls.volume,
                           force=True)
        if not ret:
            raise ExecutionError("Setup volume %s failed", cls.volname)
        time.sleep(10)

        # Export/Share the volume based on mount_type
        if cls.mount_type != "glusterfs":
            if "nfs" in cls.mount_type:
                ret = export_volume_through_nfs(
                    mnode=cls.mnode,
                    volname=cls.volname,
                    enable_ganesha=cls.enable_nfs_ganesha)
                if not ret:
                    raise ExecutionError(
                        "Failed to export volume %s "
                        "as NFS export" % cls.volname)

            if "smb" in cls.mount_type or "cifs" in cls.mount_type:
                ret = share_volume_over_smb(mnode=cls.mnode,
                                            volname=cls.volname,
                                            smb_users_info=cls.smb_users_info)
                if not ret:
                    raise ExecutionError(
                        "Failed to export volume %s "
                        "as SMB Share" % cls.volname)

        # Log Volume Info and Status
        ret = log_volume_info_and_status(cls.mnode, cls.volname)
        if not ret:
            raise ExecutionError("Logging volume %s info and status failed",
                                 cls.volname)

        # Create Mounts
        _rc = True
        for mount_obj in cls.mounts:
            ret = mount_obj.mount()
            if not ret:
                g.log.error("Unable to mount volume '%s:%s' on '%s:%s'",
                            mount_obj.server_system, mount_obj.volname,
                            mount_obj.client_system, mount_obj.mountpoint)
                _rc = False
        if not _rc:
            raise ExecutionError("Mounting volume %s on few clients failed",
                                 cls.volname)

        # Get info of mount before the IO
        log_mounts_info(cls.mounts)
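
None of these setUpClass variants shows its teardown counterpart. A hedged
sketch of the usual mirror image (unmount every client, then delete the
volume), assuming a cleanup_volume(mnode, volname) helper from glustolibs'
volume libraries:

    @classmethod
    def tearDownClass(cls):
        # Unmount all the clients mounted in setUpClass.
        for mount_obj in cls.mounts:
            if not mount_obj.unmount():
                g.log.error("Unable to unmount volume '%s:%s' from "
                            "'%s:%s'", mount_obj.server_system,
                            mount_obj.volname, mount_obj.client_system,
                            mount_obj.mountpoint)
        # Assumption: cleanup_volume stops and deletes the volume.
        if not cleanup_volume(cls.mnode, cls.volname):
            raise ExecutionError("Failed to cleanup volume %s"
                                 % cls.volname)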