Example #1
def cleanup_volume(mnode, volname):
    """deletes snapshots in the volume, stops and deletes the gluster
       volume if given volume exists in gluster and deletes the
       directories in the bricks associated with the given volume
    Args:
        volname (str): volume name
        mnode (str): Node on which cmd has to be executed.
    Returns:
        bool: True, if volume is deleted successfully
              False, otherwise
    Example:
        cleanup_volume("", "testvol")
    """
    volume = volume_exists(mnode, volname)
    if not volume:
        g.log.info("Volume %s does not exist in %s", volname, mnode)
        return True

    ret, _, _ = g.run(mnode, "glustercli snapshot delete all %s" % volname)
    if ret:
        g.log.error("Failed to delete the snapshots in volume %s", volname)
        return False

    ret, _, _ = volume_stop(mnode, volname)
    if ret:
        g.log.error("Failed to stop volume %s" % volname)
        return False

    ret = volume_delete(mnode, volname)
    if not ret:
        g.log.error("Unable to cleanup the volume %s", volname)
        return False

    return True
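A minimal usage sketch for the helper above, assuming the usual glustolibs import paths (glusto.core and glustolibs.gluster): fetch the volume list with get_volume_list and call cleanup_volume on each entry, the same pattern the peer-probe test later in this document uses for its cleanup step. The helper name cleanup_all_volumes is hypothetical.

# Hedged usage sketch, not part of glusto-tests: clean up every volume on a
# management node using the cleanup_volume helper shown above.
from glusto.core import Glusto as g
from glustolibs.gluster.volume_ops import get_volume_list
from glustolibs.gluster.volume_libs import cleanup_volume


def cleanup_all_volumes(mnode):
    """Delete every volume listed on mnode; True only if all succeed."""
    vol_list = get_volume_list(mnode)
    if vol_list is None:
        g.log.error("Unable to get the volume list from %s", mnode)
        return False

    success = True
    for volname in vol_list:
        if not cleanup_volume(mnode, volname):
            g.log.error("Failed to cleanup volume %s", volname)
            success = False
    return success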
Example #2
    def _volume_operations_in_loop(self):
        """ Create, start, stop and delete 100 volumes in a loop """
        # Create and start 100 volumes in a loop
        self.volume_config = {
            'name': 'volume-',
            'servers': self.servers,
            'voltype': {
                'type': 'distributed-replicated',
                'dist_count': 2,
                'replica_count': 3
            },
        }

        ret = bulk_volume_creation(self.mnode, 100, self.all_servers_info,
                                   self.volume_config, "", False, True)
        self.assertTrue(ret, "Failed to create volumes")

        self.volume_present = True

        g.log.info("Successfully created all the volumes")

        # Start 100 volumes in loop
        for i in range(100):
            self.volname = "volume-%d" % i
            ret, _, _ = volume_start(self.mnode, self.volname)
            self.assertEqual(ret, 0,
                             "Failed to start volume: %s" % self.volname)

        g.log.info("Successfully started all the volumes")

        # Stop 100 volumes in loop
        for i in range(100):
            self.volname = "volume-%d" % i
            ret, _, _ = volume_stop(self.mnode, self.volname)
            self.assertEqual(ret, 0,
                             "Failed to stop volume: %s" % self.volname)

        g.log.info("Successfully stopped all the volumes")

        # Delete 100 volumes in loop
        for i in range(100):
            self.volname = "volume-%d" % i
            ret = volume_delete(self.mnode, self.volname)
            self.assertTrue(ret, "Failed to delete volume: %s" % self.volname)

        self.volume_present = False

        g.log.info("Successfully deleted all the volumes")
Example #3
    def tearDown(self):

        # Unmount and clean volume
        if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
            raise ExecutionError("Failed to Cleanup Volume")

        if self.is_second_volume_created:
            # Stop the 2nd volume
            ret, _, _ = volume_stop(self.mnode, self.second_vol_name)
            self.assertEqual(
                ret, 0, ("volume stop failed for %s" % self.second_vol_name))
            g.log.info("Volume %s stopped", self.second_vol_name)

            # Delete the 2nd volume
            ret = volume_delete(self.mnode, self.second_vol_name)
            self.assertTrue(ret, ("Failed to cleanup the Volume "
                                  "%s", self.second_vol_name))
            g.log.info("Volume deleted successfully : %s",
                       self.second_vol_name)

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
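The tearDown above relies on self.second_vol_name and self.is_second_volume_created being set while the test runs. Below is a hedged sketch of what that test-side setup could look like; it reuses form_bricks_list, volume_create and volume_start with the signatures used elsewhere in these examples, while the method name _create_second_volume and the brick count of 3 are assumptions.

    def _create_second_volume(self):
        """Hypothetical helper: create and start the second volume that
        the tearDown above stops and deletes."""
        self.second_vol_name = "second_volume"
        bricks_list = form_bricks_list(self.mnode, self.second_vol_name, 3,
                                       self.servers, self.all_servers_info)
        ret, _, _ = volume_create(self.mnode, self.second_vol_name,
                                  bricks_list, True)
        self.assertEqual(ret, 0, "Failed to create the second volume")

        ret, _, _ = volume_start(self.mnode, self.second_vol_name)
        self.assertEqual(ret, 0, "Failed to start the second volume")

        # tearDown only stops and deletes the volume when this flag is set
        self.is_second_volume_created = True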
Example #4
    def test_peer_probe(self):
        """
        In this test case:
        1. Create Dist Volume on Node 1
        2. Create Replica Volume on Node 2
        3. Peer Probe N2 from N1 (should fail)
        4. Clean All Volumes
        5. Peer Probe N1 to N2 (should succeed)
           Peer Probe N3 to N2 (should fail)
        6. Create replica Volume on N1 and N2
        7. Peer probe from N3 to N1 (should fail)
        8. Peer probe from N1 to N3 (should succeed)
        9. Create replica Volume on N1, N2 and N3
        10. Start Volume
        11. Delete volume (should fail)
        12. Stop volume
        13. Clean up all volumes
        """

        # pylint: disable=too-many-statements
        # Create a distributed volume on Node1
        number_of_brick = 1
        servers_info_from_single_node = {}
        servers_info_from_single_node[self.servers[0]] = self.all_servers_info[
            self.servers[0]]
        self.volname = "testvol"
        bricks_list = form_bricks_list(self.servers[0], self.volname,
                                       number_of_brick, self.servers[0],
                                       servers_info_from_single_node)
        ret, _, _ = volume_create(self.servers[0], self.volname, bricks_list,
                                  True)
        self.assertEqual(ret, 0, "Volume create failed")
        g.log.info("Volume %s created successfully", self.volname)

        # Create a replicate volume on Node2 without force
        number_of_brick = 2
        servers_info_from_single_node = {}
        servers_info_from_single_node[self.servers[1]] = self.all_servers_info[
            self.servers[1]]
        kwargs = {'replica_count': 2}
        self.volname = "new-volume"
        bricks_list = form_bricks_list(self.servers[1], self.volname,
                                       number_of_brick, self.servers[1],
                                       servers_info_from_single_node)

        # creation of replicate volume without force should fail
        ret, _, _ = volume_create(self.servers[1], self.volname, bricks_list,
                                  False, **kwargs)
        self.assertNotEqual(ret, 0, ("Unexpected: Successfully created "
                                     "the replicate volume on node2 "
                                     "without force"))
        g.log.info("Failed to create the replicate volume %s as "
                   "expected without force", self.volname)

        # Create a replica volume on Node2 with force
        number_of_brick = 3
        servers_info_from_single_node = {}
        servers_info_from_single_node[self.servers[1]] = self.all_servers_info[
            self.servers[1]]
        kwargs = {'replica_count': 3}
        self.volname = "new-volume"
        bricks_list = form_bricks_list(self.servers[1], self.volname,
                                       number_of_brick, self.servers[1],
                                       servers_info_from_single_node)

        # creation of replicate volume with force should succeed
        ret, _, _ = volume_create(self.servers[1], self.volname, bricks_list,
                                  True, **kwargs)
        self.assertEqual(ret, 0, "Volume create failed")
        g.log.info("Volume %s created", self.volname)

        # Perform peer probe from N1 to N2 (should fail since N2 already
        # has volumes configured)
        ret, _, _ = peer_probe(self.servers[0], self.servers[1])
        self.assertNotEqual(
            ret, 0,
            ("peer probe is success from %s to %s even if %s "
             " is a part of another cluster or having volumes "
             " configured", self.servers[0], self.servers[1], self.servers[1]))
        g.log.info("peer probe failed from %s to "
                   "%s as expected", self.servers[0], self.servers[1])

        # clean up all volumes
        for server in self.servers[0:2]:
            # Listing all the volumes
            vol_list = get_volume_list(server)
            self.assertIsNotNone(vol_list, "Unable to get volumes list")
            g.log.info("Getting the volume list from %s", self.mnode)
            for vol in vol_list:
                g.log.info("deleting volume : %s", vol)
                ret = cleanup_volume(server, vol)
                self.assertTrue(ret, ("Failed to Cleanup the Volume %s", vol))
                g.log.info("Volume deleted successfully : %s", vol)

        # Peer probe from N1 to N2 should now succeed
        ret, _, _ = peer_probe(self.servers[0], self.servers[1])
        self.assertEqual(ret, 0, ("peer probe from %s to %s is "
                                  "failed", self.servers[0], self.servers[1]))
        g.log.info("peer probe is success from %s to "
                   "%s", self.servers[0], self.servers[1])

        # Checking if peer is connected
        counter = 0
        while counter < 30:
            ret = is_peer_connected(self.servers[0], self.servers[1])
            counter += 1
            if ret:
                break
            sleep(3)
        self.assertTrue(ret, "Peer is not in connected state.")
        g.log.info("Peers is in connected state.")

        # Perform peer probe from N3 to N2 should fail
        ret, _, _ = peer_probe(self.servers[2], self.servers[1])
        self.assertNotEqual(
            ret, 0,
            ("peer probe is success from %s to %s even if %s "
             "is a part of another cluster or having volumes "
             "configured", self.servers[2], self.servers[1], self.servers[1]))
        g.log.info("peer probe failed from %s to "
                   "%s as expected", self.servers[2], self.servers[1])

        # Create a replica volume on N1 and N2 with force
        number_of_brick = 2
        servers_info_from_two_node = {}
        for server in self.servers[0:2]:
            servers_info_from_two_node[server] = self.all_servers_info[server]
        kwargs = {'replica_count': 2}
        self.volname = "new-volume"
        bricks_list = form_bricks_list(self.servers[0], self.volname,
                                       number_of_brick, self.servers[0:2],
                                       servers_info_from_two_node)
        ret, _, _ = volume_create(self.servers[1], self.volname, bricks_list,
                                  True, **kwargs)
        self.assertEqual(ret, 0, "Volume create failed")
        g.log.info("Volume %s created succssfully", self.volname)

        # Perform peer probe from N3 to N1 should fail
        ret, _, _ = peer_probe(self.servers[2], self.servers[0])
        self.assertNotEqual(
            ret, 0,
            ("peer probe is success from %s to %s even if %s "
             "a part of another cluster or having volumes "
             "configured", self.servers[2], self.servers[0], self.servers[0]))
        g.log.info("peer probe is failed from %s to "
                   "%s as expected", self.servers[2], self.servers[0])

        # Peer probe from N1 to N3 should succeed
        ret, _, _ = peer_probe(self.servers[0], self.servers[2])
        self.assertEqual(ret, 0, ("peer probe from %s to %s is "
                                  "failed", self.servers[0], self.servers[2]))
        g.log.info("peer probe is success from %s to "
                   "%s", self.servers[0], self.servers[2])

        # Checking if peer is connected
        counter = 0
        while counter < 30:
            ret = is_peer_connected(self.servers[0], self.servers[:3])
            counter += 1
            if ret:
                break
            sleep(3)
        self.assertTrue(ret, "Peer is not in connected state.")
        g.log.info("Peers is in connected state.")

        # Create a replica volume on N1, N2 and N3 with force
        number_of_brick = 3
        server_info_from_three_node = {}
        for server in self.servers[0:3]:
            server_info_from_three_node[server] = self.all_servers_info[server]
        kwargs = {'replica_count': 3}
        self.volname = "new-replica-volume"
        bricks_list = form_bricks_list(self.servers[2], self.volname,
                                       number_of_brick, self.servers[0:3],
                                       server_info_from_three_node)
        ret, _, _ = volume_create(self.servers[1], self.volname, bricks_list,
                                  True, **kwargs)
        self.assertEqual(ret, 0, "Volume create failed")
        g.log.info("creation of replica volume should succeed")

        ret, _, _ = volume_start(self.servers[2], self.volname, True)
        self.assertEqual(ret, 0, ("Failed to start the "
                                  "volume %s", self.volname))
        g.log.info("Volume %s start with force is success", self.volname)

        # Volume delete should fail without stopping volume
        self.assertTrue(
            volume_delete(self.servers[2], self.volname, xfail=True),
            "Unexpected Error: Volume deleted "
            "successfully without stopping volume")
        g.log.info(
            "Expected: volume delete should fail without "
            "stopping volume: %s", self.volname)

        # Volume stop with force
        ret, _, _ = volume_stop(self.mnode, self.volname, True)
        self.assertEqual(ret, 0, ("Failed to stop the volume "
                                  "%s", self.volname))
        g.log.info("Volume stop with force is success")
Example #5
    def test_glusterd_quorum_validation(self):
        """
        -> Creating two volumes and starting them, stop the second volume
        -> set the server quorum and set the ratio to 90
        -> Stop the glusterd in one of the node, so the quorum won't meet
        -> Peer probing a new node should fail
        -> Volume stop will fail
        -> volume delete will fail
        -> volume reset will fail
        -> Start the glusterd on the node where it is stopped
        -> Volume stop, start, delete will succeed once quorum is met
        """
        # pylint: disable=too-many-statements, too-many-branches

        # Peer probe first 3 servers
        servers_info_from_three_nodes = {}
        for server in self.servers[0:3]:
            servers_info_from_three_nodes[server] = self.all_servers_info[
                server]

            # Peer probe the first 3 servers
            ret, _, _ = peer_probe(self.mnode, server)
            self.assertEqual(ret, 0,
                             "Peer probe failed to server %s" % server)
        g.log.info("Peer probe to first 3 nodes succeeded")

        self.volume['servers'] = self.servers[0:3]
        # Create a volume using the first 3 nodes
        ret = setup_volume(self.mnode,
                           servers_info_from_three_nodes,
                           self.volume,
                           force=True)
        self.assertTrue(ret, ("Failed to create and start volume"))
        g.log.info("Volume created and started successfully")

        # Creating another volume and stopping it
        second_volume = "second_volume"
        self.volume['name'] = second_volume
        ret = setup_volume(self.mnode,
                           servers_info_from_three_nodes,
                           self.volume,
                           force=True)
        self.assertTrue(ret, ("Failed to create and start volume"))
        g.log.info("Volume created and started succssfully")

        # stopping the second volume
        g.log.info("Stopping the second volume %s", second_volume)
        ret, _, _ = volume_stop(self.mnode, second_volume)
        self.assertEqual(ret, 0, ("Failed to stop the volume"))
        g.log.info("Successfully stopped second volume %s", second_volume)

        # Setting the server-quorum-type as server
        self.options = {"cluster.server-quorum-type": "server"}
        vol_list = get_volume_list(self.mnode)
        self.assertIsNotNone(vol_list, "Failed to get the volume list")
        g.log.info("Fetched the volume list")
        for volume in vol_list:
            g.log.info(
                "Setting the server-quorum-type as server"
                " on volume %s", volume)
            ret = set_volume_options(self.mnode, volume, self.options)
            self.assertTrue(ret, ("Failed to set the quorum type as a server"
                                  " on volume %s", volume))
        g.log.info("Server Quorum type is set as a server")

        # Setting the server quorum ratio to 90
        self.quorum_percent = {'cluster.server-quorum-ratio': '90%'}
        ret = set_volume_options(self.mnode, 'all', self.quorum_percent)
        self.assertTrue(ret, ("Failed to set the server quorum ratio "
                              "to 90 on servers"))
        g.log.info("Successfully set server quorum ratio to 90% on servers")

        # Stop glusterd on one of the node
        ret = stop_glusterd(self.servers[2])
        self.assertTrue(ret, ("Failed to stop glusterd on "
                              "node %s", self.servers[2]))
        g.log.info("Glusterd stop on the nodes : %s"
                   " succeeded", self.servers[2])

        # Check glusterd is stopped
        ret = is_glusterd_running(self.servers[2])
        self.assertEqual(ret, 1, "Unexpected: Glusterd is running on node")
        g.log.info("Expected: Glusterd stopped on node %s", self.servers[2])

        # Adding a new peer will fail as quorum not met
        ret, _, _ = peer_probe(self.mnode, self.servers[3])
        self.assertNotEqual(ret, 0,
                            ("Unexpected:"
                             "Succeeded to peer probe new node %s when quorum "
                             "is not met", self.servers[3]))
        g.log.info("Failed to peer probe new node as expected"
                   " when quorum not met")

        # Starting the stopped volume should fail as quorum is not met
        ret, _, _ = volume_start(self.mnode, second_volume)
        self.assertNotEqual(
            ret, 0, "Unexpected: Successfully started "
            "volume even when quorum not met.")
        g.log.info(
            "Volume start %s failed as expected when quorum "
            "is not met", second_volume)

        # Stopping the first volume should fail as quorum is not met
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(
            ret, 1, "Unexpected: Successfully stopped"
            " volume even when quorum is not met")
        g.log.info(
            "volume stop %s failed as expected when quorum "
            "is not met", self.volname)

        # Stopping a volume with force option should fail
        ret, _, _ = volume_stop(self.mnode, self.volname, force=True)
        self.assertNotEqual(
            ret, 0, "Unexpected: Successfully "
            "stopped volume with force. Expected: "
            "Volume stop should fail when quourm is not met")
        g.log.info("volume stop failed as expected when quorum is not met")

        # Deleting a volume should fail. Deleting the second volume.
        ret = volume_delete(self.mnode, second_volume)
        self.assertFalse(
            ret, "Unexpected: Volume delete was "
            "successful even when quourm is not met")
        g.log.info("volume delete failed as expected when quorum is not met")

        # Volume reset should fail when quorum is not met
        ret, _, _ = volume_reset(self.mnode, self.volname)
        self.assertNotEqual(
            ret, 0, "Unexpected: Volume reset was "
            "successful even when quorum is not met")
        g.log.info("volume reset failed as expected when quorum is not met")

        # Volume reset should fail even with force when quorum is not met
        ret, _, _ = volume_reset(self.mnode, self.volname, force=True)
        self.assertNotEqual(
            ret, 0, "Unexpected: Volume reset was "
            "successful with force even "
            "when quourm is not met")
        g.log.info("volume reset failed as expected when quorum is not met")

        # Start glusterd on the node where glusterd is stopped
        ret = start_glusterd(self.servers[2])
        self.assertTrue(ret, "Failed to start glusterd on one node")
        g.log.info("Started glusterd on server"
                   " %s successfully", self.servers[2])

        ret = is_glusterd_running(self.servers[2])
        self.assertEqual(ret, 0, ("glusterd is not running on "
                                  "node %s", self.servers[2]))
        g.log.info("glusterd is running on node" " %s ", self.servers[2])

        # Check peer status: all peers should be in connected state and
        # none of the nodes should be in peer rejected state
        halt, counter, _rc = 30, 0, False
        g.log.info("Wait for some seconds, right after glusterd start it "
                   "will create two daemon process it need few seconds "
                   "(like 3-5) to initialize the glusterd")
        while counter < halt:
            ret = is_peer_connected(self.mnode, self.servers[0:3])
            if not ret:
                g.log.info("Peers are not connected state,"
                           " Retry after 2 seconds .......")
                sleep(2)
                counter = counter + 2
            else:
                _rc = True
                g.log.info("Peers are in connected state in the cluster")
                break

        self.assertTrue(_rc, ("Peers are not connected state after "
                              "bringing back glusterd online on the "
                              "nodes in which previously glusterd "
                              "had been stopped"))

        # Check all bricks are online or wait for the bricks to be online
        ret = wait_for_bricks_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, "All bricks are not online")
        g.log.info("All bricks of the volume %s are online", self.volname)

        # Once quorum is met should be able to cleanup the volume
        ret = volume_delete(self.mnode, second_volume)
        self.assertTrue(ret, "Volume delete failed even when quorum is met")
        g.log.info("volume delete succeed without any issues")

        # Volume stop should succeed
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Volume stop failed")
        g.log.info("succeeded stopping the volume as expected")

        # volume reset should succeed
        ret, _, _ = volume_reset(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Volume reset failed ")
        g.log.info("volume reset succeeded as expected when quorum is not met")

        # Peer probe new node should succeed
        ret, _, _ = peer_probe(self.mnode, self.servers[3])
        self.assertEqual(
            ret, 0, ("Failed to peer probe new node even when quorum is met"))
        g.log.info("Succeeded to peer probe new node when quorum met")

        # Check peer status: all peers should be in connected state and
        # none of the nodes should be in peer rejected state
        halt, counter, _rc = 30, 0, False
        g.log.info("Wait for some seconds, right after peer probe")
        while counter < halt:
            ret = is_peer_connected(self.mnode, self.servers[0:3])
            if not ret:
                g.log.info("Peers are not connected state,"
                           " Retry after 2 seconds .......")
                sleep(2)
                counter = counter + 2
            else:
                _rc = True
                g.log.info("Peers are in connected state in the cluster")
                break

        self.assertTrue(_rc, ("Peers are not connected state"))
Example #6
    def test_volume_op(self):

        # Starting a non existing volume should fail
        ret, _, _ = volume_start(self.mnode, "no_vol", force=True)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to Start a non"
            " existing volume. Actual: Successfully started "
            "a non existing volume")
        g.log.info("Starting a non existing volume is failed")

        # Stopping a non existing volume should fail
        ret, _, _ = volume_stop(self.mnode, "no_vol", force=True)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to stop "
            "non-existing volume. Actual: Successfully "
            "stopped a non existing volume")
        g.log.info("Stopping a non existing volume is failed")

        # Deleting a non existing volume should fail
        ret = volume_delete(self.mnode, "no_vol")
        self.assertTrue(
            ret, "Expected: It should fail to delete a "
            "non existing volume. Actual:Successfully deleted "
            "a non existing volume")
        g.log.info("Deleting a non existing volume is failed")

        # Detach a server and try to create volume with node
        # which is not in cluster
        ret, _, _ = peer_detach(self.mnode, self.servers[1])
        self.assertEqual(ret, 0, ("Peer detach is failed"))
        g.log.info("Peer detach is successful")

        num_of_bricks = len(self.servers)
        bricks_list = form_bricks_list(self.mnode, self.volname, num_of_bricks,
                                       self.servers, self.all_servers_info)

        ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
        self.assertNotEqual(
            ret, 0, "Unexpected: Successfully created a volume with a "
            "brick from a node which is not part of the cluster")
        g.log.info("Creating a volume with a brick from a node which is "
                   "not part of the cluster failed as expected")

        # Peer probe the detached server
        ret, _, _ = peer_probe(self.mnode, self.servers[1])
        self.assertEqual(ret, 0, ("Peer probe is failed"))
        g.log.info("Peer probe is successful")

        # Create and start a volume
        ret = setup_volume(self.mnode,
                           self.all_servers_info,
                           self.volume,
                           force=True)
        self.assertTrue(ret, "Failed to create the volume")
        g.log.info("Successfully created and started the volume")

        # Starting already started volume should fail
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to start an "
            "already started volume. Actual: Successfully "
            "started an already started volume")
        g.log.info("Starting an already started volume failed as expected")

        # Deleting a volume without stopping should fail
        ret = volume_delete(self.mnode, self.volname)
        self.assertFalse(ret, ("Expected: It should fail to delete a volume"
                               " without stopping. Actual: Successfully "
                               "deleted a volume without stopping it"))
        g.log.error("Failed to delete a volume without stopping it")

        # Stopping a volume should succeed
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("volume stop is failed"))
        g.log.info("Volume stop is success")

        # Stopping a already stopped volume should fail
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to stop an "
            "already stopped volume. Actual: Successfully "
            "stopped an already stopped volume")
        g.log.info("Volume stop failed on an already stopped volume "
                   "as expected")

        # Deleting a volume should succeed
        ret = volume_delete(self.mnode, self.volname)
        self.assertTrue(ret, ("Volume delete is failed"))
        g.log.info("Volume delete is success")

        # Deleting a non existing volume should fail
        ret = volume_delete(self.mnode, self.volname, xfail=True)
        self.assertTrue(
            ret, "Expected: It should fail to delete a non "
            "existing volume. Actual: Successfully deleted a "
            "non existing volume")
        g.log.info("Volume delete failed for the non existing volume "
                   "as expected")

        # Volume info command should succeed
        ret = get_volume_info(self.mnode)
        self.assertIsNotNone(ret, "volume info command failed")
        g.log.info("Volume info command is success")
    def _shared_storage_test_without_node_reboot(self):
        """Shared storge testcase till the node reboot scenario"""

        # Enable shared storage and check it is present on the cluster
        self._enable_and_check_shared_storage()

        # Get all the bricks where shared storage is mounted
        brick_list = self._get_all_bricks()

        # Check the shared volume is mounted on the nodes where it is created
        self._is_shared_storage_mounted_on_the_nodes(brick_details=brick_list,
                                                     mounted=True)
        # Disable shared storage and check it is not present on the cluster
        self._disable_and_check_shared_storage()

        # Check the shared volume is unmounted on the nodes where it is created
        self._is_shared_storage_mounted_on_the_nodes(brick_details=brick_list,
                                                     mounted=False)

        # Create a volume with name gluster_shared_storage
        volume = "gluster_shared_storage"
        bricks_list = form_bricks_list(self.mnode, volume, 2, self.servers,
                                       self.all_servers_info)
        count = 0
        while count < 20:
            ret, _, _ = volume_create(self.mnode, volume, bricks_list, True)
            if not ret:
                break
            sleep(2)
            count += 1
        self.assertEqual(ret, 0, "Failed to create volume")
        g.log.info("Volume create is success")

        # Disable the shared storage should fail
        ret = disable_shared_storage(self.mnode)
        self.assertFalse(ret, ("Unexpected: Successfully disabled"
                               " shared-storage"))
        g.log.info("Volume set: failed as expected")

        # Check volume list to confirm gluster_shared_storage
        # is not deleted which was created before
        vol_list = get_volume_list(self.mnode)
        _rc = False
        for vol in vol_list:
            if vol == "gluster_shared_storage":
                _rc = True
                break
        self.assertTrue(_rc, ("gluster_shared_storage volume got"
                              " deleted after disabling it"))
        g.log.info("gluster_shared_storage volume not deleted as "
                   " expected after disabling enable-shared-storage")

        # Delete the volume created
        ret = volume_delete(self.mnode, volume)
        self.assertTrue(ret, ("Failed to cleanup the Volume " "%s", volume))
        g.log.info("Volume deleted successfully : %s", volume)

        # Enable shared storage and check it is present on the cluster
        self._enable_and_check_shared_storage()

        # Check the shared volume is mounted on the nodes where it is created
        self._is_shared_storage_mounted_on_the_nodes(brick_details=brick_list,
                                                     mounted=True)

        # Disable shared storage and check it is not present on the cluster
        self._disable_and_check_shared_storage()

        # Check the shared volume is unmounted on the nodes where it is created
        self._is_shared_storage_mounted_on_the_nodes(brick_details=brick_list,
                                                     mounted=False)
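_shared_storage_test_without_node_reboot calls private helpers such as _enable_and_check_shared_storage that are not shown in this excerpt. A hedged reconstruction of that particular helper is sketched below, built only from the enable_shared_storage and check_gluster_shared_volume calls that the test_shared_storage method below makes; the real helper in the test class may differ.

    def _enable_and_check_shared_storage(self):
        """Hypothetical reconstruction: enable shared storage and verify
        the gluster_shared_storage volume is created."""
        ret = enable_shared_storage(self.mnode)
        self.assertTrue(ret, "Failed to enable shared storage")
        g.log.info("Successfully enabled: enable-shared-storage option")

        # The shared volume should now appear in the volume list
        ret = check_gluster_shared_volume(self.mnode)
        self.assertTrue(ret, "gluster_shared_storage volume not created "
                             "even after enabling it")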
    def test_shared_storage(self):
        """This test case includes:
        -> Enable a shared storage
        -> Disable a shared storage
        -> Create volume of any type with
           name gluster_shared_storage
        -> Disable the shared storage
        -> Check that the volume created in step 3 is
           not deleted
        -> Delete the volume
        -> Enable the shared storage
        -> Check volume with name gluster_shared_storage
           is created
        -> Disable the shared storage
        """
        # pylint: disable=too-many-statements, too-many-branches
        # Enable a shared storage without specifying the domain
        ret = enable_shared_storage(self.mnode)
        self.assertTrue(ret, ("Failed to enable a shared storage"))
        g.log.info("Successfully enabled: enable-shared-storage option")

        # Check volume list to confirm gluster_shared_storage is created
        ret = check_gluster_shared_volume(self.mnode)
        self.assertTrue(ret, ("gluster_shared_storage volume not"
                              " created even after enabling it"))
        g.log.info("gluster_shared_storage volume created" " successfully")

        # Check the shared volume got mounted
        ret = is_shared_volume_mounted(self.mnode)
        self.assertTrue(ret, ("Shared volume not mounted even"
                              " after enabling it"))
        g.log.info("Shared volume mounted successfully")

        # Disable a shared storage without specifying the domain
        ret = disable_shared_storage(self.mnode)
        self.assertTrue(ret, ("Failed to disable a shared storage"))
        g.log.info("Successfully disabled: disable-shared-storage")

        # Check volume list to confirm gluster_shared_storage is deleted
        ret = check_gluster_shared_volume(self.mnode, present=False)
        self.assertTrue(ret, ("gluster_shared_storage volume not"
                              " deleted even after disabling it"))
        g.log.info("gluster_shared_storage volume deleted" " successfully")

        # Check the shared volume unmounted
        ret = is_shared_volume_mounted(self.mnode)
        self.assertFalse(ret, ("Shared volume not unmounted even"
                               " after disabling it"))
        g.log.info("Shared volume unmounted successfully")

        # Create a volume with name gluster_shared_storage
        g.log.info("creation of volume should succeed")
        volume = "gluster_shared_storage"
        bricks_list = form_bricks_list(self.mnode, volume, 2, self.servers,
                                       self.all_servers_info)
        count = 0
        while count < 20:
            ret, _, _ = volume_create(self.mnode, volume, bricks_list, True)
            if not ret:
                break
            sleep(2)
            count += 1
        self.assertEqual(ret, 0, "Failed to create volume")
        g.log.info("Volume create is success")

        # Disable the shared storage should fail
        ret = disable_shared_storage(self.mnode)
        self.assertFalse(ret, ("Unexpected: Successfully disabled"
                               " shared-storage"))
        g.log.info("Volume set: failed as expected")

        # Check volume list to confirm gluster_shared_storage
        # is not deleted which was created before
        vol_list = get_volume_list(self.mnode)
        _rc = False
        for vol in vol_list:
            if vol == "gluster_shared_storage":
                _rc = True
                break
        self.assertTrue(_rc, ("gluster_shared_storage volume got"
                              " deleted after disabling it"))
        g.log.info("gluster_shared_storage volume not deleted as "
                   " expected after disabling enable-shared-storage")

        # Delete the volume created
        ret = volume_delete(self.mnode, volume)
        self.assertTrue(ret, ("Failed to cleanup the Volume " "%s", volume))
        g.log.info("Volume deleted successfully : %s", volume)

        # Enable the shared storage
        ret = enable_shared_storage(self.mnode)
        self.assertTrue(ret, ("Failed to enable a shared storage"))
        g.log.info("Successfully enabled: enable-shared-storage option")

        # Check volume list to confirm gluster_shared_storage is created
        ret = check_gluster_shared_volume(self.mnode)
        self.assertTrue(ret, ("gluster_shared_storage volume not"
                              " created even after enabling it"))
        g.log.info("gluster_shared_storage volume created" " successfully")

        # Check the shared volume got mounted
        ret = is_shared_volume_mounted(self.mnode)
        self.assertTrue(ret, ("Shared volume not mounted even"
                              " after enabling it"))
        g.log.info("Shared volume mounted successfully")

        # Disable a shared storage
        ret = disable_shared_storage(self.mnode)
        self.assertTrue(ret, ("Failed to disable a shared storage"))
        g.log.info("Successfully disabled: disable-shared-storage")

        # Check volume list to confirm gluster_shared_storage is deleted
        ret = check_gluster_shared_volume(self.mnode, present=False)
        self.assertTrue(ret, ("gluster_shared_storage volume not"
                              " deleted even after disabling it"))
        g.log.info("gluster_shared_storage volume deleted" " successfully")

        # Check the shared volume unmounted
        ret = is_shared_volume_mounted(self.mnode)
        self.assertFalse(ret, ("Shared volume not unmounted even"
                               " after disabling it"))
        g.log.info("Shared volume unmounted successfully")