def test_reserved_port_range_for_gluster(self):
        """
        Test Case:
        1) Set the max-port option in glusterd.vol file to 49200
        2) Restart glusterd on one of the nodes
        3) Create 50 volumes in a loop
        4) Try to start the 50 volumes in a loop
        5) Confirm that the 50th volume failed to start
        6) Confirm the error message due to which the volume failed to start
        7) Set the max-port option in glusterd.vol back to the default value
        8) Restart glusterd on the same node
        9) Starting the 50th volume should succeed now
        """
        # Set the max port number to 49200 in the glusterd.vol file
        cmd = "sed -i 's/60999/49200/' /etc/glusterfs/glusterd.vol"
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(
            ret, 0, "Failed to set the max-port to 49200 in"
            " glusterd.vol file")

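        # Flag noting that the glusterd.vol port range was modified;
        # presumably checked during cleanup/tearDown to restore the default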
        self.port_range_changed = True

        # Restart glusterd
        ret = restart_glusterd(self.mnode)
        self.assertTrue(ret, "Failed to restart glusterd")
        g.log.info("Successfully restarted glusterd on node: %s", self.mnode)

        # Check that the node on which glusterd was restarted is back in
        # 'Connected' state, as seen from another peer
        ret = wait_for_peers_to_connect(self.servers[1], self.servers)
        self.assertTrue(ret, "All the peers are not in connected state")

        # Fetch the available bricks dict
        bricks_dict = get_servers_bricks_dict(self.servers,
                                              self.all_servers_info)
        self.assertIsNotNone(bricks_dict, "Failed to get the bricks dict")

        # Create 50 volumes in a loop
        for i in range(1, 51):
            self.volname = "volume-%d" % i
            bricks_list = []
            j = 0
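            # Pick one brick directory at random from each server to build
            # this volume's brick list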
            for key, value in bricks_dict.items():
                j += 1
                brick = choice(value)
                brick = "{}:{}/{}_brick-{}".format(key, brick, self.volname, j)
                bricks_list.append(brick)

            ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
            self.assertEqual(ret, 0,
                             "Failed to create volume: %s" % self.volname)
            g.log.info("Successfully created volume: %s", self.volname)

        # Try to start the 50 volumes in a loop
        for i in range(1, 51):
            self.volname = "volume-%d" % i
            ret, _, err = volume_start(self.mnode, self.volname)
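            # Stop at the first volume that fails to start; with max-port
            # set to 49200 the brick port range is expected to be exhausted
            # before the 50th volume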
            if ret:
                break
        g.log.info("Successfully started all the volumes until volume: %s",
                   self.volname)

        # Confirm if the 50th volume failed to start
        self.assertEqual(
            i, 50, "Failed to start the volumes volume-1 to"
            " volume-49 in a loop")

        # Confirm the error message on volume start fail
        err_msg = ("volume start: volume-50: failed: Commit failed on"
                   " localhost. Please check log file for details.")
        self.assertEqual(
            err.strip(), err_msg, "Volume start failed with"
            " a different error message")

        # Confirm the error message from the log file
        cmd = ("cat /var/log/glusterfs/glusterd.log | %s" %
               "grep -i 'All the ports in the range are exhausted' | wc -l")
        ret, out, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0, "Failed to 'grep' the glusterd.log file")
        self.assertNotEqual(
            out.strip(), "0", "Volume start didn't fail with the expected"
            " error message")

        # Set the max port number back to the default value in glusterd.vol
        cmd = "sed -i 's/49200/60999/' /etc/glusterfs/glusterd.vol"
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(
            ret, 0, "Failed to set the max-port back to 60999 in"
            " glusterd.vol file")

        self.port_range_changed = False

        # Restart glusterd on the same node
        ret = restart_glusterd(self.mnode)
        self.assertTrue(ret, "Failed to restart glusterd")
        g.log.info("Successfully restarted glusterd on node: %s", self.mnode)

        # Starting the 50th volume should succeed now
        self.volname = "volume-%d" % i
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to start volume: %s" % self.volname)

    def test_shared_storage(self):
        """This test case includes:
        1) Enable shared storage
        2) Disable shared storage
        3) Create a volume of any type with the name
           gluster_shared_storage
        4) Disable shared storage
        5) Check that the volume created in step 3 is
           not deleted
        6) Delete the volume
        7) Enable shared storage
        8) Check that a volume with the name
           gluster_shared_storage is created
        9) Disable shared storage
        """
        # pylint: disable=too-many-statements, too-many-branches
        # Enable shared storage without specifying the domain
        ret = enable_shared_storage(self.mnode)
        self.assertTrue(ret, "Failed to enable shared storage")
        g.log.info("Successfully enabled: enable-shared-storage option")

        # Check volume list to confirm gluster_shared_storage is created
        ret = check_gluster_shared_volume(self.mnode)
        self.assertTrue(ret, ("gluster_shared_storage volume not"
                              " created even after enabling it"))
        g.log.info("gluster_shared_storage volume created" " successfully")

        # Check the shared volume got mounted
        ret = is_shared_volume_mounted(self.mnode)
        self.assertTrue(ret, ("Shared volume not mounted even"
                              " after enabling it"))
        g.log.info("Shared volume mounted successfully")

        # Disable shared storage without specifying the domain
        ret = disable_shared_storage(self.mnode)
        self.assertTrue(ret, "Failed to disable shared storage")
        g.log.info("Successfully disabled: disable-shared-storage")

        # Check volume list to confirm gluster_shared_storage is deleted
        ret = check_gluster_shared_volume(self.mnode, present=False)
        self.assertTrue(ret, ("gluster_shared_storage volume not"
                              " deleted even after disabling it"))
        g.log.info("gluster_shared_storage volume deleted" " successfully")

        # Check the shared volume unmounted
        ret = is_shared_volume_mounted(self.mnode)
        self.assertFalse(ret, ("Shared volume not unmounted even"
                               " after disabling it"))
        g.log.info("Shared volume unmounted successfully")

        # Create a volume with name gluster_shared_storage
        g.log.info("creation of volume should succeed")
        volume = "gluster_shared_storage"
        bricks_list = form_bricks_list(self.mnode, volume, 2, self.servers,
                                       self.all_servers_info)
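        # Volume create may transiently fail right after toggling shared
        # storage (assumption), so retry for up to ~40s (20 tries, 2s apart)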
        count = 0
        while count < 20:
            ret, _, _ = volume_create(self.mnode, volume, bricks_list,
                                      force=True)
            if not ret:
                break
            sleep(2)
            count += 1
        self.assertEqual(ret, 0, "Failed to create volume")
        g.log.info("Volume create is success")

        # Disabling the shared storage should fail
        ret = disable_shared_storage(self.mnode)
        self.assertFalse(ret, ("Unexpected: Successfully disabled"
                               " shared-storage"))
        g.log.info("Volume set: failed as expected")

        # Check the volume list to confirm that the gluster_shared_storage
        # volume created above has not been deleted
        vol_list = get_volume_list(self.mnode)
        self.assertIsNotNone(vol_list, "Failed to get the volume list")
        self.assertIn("gluster_shared_storage", vol_list,
                      ("gluster_shared_storage volume got"
                       " deleted after disabling it"))
        g.log.info("gluster_shared_storage volume not deleted as"
                   " expected after disabling enable-shared-storage")

        # Delete the volume created
        ret = volume_delete(self.mnode, volume)
        self.assertTrue(ret, ("Failed to cleanup the Volume " "%s", volume))
        g.log.info("Volume deleted successfully : %s", volume)

        # Enable the shared storage
        ret = enable_shared_storage(self.mnode)
        self.assertTrue(ret, "Failed to enable shared storage")
        g.log.info("Successfully enabled: enable-shared-storage option")

        # Check volume list to confirm gluster_shared_storage is created
        ret = check_gluster_shared_volume(self.mnode)
        self.assertTrue(ret, ("gluster_shared_storage volume not"
                              " created even after enabling it"))
        g.log.info("gluster_shared_storage volume created" " successfully")

        # Check the shared volume got mounted
        ret = is_shared_volume_mounted(self.mnode)
        self.assertTrue(ret, ("Shared volume not mounted even"
                              " after enabling it"))
        g.log.info("Shared volume mounted successfully")

        # Disable the shared storage
        ret = disable_shared_storage(self.mnode)
        self.assertTrue(ret, "Failed to disable shared storage")
        g.log.info("Successfully disabled: disable-shared-storage")

        # Check volume list to confirm gluster_shared_storage is deleted
        ret = check_gluster_shared_volume(self.mnode, present=False)
        self.assertTrue(ret, ("gluster_shared_storage volume not"
                              " deleted even after disabling it"))
        g.log.info("gluster_shared_storage volume deleted" " successfully")

        # Check the shared volume unmounted
        ret = is_shared_volume_mounted(self.mnode)
        self.assertFalse(ret, ("Shared volume not unmounted even"
                               " after disabling it"))
        g.log.info("Shared volume unmounted successfully")

    def test_rebalance_start_not_fail(self):
        """
        1. On node N1, add "option transport.socket.bind-address N1" to
           /etc/glusterfs/glusterd.vol
        2. Create a replicate (1x3) volume and a disperse (4+2) volume
           with names of more than 108 characters
        3. Mount both volumes using node N1, where the
           "transport.socket.bind-address" option is added, and start IO
           (like untar)
        4. Add 3 bricks to the replicate volume
        5. Start rebalance on the replicate volume
        6. Add 6 bricks to the disperse volume
        7. Start rebalance on the disperse volume
        """
        cmd = ("sed -i 's/end-volume/option "
               "transport.socket.bind-address {}\\n&/g' "
               "/etc/glusterfs/glusterd.vol".format(self.mnode))
        disperse = ("disperse_e4upxjmtre7dl4797wedbp7r3jr8equzvmcae9f55t6z1"
                    "ffhrlk40jtnrzgo4n48fjf6b138cttozw3c6of3ze71n9urnjkshoi")
        replicate = ("replicate_e4upxjmtre7dl4797wedbp7r3jr8equzvmcae9f55t6z1"
                     "ffhrlk40tnrzgo4n48fjf6b138cttozw3c6of3ze71n9urnjskahn")

        volnames = (disperse, replicate)
        for volume, vol_name in (("disperse", disperse), ("replicate",
                                                          replicate)):

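            # Form the brick list: 6 bricks for the disperse (4+2) volume,
            # 3 for the replicate (1x3) volume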
            bricks_list = form_bricks_list(self.mnode, volume,
                                           6 if volume == "disperse" else 3,
                                           self.servers, self.all_servers_info)
            if volume == "replicate":
                ret, _, _ = volume_create(self.mnode,
                                          replicate,
                                          bricks_list,
                                          replica_count=3)

            else:
                ret, _, _ = volume_create(self.mnode,
                                          disperse,
                                          bricks_list,
                                          force=True,
                                          disperse_count=6,
                                          redundancy_count=2)

            self.assertFalse(
                ret, "Unexpected: Volume create '{}' failed ".format(vol_name))
            ret, _, _ = volume_start(self.mnode, vol_name)
            self.assertFalse(ret, "Failed to start volume")

        # Add entry in 'glusterd.vol'
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertFalse(ret, "Failed to add entry in 'glusterd.vol' file")

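        # Handles of the async untar processes started below; presumably
        # waited on/validated during cleanup, along with is_io_running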
        self.list_of_io_processes = []

        # Mount both volumes; mount dir order matches the volnames order
        self.mount = ("/mnt/disperse_mount", "/mnt/replicated_mount")
        for mount_dir, volname in zip(self.mount, volnames):
            ret, _, _ = mount_volume(volname, "glusterfs", mount_dir,
                                     self.mnode, self.clients[0])
            self.assertFalse(
                ret, "Failed to mount the volume '{}'".format(mount_dir))

            # Run IO
            # Create a dir to start untar
            self.linux_untar_dir = "{}/{}".format(mount_dir, "linuxuntar")
            ret = mkdir(self.clients[0], self.linux_untar_dir)
            self.assertTrue(ret, "Failed to create dir linuxuntar for untar")

            # Start linux untar on dir linuxuntar
            ret = run_linux_untar(self.clients[:1], mount_dir,
                                  dirs=("linuxuntar",))
            self.list_of_io_processes += ret
            self.is_io_running = True

        # Add Brick to replicate Volume
        bricks_list = form_bricks_list(self.mnode, replicate, 3, self.servers,
                                       self.all_servers_info, "replicate")
        ret, _, _ = add_brick(self.mnode, replicate, bricks_list, force=True)
        self.assertFalse(ret, "Failed to add-brick '{}'".format(replicate))

        # Trigger Rebalance on the volume
        ret, _, _ = rebalance_start(self.mnode, replicate)
        self.assertFalse(
            ret,
            "Failed to start rebalance on the volume '{}'".format(replicate))

        # Add Brick to disperse Volume
        bricks_list = form_bricks_list(self.mnode, disperse, 6, self.servers,
                                       self.all_servers_info, "disperse")

        ret, _, _ = add_brick(self.mnode, disperse, bricks_list, force=True)
        self.assertFalse(ret, "Failed to add-brick '{}'".format(disperse))

        # Trigger Rebalance on the volume
        ret, _, _ = rebalance_start(self.mnode, disperse)
        self.assertFalse(
            ret, "Failed to start rebalance on the volume {}".format(disperse))

        # Check if rebalance has completed on both volumes
        for volume in (replicate, disperse):
            ret = wait_for_rebalance_to_complete(self.mnode,
                                                 volume,
                                                 timeout=600)
            self.assertTrue(
                ret,
                "Rebalance is not Compleated on Volume '{}'".format(volume))