Code example #1
def check_gluster_shared_volume(mnode, present=True):
    """
    Check whether the gluster shared volume is present or absent.

    Args:
        mnode (str) : Node on which command is to be executed
        present (bool) : True if you want to check presence
                         False if you want to check absence.

    Returns:
        bool : True if the shared volume is in the expected state
               (present or absent, as requested). False otherwise.
    """
    if present:
        halt = 20
        counter = 0
        g.log.info("Wait for some seconds to create "
                   "gluster_shared_storage volume.")

        while counter < halt:
            vol_list = get_volume_list(mnode)
            if "gluster_shared_storage" in vol_list:
                return True
            else:
                g.log.info("Wait for some seconds, since it takes "
                           "time to create gluster_shared_storage "
                           "volume.")
                sleep(2)
                counter = counter + 2

        return False

    else:
        halt = 20
        counter = 0
        g.log.info("Wait for some seconds to delete "
                   "gluster_shared_storage volume.")

        while counter < halt:
            vol_list = get_volume_list(mnode)
            if "gluster_shared_storage" not in vol_list:
                return True
            else:
                g.log.info("Wait for some seconds, since it takes "
                           "time to delete gluster_shared_storage "
                           "volume.")
                sleep(2)
                counter = counter + 2

        return False
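
A minimal usage sketch for check_gluster_shared_volume, not taken from the source above; the import paths (and the pairing with enable_shared_storage) are assumptions about where this helper lives in glusto-tests and may need adjusting:

# Hypothetical snippet: module paths and the node name are assumptions.
from glusto.core import Glusto as g
from glustolibs.gluster.shared_storage_ops import (
    check_gluster_shared_volume, enable_shared_storage)


def ensure_shared_storage(mnode):
    """Enable shared storage and wait for gluster_shared_storage to appear."""
    if not enable_shared_storage(mnode):
        g.log.error("Failed to enable shared storage on %s", mnode)
        return False
    # check_gluster_shared_volume polls get_volume_list() for ~20 seconds.
    return check_gluster_shared_volume(mnode, present=True)
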
Code example #2
    def tearDown(self):

        # Cleanup and umount volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret = umount_volume(self.mounts[0].client_system,
                            self.mounts[0].mountpoint,
                            mtype=self.mount_type)
        self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Clean up all volumes and peer probe to form cluster
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)
        g.log.info("Successful in umounting the volume and Cleanup")

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        GlusterBaseClass.tearDown.im_func(self)
Code example #3
    def tearDown(self):

        # UnMount Volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret = umount_volume(self.mounts[0].client_system,
                            self.mounts[0].mountpoint,
                            mtype=self.mount_type)
        self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Clean up all volumes and peer probe to form cluster
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                # check all bricks are online
                ret = wait_for_bricks_to_be_online(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to bring bricks online"
                                         "for volume %s" % volume)
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe peer "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        self.get_super_method(self, 'tearDown')()
Code example #4
    def tearDown(self):

        # Unmounting the volume.
        ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
                                  mpoint=self.mounts[0].mountpoint)
        if ret:
            raise ExecutionError("Volume %s is not unmounted" % self.volname)
        g.log.info("Volume unmounted successfully : %s", self.volname)

        # clean up all volumes
        vol_list = get_volume_list(self.mnode)
        if not vol_list:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume % s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        # Cleaning the deleted volume bricks
        for brick in self.brick_list:
            node, brick_path = brick.split(r':')
            cmd = "rm -rf " + brick_path
            ret, _, _ = g.run(node, cmd)
            if ret:
                raise ExecutionError("Failed to delete the brick "
                                     "dir's of deleted volume")

        self.get_super_method(self, 'tearDown')()
Code example #5
    def tearDown(self):
        # stopping the volume and Cleaning up the volume
        self.get_super_method(self, 'tearDown')()
        ret = is_glusterd_running(self.servers)
        if ret:
            ret = start_glusterd(self.servers)
            if not ret:
                raise ExecutionError("Failed to start glusterd on %s" %
                                     self.servers)
        # Takes 5 seconds to restart glusterd into peer connected state
        sleep(5)
        g.log.info("Glusterd started successfully on %s", self.servers)

        # checking for peer status from every node
        ret = is_peer_connected(self.mnode, self.servers)
        if not ret:
            ret = peer_probe_servers(self.mnode, self.servers)
            if not ret:
                raise ExecutionError("Failed to peer probe failed in "
                                     "servers %s" % self.servers)
        g.log.info("All peers are in connected state")
        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed Cleanup the Volume")
        g.log.info("Volume deleted successfully")
Code example #6
    def tearDown(self):

        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed Cleanup the volume")
            g.log.info("Volume deleted successfully %s", volume)

        # Setting quorum ratio to 51%
        ret = set_volume_options(self.mnode, 'all',
                                 {'cluster.server-quorum-ratio': '51%'})
        if not ret:
            raise ExecutionError("Failed to set server quorum ratio on %s" %
                                 self.volname)

        # Peer probe servers since we are doing peer detach in setUpClass
        for server in self.servers:
            ret = is_peer_connected(server, self.servers)
            if not ret:
                ret = peer_probe_servers(server, self.servers)
                if not ret:
                    raise ExecutionError(
                        "Peer probe failed to one of the node")
                g.log.info("Peer probe successful")

        self.get_super_method(self, 'tearDown')()
Code example #7
def wait_to_heal_complete(gluster_pod,
                          hostname=None,
                          timeout=300,
                          wait_step=5):
    """Monitors heal for volumes on gluster
        gluster_pod (podcmd | str): gluster pod class object has gluster
                                    pod and ocp master node or gluster
                                    pod name
        hostname (str): master node on which gluster pod exists
    """
    gluster_pod = _get_gluster_pod(gluster_pod, hostname)

    gluster_vol_list = get_volume_list(gluster_pod)
    if not gluster_vol_list:
        raise AssertionError("failed to get gluster volume list")

    _waiter = waiter.Waiter(timeout=timeout, interval=wait_step)
    for gluster_vol in gluster_vol_list:
        for w in _waiter:
            if is_heal_complete(gluster_pod, gluster_vol):
                break

    if w.expired:
        err_msg = ("reached timeout waiting for all the gluster volumes "
                   "to reach the 'healed' state.")
        g.log.error(err_msg)
        raise AssertionError(err_msg)
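
A short usage sketch for wait_to_heal_complete, not from the source above; the import path, pod name and hostname are placeholders/assumptions (the helper sits in a gluster_ops module of the CNS/OCS test libraries in some versions):

# Hypothetical snippet: import path, pod name and hostname are assumptions.
from openshiftstoragelibs.gluster_ops import wait_to_heal_complete

# Block until self-heal has finished on every gluster volume, allowing up
# to 10 minutes and polling every 10 seconds; raises AssertionError on timeout.
wait_to_heal_complete("glusterfs-storage-abc12",
                      hostname="ocp-master.example.com",
                      timeout=600,
                      wait_step=10)
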
Code example #8
    def tearDown(self):
        """
        tearDown for every test
        """
        # clean up all volumes
        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume % s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        # Cleaning the deleted volume bricks
        for brick in self.brick_list:
            node, brick_path = brick.split(r':')
            cmd = "rm -rf " + brick_path
            ret, _, _ = g.run(node, cmd)
            if ret:
                raise ExecutionError("Failed to delete the brick "
                                     "dir's of deleted volume")

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
Code example #9
def get_block_hosting_volume_name(heketi_client_node, heketi_server_url,
                                  block_volume):
    """Returns block hosting volume name of given block volume

    Args:
        heketi_client_node (str): Node on which cmd has to be executed.
        heketi_server_url (str): Heketi server url
        block_volume (str): Block volume for which the block hosting
                            volume is returned
    Returns:
        str : Name of the block hosting volume for given block volume
    """
    block_vol_info = heketi_blockvolume_info(heketi_client_node,
                                             heketi_server_url, block_volume)

    for line in block_vol_info.splitlines():
        block_hosting_vol_match = re.search("^Block Hosting Volume: (.*)$",
                                            line)

        if not block_hosting_vol_match:
            continue

        gluster_vol_list = get_volume_list("auto_get_gluster_endpoint")
        for vol in gluster_vol_list:
            if block_hosting_vol_match.group(1).strip() in vol:
                return vol
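
A usage sketch for get_block_hosting_volume_name, not from the source above; the heketi node, server URL and block volume id are placeholders, and the import path is an assumption:

# Hypothetical snippet: all names, URLs and ids below are placeholders.
from openshiftstoragelibs.gluster_ops import get_block_hosting_volume_name

heketi_node = "heketi-client.example.com"
heketi_url = "http://heketi-storage.example.com:8080"
block_vol_id = "0e3071f32f9c06e2ad6f37270be8fdc5"

bhv_name = get_block_hosting_volume_name(heketi_node, heketi_url, block_vol_id)
if bhv_name is None:
    raise AssertionError(
        "No block hosting volume found for block volume %s" % block_vol_id)
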
Code example #10
    def tearDown(self):

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        # clean up all volumes and detaches peers from cluster

        vol_list = get_volume_list(self.mnode)
        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to Cleanup the "
                                     "Volume %s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        # Calling GlusterBaseClass tearDown
        GlusterBaseClass.tearDown.im_func(self)
Code example #11
    def tearDownClass(cls):
        """
        Clean up the volume and umount volume from client
        """
        # stopping all volumes
        g.log.info("Starting to Cleanup all Volumes")
        volume_list = get_volume_list(cls.mnode)
        for volume in volume_list:
            ret = cleanup_volume(cls.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup Volume %s" % volume)
            g.log.info("Volume: %s cleanup is done", volume)
        g.log.info("Successfully Cleanedup all Volumes")

        # umount all volumes
        for mount_point in cls.mount_points:
            ret, _, _ = umount_volume(cls.client, mount_point)
            if ret:
                raise ExecutionError("Failed to umount on volume %s " %
                                     cls.volname)
            g.log.info("Successfully umounted %s on client %s", cls.volname,
                       cls.client)

        # calling GlusterBaseClass tearDownClass
        GlusterBaseClass.tearDownClass.im_func(cls)
Code example #12
    def tearDownClass(cls):
        """
        Clean up the volume and umount volume from client
        """
        # umount all volumes
        for mount_obj in cls.mounts:
            ret, _, _ = umount_volume(
                mount_obj.client_system, mount_obj.mountpoint)
            if ret:
                raise ExecutionError(
                    "Failed to umount on volume %s "
                    % cls.volname)
            g.log.info("Successfully umounted %s on client %s",
                       cls.volname, mount_obj.client_system)
            ret = rmdir(mount_obj.client_system, mount_obj.mountpoint)
            if not ret:
                raise ExecutionError(
                    "Failed to remove the mount directory.")
            g.log.info("Mount directory is removed successfully")

        # stopping all volumes
        g.log.info("Starting to Cleanup all Volumes")
        volume_list = get_volume_list(cls.mnode)
        for volume in volume_list:
            ret = cleanup_volume(cls.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup Volume %s" % volume)
            g.log.info("Volume: %s cleanup is done", volume)
        g.log.info("Successfully Cleanedup all Volumes")

        # calling GlusterBaseClass tearDownClass
        cls.get_super_method(cls, 'tearDownClass')()
Code example #13
    def tearDown(self):

        # unmount the volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volume unmount failed for %s" % self.volname)

        # get volumes list and clean up all the volumes
        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Error while getting vol list")
        else:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if ret is True:
                    g.log.info("Volume deleted successfully : %s", volume)
                else:
                    raise ExecutionError("Failed Cleanup the"
                                         " Volume %s" % volume)

        # peer probe all the servers
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Peer probe failed to all the servers from "
                                 "the node.")

        GlusterBaseClass.tearDown.im_func(self)
Code example #14
def get_block_hosting_volume_name(heketi_client_node, heketi_server_url,
                                  block_volume):
    """Returns block hosting volume name of given block volume

    Args:
        heketi_client_node (str): Node on which cmd has to be executed.
        heketi_server_url (str): Heketi server url
        block_volume (str): Block volume for which the block hosting
                            volume is returned
    Returns:
        str : Name of the block hosting volume for given block volume
    """
    block_vol_info = heketi_blockvolume_info(
        heketi_client_node, heketi_server_url, block_volume
    )

    for line in block_vol_info.splitlines():
        block_hosting_vol_match = re.search(
            "^Block Hosting Volume: (.*)$", line
        )

        if not block_hosting_vol_match:
            continue

        gluster_vol_list = get_volume_list("auto_get_gluster_endpoint")
        for vol in gluster_vol_list:
            if block_hosting_vol_match.group(1).strip() in vol:
                return vol
Code example #15
    def tearDown(self):

        # start glusterd on all servers
        ret = start_glusterd(self.servers)
        if not ret:
            raise ExecutionError("Failed to start glusterd on all servers")

        for server in self.servers:
            ret = is_peer_connected(server, self.servers)
            if not ret:
                ret = peer_probe_servers(server, self.servers)
                if not ret:
                    raise ExecutionError("Failed to peer probe all "
                                         "the servers")

        # clean up all volumes
        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume % s" % volume)
            g.log.info("Volume deleted successfully : %s" % volume)

        GlusterBaseClass.tearDown.im_func(self)
Code example #16
    def test_volume_create_and_list_volume(self):
        """Validate heketi and gluster volume list"""
        g.log.info("List gluster volumes before Heketi volume creation")
        existing_g_vol_list = get_volume_list('auto_get_gluster_endpoint')
        self.assertTrue(existing_g_vol_list, ("Unable to get volumes list"))

        g.log.info("List heketi volumes before volume creation")
        existing_h_vol_list = heketi_volume_list(
            self.heketi_client_node, self.heketi_server_url,
            json=True)["volumes"]
        g.log.info("Heketi volumes successfully listed")

        g.log.info("Create a heketi volume")
        out = heketi_volume_create(self.heketi_client_node,
                                   self.heketi_server_url,
                                   self.volume_size, json=True)
        g.log.info("Heketi volume successfully created" % out)
        volume_id = out["bricks"][0]["volume"]
        self.addCleanup(
            heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, volume_id)

        g.log.info("List heketi volumes after volume creation")
        h_vol_list = heketi_volume_list(
            self.heketi_client_node, self.heketi_server_url,
            json=True)["volumes"]
        g.log.info("Heketi volumes successfully listed")

        g.log.info("List gluster volumes after Heketi volume creation")
        g_vol_list = get_volume_list('auto_get_gluster_endpoint')
        self.assertTrue(g_vol_list, ("Unable to get volumes list"))
        g.log.info("Successfully got the volumes list")

        # Perform checks
        self.assertEqual(
            len(existing_g_vol_list) + 1, len(g_vol_list),
            "Expected creation of only one volume in Gluster creating "
            "Heketi volume. Here is lists before and after volume creation: "
            "%s \n%s" % (existing_g_vol_list, g_vol_list))
        self.assertEqual(
            len(existing_h_vol_list) + 1, len(h_vol_list),
            "Expected creation of only one volume in Heketi. Here is lists "
            "of Heketi volumes before and after volume creation: %s\n%s" % (
                existing_h_vol_list, h_vol_list))
Code example #17
    def test_volume_create_and_list_volume(self):
        """Validate heketi and gluster volume list"""
        g.log.info("List gluster volumes before Heketi volume creation")
        existing_g_vol_list = get_volume_list('auto_get_gluster_endpoint')
        self.assertTrue(existing_g_vol_list, ("Unable to get volumes list"))

        g.log.info("List heketi volumes before volume creation")
        existing_h_vol_list = heketi_volume_list(self.heketi_client_node,
                                                 self.heketi_server_url,
                                                 json=True)["volumes"]
        g.log.info("Heketi volumes successfully listed")

        g.log.info("Create a heketi volume")
        out = heketi_volume_create(self.heketi_client_node,
                                   self.heketi_server_url,
                                   self.volume_size,
                                   json=True)
        g.log.info("Heketi volume successfully created" % out)
        volume_id = out["bricks"][0]["volume"]
        self.addCleanup(heketi_volume_delete, self.heketi_client_node,
                        self.heketi_server_url, volume_id)

        g.log.info("List heketi volumes after volume creation")
        h_vol_list = heketi_volume_list(self.heketi_client_node,
                                        self.heketi_server_url,
                                        json=True)["volumes"]
        g.log.info("Heketi volumes successfully listed")

        g.log.info("List gluster volumes after Heketi volume creation")
        g_vol_list = get_volume_list('auto_get_gluster_endpoint')
        self.assertTrue(g_vol_list, ("Unable to get volumes list"))
        g.log.info("Successfully got the volumes list")

        # Perform checks
        self.assertEqual(
            len(existing_g_vol_list) + 1, len(g_vol_list),
            "Expected creation of only one volume in Gluster creating "
            "Heketi volume. Here is lists before and after volume creation: "
            "%s \n%s" % (existing_g_vol_list, g_vol_list))
        self.assertEqual(
            len(existing_h_vol_list) + 1, len(h_vol_list),
            "Expected creation of only one volume in Heketi. Here is lists "
            "of Heketi volumes before and after volume creation: %s\n%s" %
            (existing_h_vol_list, h_vol_list))
Code example #18
    def tearDown(self):
        '''
        clean up all volumes and detaches peers from cluster
        '''
        vol_list = get_volume_list(self.mnode)
        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            self.assertTrue(ret, "Failed to Cleanup the Volume %s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        self.get_super_method(self, 'tearDown')()
Code example #19
    def tearDown(self):
        '''
        clean up all volumes and detaches peers from cluster
        '''
        vol_list = get_volume_list(self.mnode)
        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            self.assertTrue(ret, "Failed to Cleanup the Volume %s" % volume)
            g.log.info("Volume deleted successfully : %s" % volume)

        GlusterBaseClass.tearDown.im_func(self)
Code example #20
    def tearDown(self):
        # Stopping and cleaning up the volume
        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get volume list")

        for volume in vol_list:
            if not cleanup_volume(self.mnode, volume):
                raise ExecutionError("Failed Cleanup the Volume")

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
Code example #21
    def validate_file_volumes_count(self, h_node, h_server, node_ip):

        # check volume count from heketi and gluster are same
        heketi_topology_info(h_node, h_server, json=True)
        h_volume_list = heketi_volume_list(h_node, h_server, json=True)
        vol_list = get_volume_list(node_ip)
        self.assertIsNotNone(
            vol_list, "Failed to get volumes list")
        self.assertEqual(
            len(h_volume_list['volumes']), len(vol_list),
            "Failed to verify volume count Expected:'{}', Actual:'{}'".format(
                len(h_volume_list['volumes']), len(vol_list)))
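
A sketch, not from the source above, of how a test method in the same class might call this helper; the self.* attributes are assumptions about the base-class configuration:

    # Hypothetical snippet in the same test class; attribute names are assumed.
    def test_file_volume_count_matches(self):
        """Compare heketi and gluster file-volume counts on one gluster node."""
        self.validate_file_volumes_count(
            self.heketi_client_node,     # heketi client node (assumed attribute)
            self.heketi_server_url,      # heketi server URL (assumed attribute)
            self.gluster_servers[0])     # gluster node IP (assumed attribute)
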
Code example #22
    def tearDown(self):
        # stopping the volume and Cleaning up the volume
        GlusterBaseClass.tearDown.im_func(self)
        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed Cleanup the Volume")
        g.log.info("Volume deleted successfully")
Code example #23
    def tearDown(self):

        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume % s" % volume)
            g.log.info("Volume deleted successfully : %s" % volume)

        GlusterBaseClass.tearDown.im_func(self)
Code example #24
    def tearDown(self):
        # Stopping and cleaning up the volume
        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed Cleanup the Volume")
            g.log.info("Volume deleted successfully : %s", volume)

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
Code example #25
    def tearDown(self):
        """tear Down callback"""
        # clean up all volumes
        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get the volume list")
        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume % s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
Code example #26
def form_bricks_for_multivol(mnode, volname, number_of_bricks, servers,
                             servers_info):
    """
    Forms bricks list for volume create/add-brick, given the number_of_bricks,
    servers and servers_info, for a multiple-volume cluster and for a
    brick-multiplex enabled cluster.

    Args:
        mnode (str): Node on which commands has to be executed.
        volname (str): Volume name for which we require brick-list
        number_of_bricks (int): The number of bricks for which brick list
                                has to be created.
        servers (str|list): A server|List of servers from which the bricks
                            needs to be selected for creating the brick list.
        servers_info (dict): Dict of server info of each servers.

    Returns:
        list: List of bricks to use with volume create.
        Nonetype: If unable to fetch the brick list

    """
    if not isinstance(servers, list):
        servers = [servers]

    brick_index, brick_list_for_volume = 0, []

    # Importing get_all_bricks() from bricks_libs to avoid cyclic imports
    from glustolibs.gluster.brick_libs import get_all_bricks

    # Get all volume list present in the cluster from mnode
    current_vol_list = get_volume_list(mnode)
    for volume in current_vol_list:
        brick_index = brick_index + len(get_all_bricks(mnode, volume))
    g.log.info("current brick_index %s" % brick_index)

    # Get all bricks_count and bricks_list
    all_brick_count, bricks_list = get_all_bricks_from_servers_multivol(
        servers, servers_info)
    if not (all_brick_count > 1):
        g.log.error("Unable to get the bricks present in the specified"
                    "servers")
        return None

    for num in range(number_of_bricks):
        brick = brick_index % all_brick_count
        brick_list_for_volume.append(
            "%s/%s_brick%d" % (bricks_list[brick], volname, brick_index))
        brick_index += 1

    return brick_list_for_volume
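
A minimal usage sketch for form_bricks_for_multivol, not from the source above; the import paths, node names and the (empty) servers-info dict are placeholders for what normally comes from the glusto-tests config file:

# Hypothetical snippet: import paths, node names and config data are placeholders.
from glustolibs.gluster.lib_utils import form_bricks_for_multivol
from glustolibs.gluster.volume_ops import volume_create

mnode = "server1.example.com"
servers = ["server1.example.com", "server2.example.com", "server3.example.com"]
servers_info = {}   # normally populated from the glusto config (all_servers_info)

# Ask for 3 bricks for a new volume, spread across the existing brick mounts.
bricks = form_bricks_for_multivol(mnode, "testvol_2", 3, servers, servers_info)
if bricks is None:
    raise AssertionError("Could not form a bricks list for testvol_2")
ret, _, _ = volume_create(mnode, "testvol_2", bricks)
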
Code example #27
    def test_brickmux_brick_process(self):
        """
        1. Create a 3 node cluster.
        2. Set cluster.brick-multiplex to enable.
        3. Create 15 volumes of type replica 1x3.
        4. Start all the volumes one by one.
        5. While the volumes are starting reboot one node.
        6. Check pidof glusterfsd; a single process should be visible.
        """
        volume_config = {
            'name': 'test',
            'servers': self.all_servers[:3],
            'voltype': {
                'type': 'replicated',
                'replica_count': 3,
                'transport': 'tcp'
            }
        }

        servers = self.all_servers[:3]
        # Volume Creation
        ret = bulk_volume_creation(self.mnode,
                                   14,
                                   self.all_servers_info,
                                   volume_config,
                                   is_create_only=True)
        self.assertTrue(ret, "Volume creation Failed")
        ret = set_volume_options(self.mnode, 'all',
                                 {'cluster.brick-multiplex': 'enable'})
        self.assertTrue(ret)
        vol_list = get_volume_list(self.mnode)
        for volname in vol_list:
            if vol_list.index(volname) == 2:
                g.run(servers[2], "reboot")
            ret, out, _ = volume_start(self.mnode, volname)
            self.assertFalse(ret,
                             "Failed to start volume '{}'".format(volname))

        for _ in range(10):
            sleep(1)
            _, node_result = are_nodes_online(servers[2])
            self.assertTrue(node_result, "Node is not Online")

        for server in servers:
            ret, out, _ = g.run(server, "pgrep glusterfsd")
            out = out.split()
            self.assertFalse(ret, "Failed to get 'glusterfsd' pid")
            self.assertEqual(len(out), 1,
                             "More then 1 brick process  seen in glusterfsd")
    def tearDown(self):
        """
        Clean up the volume and umount volume from client
        """

        # Cleanup volume
        volume_list = get_volume_list(self.mnode)
        for volume in volume_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup Volume %s" % volume)
        g.log.info("Successfully Cleaned up all Volumes")

        # Calling GlusterBaseClass teardown
        self.get_super_method(self, 'tearDown')()
Code example #29
    def tearDown(self):
        """
        Cleanup and umount volume
        """
        # Cleanup and umount volume
        for vol in get_volume_list(self.mnode):
            ret = cleanup_volume(self.mnode, vol)
            if not ret:
                raise ExecutionError("Failed to Cleanup Volume")

        ret = set_volume_options(self.mnode, 'all',
                                 {'cluster.brick-multiplex': 'disable'})
        if not ret:
            raise ExecutionError("Failed to set volume option")

        # Calling GlusterBaseClass teardown
        self.get_super_method(self, 'tearDown')()
Code example #30
def match_heketi_and_gluster_block_volumes_by_prefix(gluster_pod,
                                                     heketi_block_volumes,
                                                     block_vol_prefix,
                                                     hostname=None):
    """Match block volumes from heketi and gluster. This function can't
       be used for block volumes with custom prefixes

    Args:
        gluster_pod (podcmd | str): gluster pod class object has gluster
                                    pod and ocp master node or gluster
                                    pod name
        heketi_block_volumes (list): list of heketi block volumes with
                                     which gluster block volumes need to
                                     be matched
        block_vol_prefix (str): block volume prefix by which the block
                                volumes needs to be filtered
        hostname (str): ocp master node on which oc command gets executed

    """
    gluster_pod = _get_gluster_pod(gluster_pod, hostname)

    gluster_vol_list = get_volume_list(gluster_pod)

    gluster_vol_block_list = []
    for gluster_vol in gluster_vol_list[1:]:
        ret, out, err = block_list(gluster_pod, gluster_vol)
        try:
            if ret != 0 and json.loads(out)["RESULT"] == "FAIL":
                msg = "failed to get block volume list with error: %s" % err
                g.log.error(msg)
                raise AssertionError(msg)
        except Exception as e:
            g.log.error(e)
            raise

        gluster_vol_block_list.extend([
            block_vol.replace(block_vol_prefix, "")
            for block_vol in json.loads(out)["blocks"]
            if block_vol.startswith(block_vol_prefix)
        ])

    if cmp(sorted(gluster_vol_block_list), heketi_block_volumes) != 0:
        err_msg = "Gluster and Heketi Block volume list match failed"
        err_msg += "\nGluster Volumes: %s, " % gluster_vol_block_list
        err_msg += "\nBlock volumes %s" % heketi_block_volumes
        err_msg += "\nDifference: %s" % (set(gluster_vol_block_list)
                                         ^ set(heketi_block_volumes))
        raise AssertionError(err_msg)
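
A usage sketch for the matcher above, not from the source; it assumes the function is already imported from its defining module, and the pod name, hostname, prefix and heketi id list are placeholders (the function raises AssertionError on any mismatch):

# Hypothetical snippet: pod name, hostname, prefix and heketi ids are placeholders;
# the function itself is assumed to be imported from its defining module.
heketi_block_volumes = sorted([
    "1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d",
    "9f8e7d6c5b4a39281706f5e4d3c2b1a0",
])
match_heketi_and_gluster_block_volumes_by_prefix(
    "glusterfs-storage-abc12",        # gluster pod name (placeholder)
    heketi_block_volumes,
    block_vol_prefix="blockvol_",
    hostname="ocp-master.example.com")
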
Code example #31
    def tearDown(self):
        """
        If test method failed before validating IO, tearDown waits for the
        IO's to complete and checks for the IO exit status

        Cleanup and umount volume
        """
        if not self.io_validation_complete:
            g.log.info("Wait for IO to complete as IO validation did not "
                       "succeed in test method")
            ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
            if not ret:
                raise ExecutionError("IO failed on some of the clients")
            g.log.info("IO is successful on all mounts")

            # List all files and dirs created
            g.log.info("List all files and directories:")
            ret = list_all_files_and_dirs_mounts(self.mounts)
            if not ret:
                raise ExecutionError("Failed to list all files and dirs")
            g.log.info("Listing all files and directories is successful")

        # umount all volumes
        for mount_point in self.mount_points:
            ret, _, _ = umount_volume(self.client, mount_point)
            if ret:
                raise ExecutionError("Failed to umount on volume %s " %
                                     self.volname)
            g.log.info("Successfully umounted %s on client %s", self.volname,
                       self.client)
            ret = rmdir(self.client, mount_point)
            if not ret:
                raise ExecutionError(
                    "Failed to remove directory mount directory.")
            g.log.info("Mount directory is removed successfully")

        # stopping all volumes
        volume_list = get_volume_list(self.mnode)
        for volume in volume_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup Volume %s" % volume)
            g.log.info("Volume: %s cleanup is done", volume)
        g.log.info("Successfully Cleanedup all Volumes")

        # calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
Code example #32
def wait_to_heal_complete(timeout=300, wait_step=5):
    """Monitors heal for volumes on gluster"""
    gluster_vol_list = get_volume_list("auto_get_gluster_endpoint")
    if not gluster_vol_list:
        raise AssertionError("failed to get gluster volume list")

    _waiter = waiter.Waiter(timeout=timeout, interval=wait_step)
    for gluster_vol in gluster_vol_list:
        for w in _waiter:
            if is_heal_complete("auto_get_gluster_endpoint", gluster_vol):
                break

    if w.expired:
        err_msg = ("reached timeout waiting for all the gluster volumes "
                   "to reach the 'healed' state.")
        g.log.error(err_msg)
        raise AssertionError(err_msg)
Code example #33
    def tearDownClass(cls):
        """
        Clean up the volume and umount volume from client
        """

        # stopping the volume
        g.log.info("Starting to Cleanup all Volumes")
        volume_list = get_volume_list(cls.mnode)
        for volume in volume_list:
            ret = cleanup_volume(cls.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup Volume %s" % volume)
            g.log.info("Volume: %s cleanup is done", volume)
        g.log.info("Successfully Cleanedup all Volumes")

        # calling GlusterBaseClass tearDownClass
        cls.get_super_method(cls, 'tearDownClass')()
Code example #34
def match_heketi_and_gluster_block_volumes_by_prefix(
        heketi_block_volumes, block_vol_prefix):
    """Match block volumes from heketi and gluster. This function can't
       be used for block volumes with custom prefixes

    Args:
        heketi_block_volumes (list): list of heketi block volumes with
                                     which gluster block volumes need to
                                     be matched
        block_vol_prefix (str): block volume prefix by which the block
                                volumes needs to be filtered
    """
    gluster_vol_list = get_volume_list("auto_get_gluster_endpoint")

    gluster_vol_block_list = []
    for gluster_vol in gluster_vol_list[1:]:
        ret, out, err = block_list("auto_get_gluster_endpoint", gluster_vol)
        try:
            if ret != 0 and json.loads(out)["RESULT"] == "FAIL":
                msg = "failed to get block volume list with error: %s" % err
                g.log.error(msg)
                raise AssertionError(msg)
        except Exception as e:
            g.log.error(e)
            raise

        gluster_vol_block_list.extend([
            block_vol.replace(block_vol_prefix, "")
            for block_vol in json.loads(out)["blocks"]
            if block_vol.startswith(block_vol_prefix)
        ])

    if cmp(sorted(gluster_vol_block_list), heketi_block_volumes) != 0:
        err_msg = "Gluster and Heketi Block volume list match failed"
        err_msg += "\nGluster Volumes: %s, " % gluster_vol_block_list
        err_msg += "\nBlock volumes %s" % heketi_block_volumes
        err_msg += "\nDifference: %s" % (set(gluster_vol_block_list) ^
                                         set(heketi_block_volumes))
        raise AssertionError(err_msg)