Example #1
def add_extra_servers_to_cluster(mnode, extra_servers):
    """Adds the given extra servers to cluster

    Args:
        mnode (str): Node on which cmd has to be executed.
        extra_servers (str|list) : A server|list of extra servers to be
            attached to cluster

    Returns:
        bool: True, if extra servers are attached to cluster
              False, otherwise

    Example:
        add_extra_servers_to_cluster("abc.com", ['peer_node1','peer_node2'])
    """

    if not isinstance(extra_servers, list):
        extra_servers = [extra_servers]

    ret = start_glusterd(servers=extra_servers)
    if not ret:
        g.log.error("glusterd did not start in peer nodes")
        return False

    ret = peer_probe_servers(mnode, servers=extra_servers)
    if not ret:
        g.log.error("Unable to do peer probe on extra server machines")
        return False

    return True
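A minimal usage sketch for the helper above (the node name and peer list are the same illustrative values as in the docstring; the imports follow the usual glusto-tests conventions and are assumptions, not part of the original snippet):

from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError

# Attach two extra peers; abort the setup if the cluster
# could not be expanded.
ret = add_extra_servers_to_cluster("abc.com", ['peer_node1', 'peer_node2'])
if not ret:
    raise ExecutionError("Failed to attach extra servers to cluster")
g.log.info("Extra servers attached to cluster successfully")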
Example #2
    def tearDown(self):

        # start the volume, it should succeed
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Volume start failed")

        # start glusterd on all servers
        ret = start_glusterd(self.servers)
        if not ret:
            raise ExecutionError("Failed to start glusterd on all servers")

        for server in self.servers:
            ret = wait_for_peers_to_connect(server, self.servers)
            if not ret:
                ret = peer_probe_servers(server, self.servers)
                if not ret:
                    raise ExecutionError("Failed to peer probe all "
                                         "the servers")

        # clean up all volumes
        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume % s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        self.get_super_method(self, 'tearDown')()
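The tearDown above follows a recovery pattern that recurs throughout these examples: restart glusterd everywhere, re-form the peer group, then delete every volume. A hedged sketch of that pattern factored into one standalone helper (the helper name is hypothetical; module paths are assumed from glusto-tests conventions):

from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_init import start_glusterd
from glustolibs.gluster.peer_ops import (peer_probe_servers,
                                         wait_for_peers_to_connect)
from glustolibs.gluster.volume_libs import cleanup_volume
from glustolibs.gluster.volume_ops import get_volume_list

def recover_cluster_and_cleanup(mnode, servers):
    """Hypothetical helper mirroring the tearDown steps above."""
    # Bring glusterd back up on every server
    if not start_glusterd(servers):
        raise ExecutionError("Failed to start glusterd on all servers")

    # Re-form the peer group, probing only where peers did not reconnect
    for server in servers:
        if not wait_for_peers_to_connect(server, servers):
            if not peer_probe_servers(server, servers):
                raise ExecutionError("Failed to peer probe all the servers")

    # Delete every volume known to the management node
    vol_list = get_volume_list(mnode)
    if vol_list is None:
        raise ExecutionError("Failed to get the volume list")
    for volume in vol_list:
        if not cleanup_volume(mnode, volume):
            raise ExecutionError("Unable to delete volume %s" % volume)
        g.log.info("Volume deleted successfully : %s", volume)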
Example #3
    def setUpClass(cls):
        GlusterBaseClass.setUpClass.im_func(cls)
        g.log.info("Starting %s " % cls.__name__)
        '''
        checking for peer status from every node; if peers are not in
        connected state, performing peer probe.
        '''
        ret = cls.validate_peers_are_connected()
        if not ret:
            ret = peer_probe_servers(cls.mnode, cls.servers)
            if ret:
                g.log.info("peers are connected successfully from %s to other \
                servers in severlist %s:" % (cls.mnode, cls.servers))
            else:
                g.log.error("Peer probe failed from %s to other \
                servers in severlist %s:" % (cls.mnode, cls.servers))
                raise ExecutionError("Peer probe failed ")
        else:
            g.log.info("All server peers are already in connected state\
            %s:" % cls.servers)

        # Creating Volume
        g.log.info("Started creating volume")
        ret = cls.setup_volume()
        if ret:
            g.log.info("Volme created successfully : %s" % cls.volname)
        else:
            raise ExecutionError("Volume creation failed: %s" % cls.volname)
Example #4
    def tearDown(self):

        # start glusterd on all servers
        ret = start_glusterd(self.servers)
        if not ret:
            raise ExecutionError("Failed to start glusterd on all servers")

        for server in self.servers:
            ret = is_peer_connected(server, self.servers)
            if not ret:
                ret = peer_probe_servers(server, self.servers)
                if not ret:
                    raise ExecutionError("Failed to peer probe all "
                                         "the servers")

        # clean up all volumes
        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume % s" % volume)
            g.log.info("Volume deleted successfully : %s" % volume)

        GlusterBaseClass.tearDown.im_func(self)
Example #5
    def tearDown(self):

        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed Cleanup the volume")
            g.log.info("Volume deleted successfully %s", volume)

        # Setting quorum ratio to 51%
        ret = set_volume_options(self.mnode, 'all',
                                 {'cluster.server-quorum-ratio': '51%'})
        if not ret:
            raise ExecutionError("Failed to set server quorum ratio on %s" %
                                 self.volname)

        # Peer probe servers since we are doing peer detach in setUpClass
        for server in self.servers:
            ret = is_peer_connected(server, self.servers)
            if not ret:
                ret = peer_probe_servers(server, self.servers)
                if not ret:
                    raise ExecutionError(
                        "Peer probe failed to one of the nodes")
                g.log.info("Peer probe successful")

        self.get_super_method(self, 'tearDown')()
Example #6
    def tearDown(self):

        # unmount the volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volume unmount failed for %s" % self.volname)

        # get volumes list and clean up all the volumes
        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Error while getting vol list")
        else:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if ret is True:
                    g.log.info("Volume deleted successfully : %s", volume)
                else:
                    raise ExecutionError("Failed Cleanup the"
                                         " Volume %s" % volume)

        # peer probe all the servers
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Peer probe failed to all the servers from "
                                 "the node.")

        GlusterBaseClass.tearDown.im_func(self)
Example #7
    def tearDown(self):
        """
        tearDown for every test
        """
        ret = is_glusterd_running(self.servers)
        if ret:
            ret = start_glusterd(self.servers)
            if not ret:
                raise ExecutionError("Failed to start glusterd on %s" %
                                     self.servers)
        # Takes 5 seconds to restart glusterd into peer connected state
        sleep(5)
        g.log.info("Glusterd started successfully on %s", self.servers)

        # checking for peer status from every node
        ret = is_peer_connected(self.mnode, self.servers)
        if not ret:
            ret = peer_probe_servers(self.mnode, self.servers)
            if not ret:
                raise ExecutionError("Failed to peer probe failed in "
                                     "servers %s" % self.servers)

        # stopping the volume and Cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)
        g.log.info("Volume deleted successfully : %s", self.volname)

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
Example #8
    def setUp(self):
        """
        """
        GlusterBaseClass.setUp.im_func(self)
        # Peer probe servers
        g.log.info("Peer Probe servers '%s'", self.servers)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError(
                "Failed to peer probe some or all servers %s "
                "into the cluster" % self.servers)
        g.log.info("Successfully peer probed all servers '%s' to the cluster",
                   self.servers)

        # Validate if peers are connected from each server
        g.log.info(
            "Validating if servers %s are connected from other servers "
            "in the cluster", self.servers)
        for server in self.servers:
            ret = is_peer_connected(server, self.servers)
            if not ret:
                raise ExecutionError(
                    "Some or all servers %s are not in connected "
                    "state from node %s" % (self.servers, server))
            g.log.info(
                "Successfully validated servers %s are all "
                "in connected state from node %s", self.servers, server)
        g.log.info(
            "Successfully validated all servers %s are in connected "
            "state from other servers in the cluster", self.servers)
Example #9
    def tearDown(self):

        # Cleanup and umount volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret = umount_volume(self.mounts[0].client_system,
                            self.mounts[0].mountpoint,
                            mtype=self.mount_type)
        self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Clean up all volumes and peer probe to form cluster
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)
        g.log.info("Successful in umounting the volume and Cleanup")

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        GlusterBaseClass.tearDown.im_func(self)
Example #10
    def tearDown(self):

        # UnMount Volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret = umount_volume(self.mounts[0].client_system,
                            self.mounts[0].mountpoint,
                            mtype=self.mount_type)
        self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Clean up all volumes and peer probe to form cluster
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                # check all bricks are online
                ret = wait_for_bricks_to_be_online(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to bring bricks online"
                                         "for volume %s" % volume)
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe peer "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        self.get_super_method(self, 'tearDown')()
Example #11
    def test_volume_status_xml(self):

        # create a two node cluster
        ret = peer_probe_servers(self.servers[0], self.servers[1])
        self.assertTrue(
            ret,
            "Peer probe failed to %s from %s" % (self.mnode, self.servers[1]))

        # create a distributed volume with single node
        number_of_bricks = 1
        servers_info_from_single_node = {}
        servers_info_from_single_node[self.servers[0]] = self.all_servers_info[
            self.servers[0]]

        bricks_list = form_bricks_list(self.mnode, self.volname,
                                       number_of_bricks, self.servers[0],
                                       servers_info_from_single_node)
        ret, _, _ = volume_create(self.servers[0], self.volname, bricks_list)
        self.assertEqual(ret, 0, "Volume creation failed")
        g.log.info("Volume %s created successfully", self.volname)

        # Get volume status
        ret, _, err = volume_status(self.servers[1], self.volname)
        self.assertNotEqual(ret, 0, ("Unexpected: volume status is success for"
                                     " %s, even though volume is not started "
                                     "yet" % self.volname))
        self.assertIn("is not started", err, ("volume status exited with"
                                              " incorrect error message"))

        # Get volume status with --xml
        vol_status = get_volume_status(self.servers[1], self.volname)
        self.assertIsNone(vol_status, ("Unexpected: volume status --xml for %s"
                                       " is success even though the volume is"
                                       " not stared yet" % self.volname))

        # start the volume
        ret, _, _ = volume_start(self.servers[1], self.volname)
        self.assertEqual(ret, 0, "Failed to start volume %s" % self.volname)

        # Get volume status
        ret, _, _ = volume_status(self.servers[1], self.volname)
        self.assertEqual(ret, 0,
                         ("Failed to get volume status for %s" % self.volname))

        # Get volume status with --xml
        vol_status = get_volume_status(self.servers[1], self.volname)
        self.assertIsNotNone(vol_status,
                             ("Failed to get volume "
                              "status --xml for %s" % self.volname))

        # Verify there are no crashes while executing gluster volume status
        status = True
        glusterd_log = (self._get_test_specific_glusterd_log(
            self.mnode).split("\n"))
        for line in glusterd_log:
            if ' E ' in line:
                status = False
                g.log.info("Unexpected! Error found %s", line)

        self.assertTrue(status, "Error found in glusterd logs")
Example #12
    def tearDown(self):
        # Add the removed services in firewall
        for service in ('glusterfs', 'rpc-bind'):
            for option in ("", " --permanent"):
                cmd = ("firewall-cmd --zone=public --add-service={}{}".format(
                    service, option))
                ret, _, _ = g.run(self.node_to_probe, cmd)
                if ret:
                    raise ExecutionError("Failed to add firewall service %s "
                                         "on %s" %
                                         (service, self.node_to_probe))

        # Detach servers from cluster
        pool = nodes_from_pool_list(self.mnode)
        self.assertIsNotNone(pool, "Failed to get pool list")
        for node in pool:
            if not peer_detach(self.mnode, node):
                raise ExecutionError("Failed to detach %s from %s" %
                                     (node, self.mnode))
        # Create a cluster
        if not peer_probe_servers(self.mnode, self.servers):
            raise ExecutionError("Failed to probe peer "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        self.get_super_method(self, 'tearDown')()
Example #13
    def tearDown(self):
        # stopping the volume and Cleaning up the volume
        self.get_super_method(self, 'tearDown')()
        ret = is_glusterd_running(self.servers)
        if ret:
            ret = start_glusterd(self.servers)
            if not ret:
                raise ExecutionError("Failed to start glusterd on %s" %
                                     self.servers)
        # Takes 5 seconds to restart glusterd into peer connected state
        sleep(5)
        g.log.info("Glusterd started successfully on %s", self.servers)

        # checking for peer status from every node
        ret = is_peer_connected(self.mnode, self.servers)
        if not ret:
            ret = peer_probe_servers(self.mnode, self.servers)
            if not ret:
                raise ExecutionError("Failed to peer probe failed in "
                                     "servers %s" % self.servers)
        g.log.info("All peers are in connected state")
        vol_list = get_volume_list(self.mnode)
        if vol_list is None:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed Cleanup the Volume")
        g.log.info("Volume deleted successfully")
Example #14
    def tearDown(self):

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        # clean up all volumes

        vol_list = get_volume_list(self.mnode)
        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to Cleanup the "
                                     "Volume %s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        # Calling GlusterBaseClass tearDown
        GlusterBaseClass.tearDown.im_func(self)
Example #15
    def scratch_cleanup(cls, error_or_failure_exists):
        """
        This scratch_cleanup script runs only when the code
        currently running hits an execution or assertion error.

        Args:
            error_or_failure_exists (bool): If True, cleans up the setup
                at the end of the testcase when an execution or assertion
                error occurred in the test steps. False skips this
                scratch cleanup step.

        Returns (bool): True if setup cleanup is successful.
            False otherwise.
        """
        if error_or_failure_exists:
            ret = stop_glusterd(cls.servers)
            if not ret:
                g.log.error("Failed to stop glusterd")
                cmd_list = ("pkill pidof glusterd",
                            "rm /var/run/glusterd.socket")
                for server in cls.servers:
                    for cmd in cmd_list:
                        ret, _, _ = g.run(server, cmd, "root")
                        if ret:
                            g.log.error("Failed to stop glusterd")
                            return False
            for server in cls.servers:
                cmd_list = ("rm -rf /var/lib/glusterd/vols/*",
                            "rm -rf /var/lib/glusterd/snaps/*",
                            "rm -rf /var/lib/glusterd/peers/*",
                            "rm -rf {}/*/*".format(
                                cls.all_servers_info[server]['brick_root']))
                for cmd in cmd_list:
                    ret, _, _ = g.run(server, cmd, "root")
                    if ret:
                        g.log.error(
                            "failed to cleanup server {}".format(server))
                        return False
            ret = restart_glusterd(cls.servers)
            if not ret:
                g.log.error("Failed to start glusterd")
                return False
            sleep(2)
            ret = wait_for_glusterd_to_start(cls.servers)
            if not ret:
                g.log.error("Failed to bring glusterd up")
                return False
            ret = peer_probe_servers(cls.mnode, cls.servers)
            if not ret:
                g.log.error("Failed to peer probe servers")
                return False
            for client in cls.clients:
                cmd_list = ("umount /mnt/*", "rm -rf /mnt/*")
                for cmd in cmd_list:
                    ret = g.run(client, cmd, "root")
                    if ret:
                        g.log.error(
                            "failed to unmount/already unmounted {}".format(
                                client))
            return True
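A minimal sketch of how this classmethod might be wired in. The hook name terminate and the error_or_failure_exists attribute are assumptions inferred from the method's own signature and docstring, not confirmed by the snippet:

    @classmethod
    def terminate(cls):
        # Run scratch cleanup only if a test step hit an execution
        # or assertion error (flag assumed to be set by the framework)
        if not cls.scratch_cleanup(cls.error_or_failure_exists):
            g.log.error("Scratch cleanup failed on %s" % cls.servers)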
Example #16
    def test_uuid_in_volume_info_xml(self):

        # create a two node cluster
        ret = peer_probe_servers(self.servers[0], self.servers[1])
        self.assertTrue(
            ret,
            "Peer probe failed to %s from %s" % (self.mnode, self.servers[1]))

        # create a 2x2 volume
        servers_info_from_two_node_cluster = {}
        for server in self.servers[0:2]:
            servers_info_from_two_node_cluster[server] = self.all_servers_info[
                server]

        self.volume['servers'] = self.servers[0:2]
        self.volume['voltype']['replica_count'] = 2
        self.volume['voltype']['dist_count'] = 2
        ret = setup_volume(self.mnode, servers_info_from_two_node_cluster,
                           self.volume)
        self.assertTrue(ret, ("Failed to create"
                              "and start volume %s" % self.volname))

        # probe a new node from cluster
        ret = peer_probe_servers(self.mnode, self.servers[2])
        self.assertTrue(
            ret,
            "Peer probe failed to %s from %s" % (self.mnode, self.servers[2]))

        # check gluster vol info --xml from newly probed node
        xml_output = get_volume_info(self.servers[2], self.volname)
        self.assertIsNotNone(xml_output,
                             ("Failed to get volume info --xml for"
                              "volume %s from newly probed node %s" %
                              (self.volname, self.servers[2])))

        # volume info --xml should have non-zero UUID for host and brick
        uuid_with_zeros = '00000000-0000-0000-0000-000000000000'
        len_of_uuid = len(uuid_with_zeros)
        number_of_bricks = int(xml_output[self.volname]['brickCount'])
        for i in range(number_of_bricks):
            uuid = xml_output[self.volname]['bricks']['brick'][i]['hostUuid']
            self.assertEqual(len(uuid), len_of_uuid, "Invalid uuid length")
            self.assertNotEqual(uuid, uuid_with_zeros,
                                ("Invalid uuid %s" % uuid))
Example #17
    def tearDown(self):
        """
        tearDown for every test
        """
        # Peer probe detached server
        ret = peer_probe_servers(self.mnode, self.random_server)
        if not ret:
            raise ExecutionError("Failed to probe detached server %s" %
                                 self.random_server)
        g.log.info("peer probe is successful for %s", self.random_server)

        # Calling GlusterBaseClass tearDown
        GlusterBaseClass.tearDown.im_func(self)
Example #18
    def test_peer_probe_snapd_running(self):
        '''
        -> Create Volume
        -> Create snap for that volume
        -> Enable uss
        -> Check snapd running or not
        -> Probe a new node while snapd is running
        '''

        # Performing node detach; the detached node is treated as an
        # extra server
        extra_node = self.servers[-1]
        ret, _, _ = peer_detach(self.mnode, extra_node)
        self.assertEqual(ret, 0, "Peer detach failed for %s" % extra_node)
        g.log.info("Peer detach success for %s", extra_node)

        # Removing the detached node from 'self.servers', because
        # 'self.setup_volume' checks the peer status of 'self.servers'
        # before creating the volume
        self.servers.remove(extra_node)

        # Creating volume
        ret = self.setup_volume()
        self.assertTrue(ret, "Failed Create volume %s" % self.volname)
        g.log.info("Volume created successfully %s", self.volname)

        # Adding node back into self.servers list
        self.servers.append(extra_node)

        # creating Snap
        ret, _, _ = snap_create(self.mnode, self.volname, 'snap1')
        self.assertEqual(ret, 0,
                         "Snap creation failed for volume %s" % self.volname)
        g.log.info("Snap created successfully for volume %s", self.volname)

        # Enabling Snapd(USS)
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0,
                         "Failed to enable USS for volume %s" % self.volname)
        g.log.info("USS Enabled successfully on volume %s", self.volname)

        # Checking snapd running or not
        ret = is_snapd_running(self.mnode, self.volname)
        self.assertTrue(ret, "Snapd not running for volume %s" % self.volname)
        g.log.info("snapd running for volume %s", self.volname)

        # Probing new node
        ret = peer_probe_servers(self.mnode, extra_node)
        self.assertTrue(ret,
                        "Peer Probe failed for new server %s" % extra_node)
        g.log.info("Peer Probe success for new server %s", extra_node)
Example #19
    def tearDown(self):

        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to peer probe servers")

        # stopping the volume and Cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)
        g.log.info("Volume deleted successfully : %s", self.volname)

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
Example #20
    def test_volume_status_xml(self):

        # create a two node cluster
        ret = peer_probe_servers(self.servers[0], self.servers[1])
        self.assertTrue(
            ret,
            "Peer probe failed to %s from %s" % (self.mnode, self.servers[1]))

        # create a distributed volume with single node
        number_of_bricks = 1
        servers_info_from_single_node = {}
        servers_info_from_single_node[self.servers[0]] = self.all_servers_info[
            self.servers[0]]

        bricks_list = form_bricks_list(self.mnode, self.volname,
                                       number_of_bricks, self.servers[0],
                                       servers_info_from_single_node)
        ret, _, _ = volume_create(self.servers[0], self.volname, bricks_list)
        self.assertEqual(ret, 0, "Volume creation failed")
        g.log.info("Volume %s created successfully", self.volname)

        # Get volume status
        ret, _, err = volume_status(self.servers[1], self.volname)
        self.assertNotEqual(ret, 0, ("Unexpected: volume status is success for"
                                     " %s, even though volume is not started "
                                     "yet" % self.volname))
        self.assertIn("is not started", err, ("volume status exited with"
                                              " incorrect error message"))

        # Get volume status with --xml
        vol_status = get_volume_status(self.servers[1], self.volname)
        self.assertIsNone(vol_status, ("Unexpected: volume status --xml for %s"
                                       " is success even though the volume is"
                                       " not stared yet" % self.volname))

        # start the volume
        ret, _, _ = volume_start(self.servers[1], self.volname)
        self.assertEqual(ret, 0, "Failed to start volume %s" % self.volname)

        # Get volume status
        ret, _, _ = volume_status(self.servers[1], self.volname)
        self.assertEqual(ret, 0,
                         ("Failed to get volume status for %s" % self.volname))

        # Get volume status with --xml
        vol_status = get_volume_status(self.servers[1], self.volname)
        self.assertIsNotNone(vol_status,
                             ("Failed to get volume "
                              "status --xml for %s" % self.volname))
Example #21
    def tearDown(self):
        """
        tearDown for every test
        """
        # checking for peer status from every node
        ret = is_peer_connected(self.mnode, self.servers)
        if not ret:
            ret = peer_probe_servers(self.mnode, self.random_server)
            if not ret:
                raise ExecutionError("Failed to peer probe failed in "
                                     "servers %s" % self.random_server)
        g.log.info("All peers are in connected state")

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
Example #22
    def tearDown(self):

        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)

        # stopping the volume and cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed to Cleanup the Volume %s" %
                                 self.volname)
        g.log.info("Volume deleted successfully : %s", self.volname)

        # calling GlusterBaseClass tearDown
        GlusterBaseClass.tearDown.im_func(self)
Example #23
    def tearDown(self):
        """Detach servers from cluster"""
        pool = nodes_from_pool_list(self.mnode)
        self.assertIsNotNone(pool, "Failed to get pool list")
        for node in pool:
            if not peer_detach(self.mnode, node):
                raise ExecutionError("Failed to detach %s from %s" %
                                     (node, self.mnode))
        # Create a cluster
        if not peer_probe_servers(self.mnode, self.servers):
            raise ExecutionError("Failed to probe peer "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        self.get_super_method(self, 'tearDown')()
Example #24
    def tearDown(self):

        # Unmount and clean volume
        if not self.unmount_volume_and_cleanup_volume([self.mounts[0]]):
            raise ExecutionError("Failed to Cleanup Volume")

        # Probe detached node in case it's still detached
        if self.is_peer_detached:
            if not peer_probe_servers(self.mnode, self.servers[5]):
                raise ExecutionError("Failed to probe detached "
                                     "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
Example #25
    def tearDown(self):
        """
        tearDown for every test
        """
        if self.detach_peer:
            ret = peer_probe_servers(self.mnode, self.servers[5])
            if not ret:
                raise ExecutionError("Peer probe failed for detached "
                                     "server %s" % self.servers[5])

        # stopping the volume and Cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)

        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
Example #26
    def tearDown(self):

        # stopping and cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed to Cleanup the Volume %s" %
                                 self.volname)

        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)

        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        self.get_super_method(self, 'tearDown')()
Example #27
    def tearDown(self):

        # stopping and cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed to Cleanup the Volume %s" %
                                 self.volname)

        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)

        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        GlusterBaseClass.tearDown.im_func(self)
Example #28
    def tearDown(self):
        # Reset firewall services to the zone
        if not self.firewall_added:
            ret = self._add_firewall_services(self.servers[:2])
            if not ret:
                raise ExecutionError("Failed to add firewall services")

        # Reload firewall services
        ret = self._reload_firewall_service(self.servers[:2])
        if not ret:
            raise ExecutionError("Failed to reload firewall services")

        # Cleanup the volumes and unmount it, if mounted
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = is_mounted(volume,
                                 mpoint="/mnt/distribute-vol",
                                 mserver=self.mnode,
                                 mclient=self.servers[1],
                                 mtype="glusterfs")
                if ret:
                    ret, _, _ = umount_volume(mclient=self.servers[1],
                                              mpoint="/mnt/distribute-vol")
                    if ret:
                        raise ExecutionError("Failed to unmount volume")

                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume cleaned up successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        self.get_super_method(self, 'tearDown')()
Example #29
    def tearDown(self):

        # UnMount Volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret = umount_volume(self.mounts[0].client_system,
                            self.mounts[0].mountpoint,
                            mtype=self.mount_type)
        self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)

        # Clean up all volumes and peer probe to form cluster
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume deleted successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)

        bricks = get_servers_bricks_dict(self.servers, self.all_servers_info)

        # Checking brick dir and cleaning it.
        for server in self.servers:
            for brick in bricks[server]:
                if get_dir_contents(server, brick):
                    cmd = "rm -rf " + brick + "/*"
                    ret, _, _ = g.run(server, cmd)
                    if ret:
                        raise ExecutionError("Failed to delete the brick "
                                             "dirs of deleted volume.")

        self.get_super_method(self, 'tearDown')()
Example #30
    def tearDown(self):
        """
        tearDown for every test
        """
        # stopping the volume and Cleaning up the volume
        ret = self.cleanup_volume()
        if not ret:
            raise ExecutionError("Failed Cleanup the Volume %s" % self.volname)

        # Checking peers are in connected state or not
        ret = self.validate_peers_are_connected()
        if not ret:
            # Peer probe detached servers
            pool = nodes_from_pool_list(self.mnode)
            for node in pool:
                peer_detach(self.mnode, node)
            ret = peer_probe_servers(self.mnode, self.servers)
            if not ret:
                raise ExecutionError("Failed to probe detached servers %s" %
                                     self.servers)
        # Calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()
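Many of the tearDowns above repeat the same detach-then-probe sequence to re-form the cluster. A hedged consolidation of that sequence into one helper (the helper name is hypothetical; module paths are assumed from glusto-tests conventions):

from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.peer_ops import (nodes_from_pool_list, peer_detach,
                                         peer_probe_servers)

def reform_cluster(mnode, servers):
    """Hypothetical helper: detach every node in the pool, then probe
    the full server list back into one cluster."""
    pool = nodes_from_pool_list(mnode)
    if pool is None:
        raise ExecutionError("Failed to get pool list")
    for node in pool:
        peer_detach(mnode, node)
    if not peer_probe_servers(mnode, servers):
        raise ExecutionError("Failed to probe servers %s" % servers)
    g.log.info("Peer probe success for servers %s", servers)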