Example #1
    def test_peer_probe_invalid_ip_nonexist_host_nonexist_ip(self):
        '''
        Test script to verify peer probe with a non-existing IP,
        a non-existing host and an invalid IP. Peer probe must
        fail for the invalid IP, non-existing IP and
        non-existing host. Verify that the Glusterd service is
        still up and running after the invalid peer probes,
        and that no core file gets created
        under the "/", /var/log/core and /tmp directories.
        '''
        ret, test_timestamp, _ = g.run_local('date +%s')
        test_timestamp = test_timestamp.strip()
        g.log.info("Running Test : %s", self.id())

        # Assigning non existing ip to variable
        self.non_exist_ip = '256.256.256.256'

        # Assigning invalid ip to variable
        self.invalid_ip = '10.11.a'

        # Assigning non existing host to variable
        self.non_exist_host = 'abc.lab.eng.blr.redhat.com'

        # Peer probe checks for non existing host
        g.log.info("peer probe checking for non existing host")
        ret, _, _ = peer_probe(self.mnode, self.non_exist_host)
        self.assertNotEqual(
            ret, 0, "peer probe should fail for "
            "non-existing host: %s" % self.non_exist_host)
        g.log.info("peer probe failed for non existing host")

        # Peer probe checks for invalid ip
        g.log.info("peer probe checking for invalid ip")
        ret, _, _ = peer_probe(self.mnode, self.invalid_ip)
        self.assertNotEqual(
            ret, 0, "peer probe should fail for "
            "invalid ip: %s" % self.invalid_ip)
        g.log.info("peer probe failed for invalid_ip")

        # peer probe checks for non existing ip
        g.log.info("peer probe checking for non existing ip")
        ret, _, _ = peer_probe(self.mnode, self.non_exist_ip)
        self.assertNotEqual(
            ret, 0, "peer probe should fail for non-existing "
            "ip: %s" % self.non_exist_ip)
        g.log.info("peer probe failed for non existing ip")

        # Check whether the Glusterd service is still running after the
        # peer probe attempts to the invalid and non-existing hosts

        self.mnode_list = []
        self.mnode_list.append(self.mnode)
        ret = is_glusterd_running(self.mnode_list)
        self.assertEqual(ret, 0, "Glusterd service should be running")

        # Check whether a core file was created in the "/", "/tmp" and
        # "/var/log/core" directories
        ret = is_core_file_created(self.servers, test_timestamp)
        self.assertTrue(ret, "core file found")
Example #2
    def test_nodes_from_pool_list(self):
        """Testing nodes from pool list and peer probe by hostname or IP
        """
        # Get list of nodes from 'gluster pool list'
        nodes_in_pool_list = nodes_from_pool_list(self.mnode)
        self.assertIsNotNone(
            nodes_in_pool_list,
            "Unable to get nodes from gluster pool list from node %s"
            % self.mnode)
        g.log.info("Nodes in pool: %s", nodes_in_pool_list)

        # Peer probe by hostname if node in nodes_in_pool_list is IP or
        # Peer probe by IP if node in nodes_in_pool_list is hostname
        for node in nodes_in_pool_list:
            if socket.gethostbyname(node) == node:
                node = socket.gethostbyaddr(node)[0]
            else:
                node = socket.gethostbyname(node)
            if node:
                g.log.info("Peer probe node %s from %s", node, self.mnode)
                ret, out, err = peer_probe(self.mnode, node)
                self.assertFalse(
                    (ret != 0 or
                     re.search(r'^peer\sprobe\:\ssuccess(.*)', out) is None),
                    ("Failed to peer probe %s from node %s", node, self.mnode))
                g.log.info("Successfully peer probed %s from node %s", node,
                           self.mnode)
    def tearDown(self):
        g.log.info("Peer probing any nodes which are not part of the "
                   "cluster as part of cleanup")
        for server in self.servers:
            if not is_peer_connected(self.mnode, server):
                ret, _, err = peer_probe(self.mnode, server)
                if ret:
                    raise ExecutionError("Peer probe failed with %s " % err)
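A leaner variant of the cleanup above, as a sketch: it assumes the
peer_probe_servers helper from glustolibs.gluster.peer_ops, which probes a
list of servers from mnode and validates that the peers reach connected state.

    def tearDown(self):
        # Probe and validate every configured server in one call; raise so
        # the framework reports the cleanup failure
        if not peer_probe_servers(self.mnode, self.servers):
            raise ExecutionError("Failed to peer probe servers %s"
                                 % self.servers)
        self.get_super_method(self, 'tearDown')()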
Example #4
    def tearDown(self):

        # Peer probe node which was detached
        ret, _, _ = peer_probe(self.mnode, self.servers[1])
        if ret:
            raise ExecutionError("Failed to detach %s" % self.servers[1])
        g.log.info("Peer detach successful %s", self.servers[1])

        self.get_super_method(self, 'tearDown')()
    def _probe_peer(self, node, should_fail=False):
        """ Peer probe a node """
        ret, _, _ = peer_probe(self.mnode, node)
        if should_fail:
            self.assertNotEqual(
                ret, 0, "Unexpected: Successfully peer probed"
                " node: %s" % node)
        else:
            self.assertEqual(ret, 0, "Failed to peer probe node: %s" % node)
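A minimal usage sketch for the helper above (the test name and probed
addresses are hypothetical, chosen only to exercise both branches):

    def test_probe_helper_usage(self):
        # A reachable cluster member is expected to probe successfully
        self._probe_peer(self.servers[1])
        # An unroutable address is expected to fail, hence should_fail=True
        self._probe_peer('256.256.256.256', should_fail=True)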
    def tearDown(self):

        # Peer probe node which was detached
        ret, _, _ = peer_probe(self.mnode, self.servers[1])
        if ret:
            raise ExecutionError("Failed to detach %s" % self.servers[1])
        g.log.info("Peer detach successful %s", self.servers[1])

        GlusterBaseClass.tearDown.im_func(self)
Example #7
    def test_verify_peer_probe_with_firewall_ports_not_opened(self):
        """
        Test Steps:
        1. Open glusterd port only in  Node1 using firewall-cmd command
        2. Perform peer probe to Node2 from Node 1
        3. Verify glusterd.log for Errors
        4. Check for core files created
        """

        ret, test_timestamp, _ = g.run_local('date +%s')
        test_timestamp = test_timestamp.strip()

        # Remove firewall service on the node to probe to
        self._remove_firewall_service()

        # Try peer probe from mnode to node
        ret, _, err = peer_probe(self.mnode, self.node_to_probe)
        self.assertEqual(ret, 1, ("Unexpected behavior: Peer probe should"
                                  " fail when the firewall services are "
                                  "down but returned success"))

        expected_err = ('peer probe: failed: Probe returned with '
                        'Transport endpoint is not connected\n')
        self.assertEqual(
            err, expected_err,
            "Expected error {}, but returned {}".format(expected_err, err))
        msg = ("Peer probe of {} from {} failed as expected ".format(
            self.mnode, self.node_to_probe))
        g.log.info(msg)

        # Verify there are no errors in the glusterd log
        status = True
        glusterd_logs = (self._get_test_specific_glusterd_log(
            self.mnode).split("\n"))
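        # In glusterd.log the severity letter follows the timestamp, so a
        # line containing ' E ' is treated here as an error-level entry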
        for line in glusterd_logs:
            if ' E ' in line:
                status = False
                g.log.info("Error found: ' %s '", line)

        self.assertTrue(status, "Errors found in the glusterd log")

        # Verify no core files are created
        ret = is_core_file_created(self.servers, test_timestamp)
        self.assertTrue(ret, "Unexpected crash found.")
        g.log.info("No core file found as expected")
Example #8
    def tearDown(self):
        """
        Cleanup and umount volume
        """
        # Cleanup volume
        g.log.info("Starting to Cleanup Volume")
        ret = cleanup_volume(self.mnode, self.volname)
        if not ret:
            raise ExecutionError("Failed to cleanup Volume")
        g.log.info("Successful in Cleanup")

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in self.extra_servers:
            if node not in pool:
                ret, _, _ = peer_probe(self.mnode, node)
                if ret:
                    raise ExecutionError("Failed to probe detached server %s" %
                                         node)
        g.log.info("Peer probe successful for detached servers %s",
                   self.extra_servers)

        # Calling GlusterBaseClass teardown
        GlusterBaseClass.tearDown.im_func(self)
    def tearDown(self):

        # Stop rebalance on the volume
        g.log.info("Stopping rebalance on the volume")
        ret, _, _ = rebalance_stop(self.mnode, self.volname)
        if ret:
            raise ExecutionError("Failed to stop rebalance " "on the volume .")
        g.log.info("Successfully stopped rebalance on the volume %s",
                   self.volname)

        # Peer probe node which was detached
        ret, _, _ = peer_probe(self.mnode, self.servers[4])
        if ret:
            raise ExecutionError("Failed to probe %s" % self.servers[4])
        g.log.info("Peer probe successful %s", self.servers[4])

        # Wait till peers are in connected state
        count = 0
        while count < 60:
            ret = is_peer_connected(self.mnode, self.servers)
            if ret:
                break
            sleep(3)
            count += 1
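        # NOTE: glustolibs also provides wait_for_peers_to_connect (used in a
        # later example), which wraps this kind of polling in a single call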

        # Unmounting and cleaning volume
        ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
                                  mpoint=self.mounts[0].mountpoint)
        if ret:
            raise ExecutionError("Unable to unmount volume %s" % self.volname)
        g.log.info("Volume unmounted successfully  %s", self.volname)

        ret = cleanup_volume(self.mnode, self.volname)
        if not ret:
            raise ExecutionError("Unable to delete volume %s" % self.volname)
        g.log.info("Volume deleted successfully  %s", self.volname)
        GlusterBaseClass.tearDown.im_func(self)
    def test_add_identical_brick(self):
        """
        In this test case:
        1. Create Dist Volume on Node 1
        2. Down brick on Node 1
        3. Peer Probe N2 from N1
        4. Add identical brick on newly added node
        5. Check volume status
        """

        # pylint: disable=too-many-statements
        # Create a distributed volume on Node1
        number_of_brick = 1
        servers_info_from_single_node = {
            self.servers[0]: self.all_servers_info[self.servers[0]]
        }
        self.volname = "testvol"
        bricks_list = form_bricks_list(self.servers[0], self.volname,
                                       number_of_brick, self.servers[0],
                                       servers_info_from_single_node)
        ret, _, _ = volume_create(self.servers[0],
                                  self.volname,
                                  bricks_list,
                                  force=False)
        self.assertEqual(ret, 0, "Volume create failed")
        g.log.info("Volume %s created successfully", self.volname)

        ret, _, _ = volume_start(self.servers[0], self.volname, True)
        self.assertEqual(ret, 0, ("Failed to start the "
                                  "volume %s", self.volname))
        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        ret = bring_bricks_offline(self.volname, bricks_list[0])
        self.assertTrue(ret, "Failed to bring down the bricks")
        g.log.info("Successfully brought the bricks down")

        ret, _, _ = peer_probe(self.servers[0], self.servers[1])
        self.assertEqual(ret, 0, ("peer probe from %s to %s is failed",
                                  self.servers[0], self.servers[1]))
        g.log.info("peer probe is success from %s to "
                   "%s", self.servers[0], self.servers[1])

        # wait for some time before add-brick
        time.sleep(2)

        # Replace just host IP to create identical brick
        add_bricks = []
        add_bricks.append(
            bricks_list[0].replace(self.servers[0], self.servers[1]))
        ret, _, _ = add_brick(self.mnode, self.volname, add_bricks)
        self.assertEqual(ret, 0, "Failed to add the bricks to the volume")
        g.log.info("Successfully added bricks to volume %s", add_bricks[0])

        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Volume start with force failed")

        vol_status = get_volume_status(self.mnode, self.volname)
        self.assertIsNotNone(
            vol_status, "Failed to get volume "
            "status for %s" % self.volname)
Example #11
    def test_peer_probe_validation(self):
        # pylint: disable=too-many-statements
        '''
        -> Create trusted storage pool by probing with network short names
        -> Create volume using IP of host
        -> perform basic operations like
            -> gluster volume start <vol>
            -> gluster volume info <vol>
            -> gluster volume status <vol>
            -> gluster volume stop <vol>
        -> Create a volume using the FQDN of the host
        -> perform basic operations like
            -> gluster volume start <vol>
            -> gluster volume info <vol>
            -> gluster volume status <vol>
            -> gluster volume stop <vol>
        '''
        # Peer probing using short name
        for server in self.servers[1:]:
            ret, hostname, _ = g.run(server, "hostname -s")
            self.assertEqual(ret, 0, ("Unable to get short name "
                                      "for server % s" % server))
            ret, _, _ = peer_probe(self.mnode, hostname)

            if ret == 1:
                ret, hostname, _ = g.run(server, "hostname")
                self.assertEqual(ret, 0, ("Unable to get short name "
                                          "for server % s" % server))

                hostname = hostname.split(".")[0]+"."+hostname.split(".")[1]
                ret, _, _ = peer_probe(self.mnode, hostname)

            self.assertEqual(ret, 0, "Unable to peer"
                             "probe to the server % s" % hostname)
            g.log.info("Peer probe succeeded for server %s", hostname)

        # Create a volume
        self.volname = "test-vol"
        self.brick_list = form_bricks_list(self.mnode, self.volname, 3,
                                           self.servers,
                                           self.all_servers_info)
        g.log.info("Creating a volume")
        ret, _, _ = volume_create(self.mnode, self.volname,
                                  self.brick_list, force=False)
        self.assertEqual(ret, 0, "Unable"
                         "to create volume % s" % self.volname)
        g.log.info("Volume created successfully % s", self.volname)

        # Start a volume
        g.log.info("Start a volume")
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Unable"
                         "to start volume % s" % self.volname)
        g.log.info("Volume started successfully % s", self.volname)

        # Get volume info
        g.log.info("get volume info")
        volinfo = get_volume_info(self.mnode, self.volname)
        self.assertIsNotNone(volinfo, "Failed to get the volume "
                                      "info for %s" % self.volname)

        # Get volume status
        vol_status = get_volume_status(self.mnode, self.volname)
        self.assertIsNotNone(vol_status, "Failed to get volume "
                                         "status for %s" % self.volname)

        # stop volume
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Unable"
                         "to stop volume % s" % self.volname)
        g.log.info("Volume stopped successfully % s", self.volname)

        # Create a volume
        self.volname = "test-vol-fqdn"

        self.brick_list = form_bricks_list(self.mnode, self.volname, 3,
                                           self.servers,
                                           self.all_servers_info)

        # Get the FQDN (fully qualified domain name) of each host and
        # replace the IP with the FQDN in each brick. For example, for the
        # brick 10.70.37.219:/bricks/brick0/vol1 the IP is replaced with
        # the FQDN, so the brick becomes
        # dhcp35-219.lab.eng.blr.redhat.com:/bricks/brick0/vol1

        my_brick_list = []
        for brick in self.brick_list:
            fqdn_list = brick.split(":")
            fqdn = socket.getfqdn(fqdn_list[0])
            fqdn = fqdn + ":" + fqdn_list[1]
            my_brick_list.append(fqdn)

        g.log.info("Creating a volume")
        ret, _, _ = volume_create(self.mnode, self.volname,
                                  my_brick_list, force=False)
        self.assertEqual(ret, 0, "Unable"
                         "to create volume % s" % self.volname)
        g.log.info("Volume created successfully % s", self.volname)

        # Start a volume
        g.log.info("Start a volume")
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Unable"
                         "to start volume % s" % self.volname)
        g.log.info("Volume started successfully % s", self.volname)

        # Get volume info
        g.log.info("get volume info")
        volinfo = get_volume_info(self.mnode, self.volname)
        self.assertIsNotNone(volinfo, "Failed to get the volume "
                                      "info for %s" % self.volname)

        # Get volume status
        vol_status = get_volume_status(self.mnode, self.volname)
        self.assertIsNotNone(vol_status, "Failed to get volume "
                                         "status for %s" % self.volname)

        # stop volume
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Unable"
                         "to stop volume % s" % self.volname)
        g.log.info("Volume stopped successfully % s", self.volname)
    def test_spurious_rebalance(self):
        """
        In this test case:
        1. Trusted storage Pool of 3 nodes
        2. Create a distributed volumes with 3 bricks
        3. Start the volume
        4. Fuse mount the gluster volume on a node outside the trusted pool
        5. Remove a brick from the volume
        6. Check remove-brick status
        7. Stop the remove brick process
        8. Perform fix-layout on the volume
        9. Get the rebalance fix-layout status
       10. Create a directory from mount point
       11. Check the trusted.glusterfs.dht extended attribute for the newly
           created directory on the removed brick
        """

        # pylint: disable=too-many-statements
        my_servers = self.servers[0:3]
        my_server_info = {}
        for server in self.servers[0:3]:
            my_server_info[server] = self.all_servers_info[server]
        for index in range(1, 3):
            ret, _, _ = peer_probe(self.servers[0], self.servers[index])
            self.assertEqual(ret, 0, ("peer probe from %s to %s is failed",
                                      self.servers[0], self.servers[index]))
            g.log.info("peer probe is success from %s to "
                       "%s", self.servers[0], self.servers[index])

        self.volname = "testvol"
        bricks_list = form_bricks_list(self.mnode, self.volname, 3, my_servers,
                                       my_server_info)
        g.log.info("Creating a volume %s ", self.volname)
        ret, _, _ = volume_create(self.mnode,
                                  self.volname,
                                  bricks_list,
                                  force=False)
        self.assertEqual(ret, 0, ("Unable"
                                  "to create volume %s" % self.volname))
        g.log.info("Volume created successfully %s", self.volname)

        ret, _, _ = volume_start(self.mnode, self.volname, False)
        self.assertEqual(ret, 0, ("Failed to start the "
                                  "volume %s", self.volname))
        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        # Mounting a volume
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
        g.log.info("Volume mounted successfully : %s", self.volname)
        remove_brick_list = []
        remove_brick_list.append(bricks_list[2])
        ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
                                 'start')
        self.assertEqual(ret, 0, "Failed to start remove brick operation")
        g.log.info("Remove bricks operation started successfully")

        ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
                                 'stop')
        self.assertEqual(ret, 0, "Failed to stop remove brick operation")
        g.log.info("Remove bricks operation stopped successfully")

        g.log.info("Starting Fix-layoyt on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname, True)
        self.assertEqual(ret, 0, ("Failed to start rebalance for fix-layout"
                                  "on the volume %s", self.volname))
        g.log.info("Successfully started fix-layout on the volume %s",
                   self.volname)

        # Wait for fix-layout to complete
        g.log.info("Waiting for fix-layout to complete")
        ret = wait_for_fix_layout_to_complete(self.mnode, self.volname)
        self.assertTrue(ret, ("Fix-layout is not yet complete on the volume "
                              "%s", self.volname))
        g.log.info("Fix-layout is successfully complete on the volume %s",
                   self.volname)
        ret = mkdir(self.mounts[0].client_system,
                    "%s/dir1" % self.mounts[0].mountpoint)
        self.assertTrue(ret, ("Failed to create directory dir1"))
        g.log.info("directory dir1 is created successfully")

        brick_server, brick_dir = remove_brick_list[0].split(':')
        folder_name = brick_dir + "/dir1"
        g.log.info("Check trusted.glusterfs.dht on host  %s for directory %s",
                   brick_server, folder_name)

        ret = get_fattr(brick_server, folder_name, 'trusted.glusterfs.dht')
        self.assertTrue(
            ret, ("Failed to get trusted.glusterfs.dht for %s" % folder_name))
        g.log.info("get trusted.glusterfs.dht xattr for %s successfully",
                   folder_name)
Example #13
    def test_rebalance_status_from_newly_probed_node(self):

        # Peer probe first 3 servers
        servers_info_from_three_nodes = {}
        for server in self.servers[0:3]:
            servers_info_from_three_nodes[server] = self.all_servers_info[
                server]
            # Peer probe the first 3 servers
            ret, _, _ = peer_probe(self.mnode, server)
            self.assertEqual(ret, 0, "Peer probe failed to %s" % server)

        self.volume['servers'] = self.servers[0:3]
        # create a volume using the first 3 nodes
        ret = setup_volume(self.mnode,
                           servers_info_from_three_nodes,
                           self.volume,
                           force=True)
        self.assertTrue(
            ret, "Failed to create "
            "and start volume %s" % self.volname)

        # Mounting a volume
        ret = self.mount_volume(self.mounts)
        self.assertTrue(ret, "Volume mount failed for %s" % self.volname)

        # Checking volume mounted or not
        ret = is_mounted(self.volname, self.mounts[0].mountpoint, self.mnode,
                         self.mounts[0].client_system, self.mount_type)
        self.assertTrue(
            ret, "Volume not mounted on mount point: %s" %
            self.mounts[0].mountpoint)
        g.log.info("Volume %s mounted on %s", self.volname,
                   self.mounts[0].mountpoint)

        # run IOs
        g.log.info("Starting IO on all mounts...")
        self.counter = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 10 "
                "--dir-length 5 "
                "--max-num-of-dirs 3 "
                "--num-of-files 100 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertEqual(ret, 0,
                             "IO failed on %s" % mount_obj.client_system)
            self.counter = self.counter + 10

        # add a brick to the volume and start rebalance
        brick_to_add = form_bricks_list(self.mnode, self.volname, 1,
                                        self.servers[0:3],
                                        servers_info_from_three_nodes)
        ret, _, _ = add_brick(self.mnode, self.volname, brick_to_add)
        self.assertEqual(ret, 0, "Failed to add a brick to %s" % self.volname)

        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to start rebalance")

        # peer probe a new node from existing cluster
        ret, _, _ = peer_probe(self.mnode, self.servers[3])
        self.assertEqual(ret, 0, "Peer probe failed")

        ret = get_rebalance_status(self.servers[3], self.volname)
        self.assertIsNotNone(ret, "Failed to get rebalance status from the "
                             "newly probed node %s" % self.servers[3])
Example #14
    def test_volume_op(self):

        # Starting a non existing volume should fail
        ret, _, _ = volume_start(self.mnode, "no_vol", force=True)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to Start a non"
            " existing volume. Actual: Successfully started "
            "a non existing volume")
        g.log.info("Starting a non existing volume is failed")

        # Stopping a non existing volume should fail
        ret, _, _ = volume_stop(self.mnode, "no_vol", force=True)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to stop "
            "non-existing volume. Actual: Successfully "
            "stopped a non existing volume")
        g.log.info("Stopping a non existing volume is failed")

        # Deleting a non existing volume should fail
        ret = volume_delete(self.mnode, "no_vol")
        self.assertTrue(
            ret, "Expected: It should fail to delete a "
            "non existing volume. Actual:Successfully deleted "
            "a non existing volume")
        g.log.info("Deleting a non existing volume is failed")

        # Detach a server and try to create volume with node
        # which is not in cluster
        ret, _, _ = peer_detach(self.mnode, self.servers[1])
        self.assertEqual(ret, 0, ("Peer detach is failed"))
        g.log.info("Peer detach is successful")

        num_of_bricks = len(self.servers)
        bricks_list = form_bricks_list(self.mnode, self.volname, num_of_bricks,
                                       self.servers, self.all_servers_info)

        ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
        self.assertNotEqual(
            ret, 0, "Unexpected: Successfully created volume with a brick "
            "from a node which is not part of the cluster")
        g.log.info("Creating a volume with a brick from a node which is not "
                   "part of the cluster failed as expected")

        # Peer probe the detached server
        ret, _, _ = peer_probe(self.mnode, self.servers[1])
        self.assertEqual(ret, 0, ("Peer probe is failed"))
        g.log.info("Peer probe is successful")

        # Create and start a volume
        ret = setup_volume(self.mnode,
                           self.all_servers_info,
                           self.volume,
                           force=True)
        self.assertTrue(ret, "Failed to create the volume")
        g.log.info("Successfully created and started the volume")

        # Starting already started volume should fail
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to start an "
            "already started volume. Actual: Successfully "
            "started an already started volume")
        g.log.info("Starting an already started volume failed as expected")

        # Deleting a volume without stopping should fail
        ret = volume_delete(self.mnode, self.volname)
        self.assertFalse(ret, ("Expected: It should fail to delete a volume"
                               " without stopping. Actual: Successfully "
                               "deleted a volume without stopping it"))
        g.log.error("Failed to delete a volume without stopping it")

        # Stopping a volume should succeed
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("volume stop is failed"))
        g.log.info("Volume stop is success")

        # Stopping a already stopped volume should fail
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to stop an "
            "already stopped volume. Actual: Successfully "
            "stopped an already stopped volume")
        g.log.info("Volume stop failed as expected on an already stopped "
                   "volume")

        # Deleting a volume should succeed
        ret = volume_delete(self.mnode, self.volname)
        self.assertTrue(ret, ("Volume delete is failed"))
        g.log.info("Volume delete is success")

        # Deleting a non existing volume should fail
        ret = volume_delete(self.mnode, self.volname)
        self.assertTrue(
            ret, "Expected: It should fail to delete a non-"
            "existing volume. Actual: Successfully deleted a "
            "non-existing volume")
        g.log.info("Volume delete failed as expected for a non-existing "
                   "volume")

        # Volume info command should succeed
        ret = get_volume_info(self.mnode)
        self.assertIsNotNone(ret, "volume info command failed")
        g.log.info("Volume info command is success")
Example #15
    def test_peer_probe_status(self):

        # get FQDN of node1 and node2
        node1 = socket.getfqdn(self.mnode)
        node2 = socket.getfqdn(self.servers[1])

        # peer probe to a new node, N2 from N1
        ret, _, err = peer_probe(node1, node2)
        self.assertEqual(ret, 0, ("Peer probe failed to %s from %s with "
                                  "error message %s" % (self.servers[1],
                                                        self.mnode, err)))
        g.log.info("Peer probe from %s to %s is success", self.mnode,
                   self.servers[1])

        # check peer status in both the nodes, it should have FQDN
        # from node1
        ret, out, err = peer_status(self.mnode)
        self.assertEqual(ret, 0, ("Failed to get peer status from %s with "
                                  "error message %s" % (self.mnode, err)))
        g.log.info("Successfully got peer status from %s", self.mnode)

        self.assertIn(node2, out, ("FQDN of %s is not present in the "
                                   "output of peer status from %s"
                                   % (self.servers[1], self.mnode)))
        g.log.info("FQDN of %s is present in peer status of %s",
                   self.servers[1], self.mnode)

        # from node2
        ret, out, err = peer_status(self.servers[1])
        self.assertEqual(ret, 0, ("Failed to get peer status from %s with "
                                  "error message %s" % (self.servers[1], err)))
        g.log.info("Successfully got peer status from %s", self.servers[1])

        self.assertIn(node1, out, ("FQDN of %s is not present in the "
                                   "output of peer status from %s"
                                   % (self.mnode, self.servers[1])))
        g.log.info("FQDN of %s is present in peer status of %s",
                   self.mnode, self.servers[1])

        # create a distributed volume with 2 bricks
        servers_info_from_two_node_cluster = {}
        for server in self.servers[0:2]:
            servers_info_from_two_node_cluster[
                server] = self.all_servers_info[server]

        self.volume['servers'] = self.servers[0:2]
        self.volume['voltype']['dist_count'] = 2
        ret = setup_volume(self.mnode, servers_info_from_two_node_cluster,
                           self.volume)
        self.assertTrue(ret, ("Failed to create "
                              "and start volume %s" % self.volname))
        g.log.info("Successfully created and started the volume %s",
                   self.volname)

        # peer probe to a new node, N3
        ret, _, err = peer_probe(self.mnode, self.servers[2])
        self.assertEqual(ret, 0, ("Peer probe failed to %s from %s with "
                                  "error message %s" % (self.servers[2],
                                                        self.mnode, err)))
        g.log.info("Peer probe from %s to %s is success", self.mnode,
                   self.servers[2])

        # add a brick from N3 to the volume
        num_bricks_to_add = 1
        server_info = {}
        server_info[self.servers[2]] = self.all_servers_info[self.servers[2]]
        brick = form_bricks_list(self.mnode, self.volname, num_bricks_to_add,
                                 self.servers[2], server_info)
        ret, _, _ = add_brick(self.mnode, self.volname, brick)
        self.assertEqual(ret, 0, ("Failed to add brick to volume %s"
                                  % self.volname))
        g.log.info("add brick to the volume %s is success", self.volname)

        # get volume info, it should have correct brick information
        ret = get_volume_info(self.mnode, self.volname)
        self.assertIsNotNone(ret, ("Failed to get volume info from %s"
                                   % self.mnode))
        g.log.info("volume info from %s is success", self.mnode)

        brick3 = ret[self.volname]['bricks']['brick'][2]['name']
        self.assertEqual(brick3, str(brick[0]), ("Volume info has incorrect "
                                                 "information"))
        g.log.info("Volume info has correct information")
    def test_rebalance_hang(self):
        """
        In this test case:
        1. Trusted storage Pool of 2 nodes
        2. Create a distributed volumes with 2 bricks
        3. Start the volume
        4. Mount the volume
        5. Add some data file on mount
        6. Start rebalance with force
        7. kill glusterd on 2nd node
        8. Issue volume related command
        """

        # pylint: disable=too-many-statements
        my_server_info = {
            self.servers[0]: self.all_servers_info[self.servers[0]]
        }
        my_servers = self.servers[0:2]
        index = 1
        ret, _, _ = peer_probe(self.servers[0], self.servers[index])
        self.assertEqual(ret, 0, ("peer probe from %s to %s is failed",
                                  self.servers[0], self.servers[index]))
        g.log.info("peer probe is success from %s to "
                   "%s", self.servers[0], self.servers[index])
        key = self.servers[index]
        my_server_info[key] = self.all_servers_info[key]

        self.volname = "testvol"
        bricks_list = form_bricks_list(self.mnode, self.volname, 2, my_servers,
                                       my_server_info)
        g.log.info("Creating a volume %s ", self.volname)
        ret, _, _ = volume_create(self.mnode,
                                  self.volname,
                                  bricks_list,
                                  force=False)
        self.assertEqual(ret, 0, ("Unable"
                                  "to create volume %s" % self.volname))
        g.log.info("Volume created successfully %s", self.volname)

        ret, _, _ = volume_start(self.mnode, self.volname, False)
        self.assertEqual(ret, 0, ("Failed to start the "
                                  "volume %s", self.volname))
        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        # Mounting a volume
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
        g.log.info("Volume mounted successfully : %s", self.volname)

        self.all_mounts_procs = []
        # Creating files
        command = ("cd %s/ ; "
                   "for i in `seq 1 10` ; "
                   "do mkdir l1_dir.$i ; "
                   "for j in `seq 1 5` ; "
                   "do mkdir l1_dir.$i/l2_dir.$j ; "
                   "for k in `seq 1 10` ; "
                   "do dd if=/dev/urandom of=l1_dir.$i/l2_dir.$j/test.$k "
                   "bs=128k count=$k ; "
                   "done ; "
                   "done ; "
                   "done ; " % (self.mounts[0].mountpoint))

        proc = g.run_async(self.mounts[0].client_system,
                           command,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)
        self.io_validation_complete = False
        # Validate IO
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")

        g.log.info("Starting rebalance with force on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname, False, True)
        self.assertEqual(
            ret, 0, ("Failed to start rebalance for volume %s", self.volname))
        g.log.info("Successfully rebalance on the volume %s", self.volname)

        ret = stop_glusterd(self.servers[1])
        self.assertTrue(ret, "Failed to stop glusterd on one of the node")
        ret = is_glusterd_running(self.servers[1])
        self.assertNotEqual(
            ret, 0, ("Glusterd is not stopped on servers %s", self.servers[1]))
        g.log.info("Glusterd stop on the nodes : %s succeeded",
                   self.servers[1])

        # Wait for rebalance to complete
        g.log.info("Waiting for rebalance to complete")
        ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
        self.assertTrue(ret, ("Rebalance is not yet complete on the volume "
                              "%s", self.volname))
        g.log.info("Rebalance is successfully complete on the volume %s",
                   self.volname)

        vol_status = get_volume_status(self.mnode, self.volname)
        self.assertIsNotNone(
            vol_status, "Failed to get volume "
            "status for %s" % self.volname)

        # Start glusterd on the node where it is stopped
        ret = start_glusterd(self.servers[1])
        self.assertTrue(ret, "glusterd start on the node failed")
        count = 0
        while count < 60:
            ret = is_glusterd_running(self.servers[1])
            if not ret:
                break
            sleep(2)
            count += 1
        self.assertEqual(ret, 0,
                         "glusterd is not running on %s" % self.servers[1])
        g.log.info("Glusterd start on the nodes : %s "
                   "succeeded", self.servers[1])
    def test_brick_port(self):
        # pylint: disable=too-many-statements, too-many-branches
        """
        In this test case:
        1. Trusted storage Pool of 2 nodes
        2. Create a distributed volumes with 2 bricks
        3. Start the volume
        4. Stop glusterd on node 2
        5. Modify any of the volume option on node 1
        6. Start glusterd on node 2
        7. Check volume status, brick should get port
        """
        my_server_info = {
            self.servers[0]: self.all_servers_info[self.servers[0]]
        }
        my_servers = self.servers[0:2]
        index = 1
        ret, _, _ = peer_probe(self.servers[0], self.servers[index])
        self.assertEqual(ret, 0, ("peer probe from %s to %s is failed",
                                  self.servers[0], self.servers[index]))
        g.log.info("peer probe is success from %s to "
                   "%s", self.servers[0], self.servers[index])
        key = self.servers[index]
        my_server_info[key] = self.all_servers_info[key]

        self.volname = "testvol"
        bricks_list = form_bricks_list(self.mnode, self.volname, 2,
                                       my_servers,
                                       my_server_info)
        g.log.info("Creating a volume %s ", self.volname)
        ret = volume_create(self.mnode, self.volname,
                            bricks_list, force=False)
        self.assertEqual(ret[0], 0, ("Unable"
                                     "to create volume %s" % self.volname))
        g.log.info("Volume created successfully %s", self.volname)

        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to start the "
                                  "volume %s", self.volname))
        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")

        g.log.info("Successfully got the list of bricks of volume")

        vol_status = get_volume_status(self.mnode, self.volname)
        self.assertIsNotNone(vol_status, "Failed to get volume "
                             "status for %s" % self.volname)
        totport = 0
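        # get_volume_status returns a nested dict keyed by volume name, node
        # and brick path; count every brick entry that was assigned a port
        # (assumption based on how the loop below indexes the dict)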
        for _, value in vol_status.items():
            for _, val in value.items():
                for _, value1 in val.items():
                    if int(value1["port"]) > 0:
                        totport += 1

        self.assertEqual(totport, 2, ("Volume %s is not started successfully"
                                      "because no. of brick port is not equal"
                                      " to 2", self.volname))

        ret = stop_glusterd(self.servers[1])
        self.assertTrue(ret, "Failed to stop glusterd on one of the node")
        ret = wait_for_glusterd_to_start(self.servers[1])
        self.assertFalse(ret, "glusterd is still running on %s"
                         % self.servers[1])
        g.log.info("Glusterd stop on the nodes : %s "
                   "succeeded", self.servers[1])

        option = {'performance.readdir-ahead': 'on'}
        ret = set_volume_options(self.servers[0], self.volname, option)
        self.assertTrue(ret, "gluster volume set %s performance.readdir-ahead"
                             "on is failed on server %s"
                        % (self.volname, self.servers[0]))
        g.log.info("gluster volume set %s performance.readdir-ahead on"
                   "successfully on :%s", self.volname, self.servers[0])

        ret = start_glusterd(self.servers[1])
        self.assertTrue(ret, "Failed to start glusterd on one of the node")
        g.log.info("Glusterd start on the nodes : %s "
                   "succeeded", self.servers[1])
        ret = wait_for_glusterd_to_start(self.servers[1])
        self.assertTrue(ret, "glusterd is not running on %s"
                        % self.servers[1])
        g.log.info("Glusterd start on the nodes : %s "
                   "succeeded", self.servers[1])

        ret = wait_for_peers_to_connect(self.servers[0], self.servers[1])
        self.assertTrue(ret, "glusterd is not connected %s with peer %s"
                        % (self.servers[0], self.servers[1]))

        vol_status = get_volume_status(self.mnode, self.volname)
        self.assertIsNotNone(vol_status, "Failed to get volume "
                             "status for %s" % self.volname)
        totport = 0
        for _, value in vol_status.items():
            for _, val in value.items():
                for _, value1 in val.items():
                    if int(value1["port"]) > 0:
                        totport += 1

        self.assertEqual(totport, 2, ("Volume %s is not started successfully"
                                      "because no. of brick port is not equal"
                                      " to 2", self.volname))
    def test_glusterd_quorum_validation(self):
        """
        -> Creating two volumes and starting them, stop the second volume
        -> set the server quorum and set the ratio to 90
        -> Stop the glusterd in one of the node, so the quorum won't meet
        -> Peer probing a new node should fail
        -> Volume stop will fail
        -> volume delete will fail
        -> volume reset will fail
        -> Start the glusterd on the node where it is stopped
        -> Volume stop, start, delete will succeed once quorum is met
        """
        # pylint: disable=too-many-statements, too-many-branches

        # Peer probe first 3 servers
        servers_info_from_three_nodes = {}
        for server in self.servers[0:3]:
            servers_info_from_three_nodes[server] = self.all_servers_info[
                server]

            # Peer probe the first 3 servers
            ret, _, _ = peer_probe(self.mnode, server)
            self.assertEqual(ret, 0,
                             ("Peer probe failed to one of the server"))
        g.log.info("Peer probe to first 3 nodes succeeded")

        self.volume['servers'] = self.servers[0:3]
        # Create a volume using the first 3 nodes
        ret = setup_volume(self.mnode,
                           servers_info_from_three_nodes,
                           self.volume,
                           force=True)
        self.assertTrue(ret, ("Failed to create and start volume"))
        g.log.info("Volume created and started successfully")

        # Creating another volume and stopping it
        second_volume = "second_volume"
        self.volume['name'] = second_volume
        ret = setup_volume(self.mnode,
                           servers_info_from_three_nodes,
                           self.volume,
                           force=True)
        self.assertTrue(ret, ("Failed to create and start volume"))
        g.log.info("Volume created and started succssfully")

        # stopping the second volume
        g.log.info("Stopping the second volume %s", second_volume)
        ret, _, _ = volume_stop(self.mnode, second_volume)
        self.assertEqual(ret, 0, ("Failed to stop the volume"))
        g.log.info("Successfully stopped second volume %s", second_volume)

        # Setting the server-quorum-type as server
        self.options = {"cluster.server-quorum-type": "server"}
        vol_list = get_volume_list(self.mnode)
        self.assertIsNotNone(vol_list, "Failed to get the volume list")
        g.log.info("Fetched the volume list")
        for volume in vol_list:
            g.log.info(
                "Setting the server-quorum-type as server"
                " on volume %s", volume)
            ret = set_volume_options(self.mnode, volume, self.options)
            self.assertTrue(ret, ("Failed to set the quorum type as a server"
                                  " on volume %s", volume))
        g.log.info("Server Quorum type is set as a server")

        # Setting the server quorum ratio to 90
        self.quorum_perecent = {'cluster.server-quorum-ratio': '90%'}
        ret = set_volume_options(self.mnode, 'all', self.quorum_perecent)
        self.assertTrue(ret, ("Failed to set the server quorum ratio "
                              "to 90 on servers"))
        g.log.info("Successfully set server quorum ratio to 90% on servers")

        # Stop glusterd on one of the node
        ret = stop_glusterd(self.servers[2])
        self.assertTrue(ret, ("Failed to stop glusterd on "
                              "node %s", self.servers[2]))
        g.log.info("Glusterd stop on the nodes : %s"
                   " succeeded", self.servers[2])

        # Check glusterd is stopped
        ret = is_glusterd_running(self.servers[2])
        self.assertEqual(ret, 1, "Unexpected: Glusterd is running on node")
        g.log.info("Expected: Glusterd stopped on node %s", self.servers[2])

        # Adding a new peer will fail as quorum not met
        ret, _, _ = peer_probe(self.mnode, self.servers[3])
        self.assertNotEqual(ret, 0,
                            ("Unexpected:"
                             "Succeeded to peer probe new node %s when quorum "
                             "is not met", self.servers[3]))
        g.log.info("Failed to peer probe new node as expected"
                   " when quorum not met")

        # Starting the stopped volume should fail as quorum is not met
        ret, _, _ = volume_start(self.mnode, second_volume)
        self.assertNotEqual(
            ret, 0, "Unexpected: Successfully started "
            "volume even when quorum not met.")
        g.log.info(
            "Volume start %s failed as expected when quorum "
            "is not met", second_volume)

        # Stopping a volume should fail as quorum is not met.
        # Stop the first volume.
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(
            ret, 1, "Unexpected: Successfully stopped"
            " volume even when quorum is not met")
        g.log.info(
            "volume stop %s failed as expected when quorum "
            "is not met", self.volname)

        # Stopping a volume with force option should fail
        ret, _, _ = volume_stop(self.mnode, self.volname, force=True)
        self.assertNotEqual(
            ret, 0, "Unexpected: Successfully "
            "stopped volume with force. Expected: "
            "Volume stop should fail when quourm is not met")
        g.log.info("volume stop failed as expected when quorum is not met")

        # Deleting a volume should fail. Deleting the second volume.
        ret = volume_delete(self.mnode, second_volume)
        self.assertFalse(
            ret, "Unexpected: Volume delete was "
            "successful even when quourm is not met")
        g.log.info("volume delete failed as expected when quorum is not met")

        # Volume reset should fail when quorum is not met
        ret, _, _ = volume_reset(self.mnode, self.volname)
        self.assertNotEqual(
            ret, 0, "Unexpected: Volume reset was "
            "successful even when quorum is not met")
        g.log.info("volume reset failed as expected when quorum is not met")

        # Volume reset should fail even with force when quorum is not met
        ret, _, _ = volume_reset(self.mnode, self.volname, force=True)
        self.assertNotEqual(
            ret, 0, "Unexpected: Volume reset was "
            "successful with force even "
            "when quourm is not met")
        g.log.info("volume reset failed as expected when quorum is not met")

        # Start glusterd on the node where glusterd is stopped
        ret = start_glusterd(self.servers[2])
        self.assertTrue(ret, "Failed to start glusterd on one node")
        g.log.info("Started glusterd on server"
                   " %s successfully", self.servers[2])

        ret = is_glusterd_running(self.servers[2])
        self.assertEqual(ret, 0, ("glusterd is not running on "
                                  "node %s", self.servers[2]))
        g.log.info("glusterd is running on node" " %s ", self.servers[2])

        # Check peer status: all peers should be in connected state and none
        # of the nodes should be in peer rejected state
        halt, counter, _rc = 30, 0, False
        g.log.info("Wait for some seconds, right after glusterd start it "
                   "will create two daemon process it need few seconds "
                   "(like 3-5) to initialize the glusterd")
        while counter < halt:
            ret = is_peer_connected(self.mnode, self.servers[0:3])
            if not ret:
                g.log.info("Peers are not connected state,"
                           " Retry after 2 seconds .......")
                sleep(2)
                counter = counter + 2
            else:
                _rc = True
                g.log.info("Peers are in connected state in the cluster")
                break

        self.assertTrue(_rc, ("Peers are not connected state after "
                              "bringing back glusterd online on the "
                              "nodes in which previously glusterd "
                              "had been stopped"))

        # Check all bricks are online or wait for the bricks to be online
        ret = wait_for_bricks_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, "All bricks are not online")
        g.log.info("All bricks of the volume %s are online", self.volname)

        # Once quorum is met should be able to cleanup the volume
        ret = volume_delete(self.mnode, second_volume)
        self.assertTrue(ret, "Volume delete failed even when quorum is met")
        g.log.info("volume delete succeed without any issues")

        # Volume stop should succeed
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Volume stop failed")
        g.log.info("succeeded stopping the volume as expected")

        # volume reset should succeed
        ret, _, _ = volume_reset(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Volume reset failed ")
        g.log.info("volume reset succeeded as expected when quorum is not met")

        # Peer probe new node should succeed
        ret, _, _ = peer_probe(self.mnode, self.servers[3])
        self.assertEqual(
            ret, 0, ("Failed to peer probe new node even when quorum is met"))
        g.log.info("Succeeded to peer probe new node when quorum met")

        # Check peer status: all peers should be in connected state and none
        # of the nodes should be in peer rejected state
        halt, counter, _rc = 30, 0, False
        g.log.info("Wait for some seconds, right after peer probe")
        while counter < halt:
            ret = is_peer_connected(self.mnode, self.servers[0:3])
            if not ret:
                g.log.info("Peers are not connected state,"
                           " Retry after 2 seconds .......")
                sleep(2)
                counter = counter + 2
            else:
                _rc = True
                g.log.info("Peers are in connected state in the cluster")
                break

        self.assertTrue(_rc, ("Peers are not connected state"))
Example #19
    def test_peer_probe_when_glusterd_down(self):
        # pylint: disable=too-many-statements
        '''
        Test script to verify the behavior when we try to peer
        probe a valid node whose glusterd is down
        Also post-validate to make sure no core files are created
        under the "/", /var/log/core and /tmp directories

        Ref: BZ#1257394 Provide meaningful error on peer probe and peer detach
        Test Steps:
        1 check the current peer status
        2 detach one of the valid nodes which is already part of cluster
        3 stop glusterd on that node
        4 try to attach the above node to the cluster, which must fail with
          a Transport endpoint error
        5 Recheck the test using hostname, expected to see same result
        6 start glusterd on that node
        7 halt/reboot the node
        8 try to peer probe the halted node, which must fail again.
        9 The only error accepted is
          "peer probe: failed: Probe returned with Transport endpoint is not
          connected"
        10 Check peer status and make sure no other nodes in peer reject state
        '''

        ret, test_timestamp, _ = g.run_local('date +%s')
        test_timestamp = test_timestamp.strip()

        # detach one of the nodes which is part of the cluster
        g.log.info("detaching server %s ", self.servers[1])
        ret, _, err = peer_detach(self.mnode, self.servers[1])
        msg = 'peer detach: failed: %s is not part of cluster\n' \
              % self.servers[1]
        if ret:
            self.assertEqual(err, msg, "Failed to detach %s "
                             % (self.servers[1]))

        # bring down glusterd of the server which has been detached
        g.log.info("Stopping glusterd on %s ", self.servers[1])
        ret = stop_glusterd(self.servers[1])
        self.assertTrue(ret, "Fail to stop glusterd on %s " % self.servers[1])

        # trying to peer probe the node whose glusterd was stopped using its IP
        g.log.info("Peer probing %s when glusterd down ", self.servers[1])
        ret, _, err = peer_probe(self.mnode, self.servers[1])
        self.assertNotEqual(ret, 0, "Peer probe should not pass when "
                                    "glusterd is down")
        self.assertEqual(err, "peer probe: failed: Probe returned with "
                              "Transport endpoint is not connected\n")

        # trying to peer probe the same node with hostname
        g.log.info("Peer probing node %s using hostname with glusterd down ",
                   self.servers[1])
        hostname = g.run(self.servers[1], "hostname")
        ret, _, err = peer_probe(self.mnode, hostname[1].strip())
        self.assertNotEqual(ret, 0, "Peer probe should not pass when "
                                    "glusterd is down")
        self.assertEqual(err, "peer probe: failed: Probe returned with"
                              " Transport endpoint is not connected\n")

        # start glusterd again for the next set of test steps
        g.log.info("starting glusterd on %s ", self.servers[1])
        ret = start_glusterd(self.servers[1])
        self.assertTrue(ret, "glusterd couldn't start successfully on %s"
                        % self.servers[1])

        # reboot a server and then trying to peer probe at the time of reboot
        g.log.info("Rebooting %s and checking peer probe", self.servers[1])
        reboot = g.run_async(self.servers[1], "reboot")

        # Mandatory sleep for 3 seconds to make sure node is in halted state
        sleep(3)

        # Peer probing the node using IP when it is still not online
        g.log.info("Peer probing node %s which has been issued a reboot ",
                   self.servers[1])
        ret, _, err = peer_probe(self.mnode, self.servers[1])
        self.assertNotEqual(ret, 0, "Peer probe passed when it was expected to"
                                    " fail")
        self.assertEqual(err, "peer probe: failed: Probe returned with "
                              "Transport endpoint is not connected\n")

        # Peer probing the node using hostname when it is still not online
        g.log.info("Peer probing node %s using hostname which is still "
                   "not online ",
                   self.servers[1])
        ret, _, err = peer_probe(self.mnode, hostname[1].strip())
        self.assertNotEqual(ret, 0, "Peer probe should not pass when node "
                                    "has not come online")
        self.assertEqual(err, "peer probe: failed: Probe returned with "
                              "Transport endpoint is not connected\n")

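        # The reboot drops the remote session, so the async reboot command is
        # expected to return 255 here (hence the assertion below) rather than 0.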
        ret, _, _ = reboot.async_communicate()
        self.assertEqual(ret, 255, "reboot failed")

        # Validate if rebooted node is online or not
        count = 0
        while count < 40:
            sleep(15)
            ret, _ = are_nodes_online(self.servers[1])
            if ret:
                g.log.info("Node %s is online", self.servers[1])
                break
            count += 1
        self.assertTrue(ret, "Node in test not yet online")

        # check if glusterd is running post reboot
        ret = wait_for_glusterd_to_start(self.servers[1],
                                         glusterd_start_wait_timeout=120)
        self.assertTrue(ret, "Glusterd service is not running post reboot")

        # peer probe the node must pass
        g.log.info("peer probing node %s", self.servers[1])
        ret, _, err = peer_probe(self.mnode, self.servers[1])
        self.assertEqual(ret, 0, "Peer probe has failed unexpectedly with "
                                 "%s " % err)

        # checking if core file created in "/", "/tmp" and "/var/log/core"
        ret = is_core_file_created(self.servers, test_timestamp)
        self.assertTrue(ret, "core file found")
Example #20
    def test_volume_create(self):

        # create and start a volume
        self.volume['name'] = "first_volume"
        self.volname = "first_volume"
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, "Failed to create and start volume")

        # bring a brick down and volume start force should bring it to online

        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        ret = bring_bricks_offline(self.volname, bricks_list[0:2])
        self.assertTrue(ret, "Failed to bring down the bricks")
        g.log.info("Successfully brought the bricks down")

        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Failed to start the volume")
        g.log.info("Volume start with force is success")

        ret = wait_for_bricks_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, "Failed to bring the bricks online")
        g.log.info("Volume start with force successfully brought all the "
                   "bricks online")

        # create volume with previously used bricks and different volume name
        self.volname = "second_volume"
        ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
        self.assertNotEqual(
            ret, 0, "Expected: It should fail to create a "
            "volume with previously used bricks. Actual:"
            "Successfully created the volume with previously"
            " used bricks")
        g.log.info("Failed to create the volume with previously used bricks")

        # create a volume with already existing volume name
        self.volume['name'] = "first_volume"
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(
            ret, "Expected: It should fail to create a volume"
            " with already existing volume name. Actual: "
            "Successfully created the volume with "
            "already existing volname")
        g.log.info("Failed to create the volume with already existing volname")

        # creating a volume with non existing brick path should fail

        self.volname = "second_volume"
        bricks_list = form_bricks_list(self.mnode, self.volname,
                                       len(self.servers), self.servers,
                                       self.all_servers_info)
        nonexisting_brick_index = random.randint(0, len(bricks_list) - 1)
        non_existing_brick = bricks_list[nonexisting_brick_index].split(":")[0]
        non_existing_path = ":/brick/non_existing_path"
        non_existing_brick = non_existing_brick + non_existing_path
        bricks_list[nonexisting_brick_index] = non_existing_brick

        ret, _, _ = volume_create(self.mnode, self.volname, bricks_list)
        self.assertNotEqual(
            ret, 0, "Expected: Creating a volume with non "
            "existing brick path should fail. Actual: "
            "Successfully created the volume with "
            "non existing brick path")
        g.log.info("Failed to create the volume with non existing brick path")

        # cleanup the volumes and peer detach all servers. Form two clusters,
        # then try to create a volume with bricks whose nodes are in
        # different clusters

        # cleanup volumes
        vol_list = get_volume_list(self.mnode)
        self.assertIsNotNone(vol_list, "Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            self.assertTrue(ret, "Unable to delete volume % s" % volume)

        # peer detach all servers
        ret = peer_detach_servers(self.mnode, self.servers)
        self.assertTrue(ret, "Peer detach to all servers is failed")
        g.log.info("Peer detach to all the servers is success")

        # form cluster 1
        ret, _, _ = peer_probe(self.servers[0], self.servers[1])
        self.assertEqual(
            ret, 0, "Peer probe from %s to %s failed" %
            (self.servers[0], self.servers[1]))
        g.log.info("Peer probe from %s to %s succeeded",
                   self.servers[0], self.servers[1])

        # form cluster 2
        ret, _, _ = peer_probe(self.servers[2], self.servers[3])
        self.assertEqual(
            ret, 0, "Peer probe from %s to %s failed" %
            (self.servers[2], self.servers[3]))
        g.log.info("Peer probe from %s to %s succeeded",
                   self.servers[2], self.servers[3])

        # Creating a volume with bricks which are part of another
        # cluster should fail
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertFalse(
            ret, "Expected: Creating a volume with bricks"
            " which are part of another cluster should fail."
            " Actual: Successfully created the volume with "
            "bricks which are part of another cluster")
        g.log.info("Failed to create the volume with bricks which are "
                   "part of another cluster")

        # form a cluster, bring a node down. Try to create a volume when one
        # of the brick nodes is down
        ret, _, _ = peer_detach(self.servers[2], self.servers[3])
        self.assertEqual(ret, 0, "Peer detach failed")
        g.log.info("Peer detach succeeded")

        ret = peer_probe_servers(self.mnode, self.servers)
        self.assertTrue(ret, "Peer probe is failed")
        g.log.info("Peer probe to all the servers is success")

        random_server = self.servers[random.randint(1, len(self.servers) - 1)]
        ret = stop_glusterd(random_server)
        self.assertTrue(ret, "Glusterd is stopped successfully")

        self.volume['name'] = "third_volume"
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertFalse(
            ret, "Expected: It should fail to create a volume "
            "when one of the nodes is down. Actual: Successfully "
            "created the volume with a brick whose node is down")

        g.log.info("Failed to create the volume with brick whose node is down")
Example #21
    def test_peer_probe(self):
        """
        In this test case:
        1. Create Dist Volume on Node 1
        2. Create Replica Volume on Node 2
        3. Peer Probe N2 from N1 (should fail)
        4. Clean All Volumes
        5. Peer Probe N1 to N2 (should succeed)
           Peer Probe N3 to N2 (should fail)
        6. Create replica Volume on N1 and N2
        7. Peer probe from N3 to N1 (should fail)
        8. Peer probe from N1 to N3 (should succeed)
        9. Create replica Volume on N1, N2 and N3
        10. Start Volume
        11. Delete volume (should fail)
        12. Stop volume
        13. Clean up all volumes
        """

        # pylint: disable=too-many-statements
        # Create a distributed volume on Node1
        number_of_brick = 1
        servers_info_from_single_node = {}
        servers_info_from_single_node[self.servers[0]] = self.all_servers_info[
            self.servers[0]]
        self.volname = "testvol"
        bricks_list = form_bricks_list(self.servers[0], self.volname,
                                       number_of_brick, self.servers[0],
                                       servers_info_from_single_node)
        ret, _, _ = volume_create(self.servers[0], self.volname, bricks_list,
                                  True)
        self.assertEqual(ret, 0, "Volume create failed")
        g.log.info("Volume %s created successfully", self.volname)

        # Create a replicate volume on Node2 without force
        number_of_brick = 2
        servers_info_from_single_node = {}
        servers_info_from_single_node[self.servers[1]] = self.all_servers_info[
            self.servers[1]]
        kwargs = {'replica_count': 2}
        self.volname = "new-volume"
        bricks_list = form_bricks_list(self.servers[1], self.volname,
                                       number_of_brick, self.servers[1],
                                       servers_info_from_single_node)

        # creation of replicate volume without force should fail
        ret, _, _ = volume_create(self.servers[1], self.volname, bricks_list,
                                  False, **kwargs)
        self.assertNotEqual(ret, 0, ("Unexpected: Successfully created "
                                     "the replicate volume on node2 "
                                     "without force"))
        g.log.info(
            "Failed to create the replicate volume %s as "
            "expected without force", self.volname)

        # Create a replica volume on Node2 with force
        number_of_brick = 3
        servers_info_from_single_node = {}
        servers_info_from_single_node[self.servers[1]] = self.all_servers_info[
            self.servers[1]]
        kwargs = {'replica_count': 3}
        self.volname = "new-volume"
        bricks_list = form_bricks_list(self.servers[1], self.volname,
                                       number_of_brick, self.servers[1],
                                       servers_info_from_single_node)

        # creation of replicate volume with force should succeed
        ret, _, _ = volume_create(self.servers[1], self.volname, bricks_list,
                                  True, **kwargs)
        self.assertEqual(ret, 0, "Volume create failed")
        g.log.info("Volume %s created", self.volname)

        # Perform peer probe from N1 to N2
        ret, _, _ = peer_probe(self.servers[0], self.servers[1])
        self.assertNotEqual(
            ret, 0,
            ("peer probe succeeded from %s to %s even though %s "
             "is part of another cluster or has volumes "
             "configured", self.servers[0], self.servers[1], self.servers[1]))
        g.log.info("peer probe failed from %s to "
                   "%s as expected", self.servers[0], self.servers[1])

        # clean up all volumes
        for server in self.servers[0:2]:
            # Listing all the volumes
            vol_list = get_volume_list(server)
            self.assertIsNotNone(vol_list, "Unable to get the volume list")
            g.log.info("Got the volume list from %s", server)
            for vol in vol_list:
                g.log.info("deleting volume : %s", vol)
                ret = cleanup_volume(server, vol)
                self.assertTrue(ret, ("Failed to Cleanup the Volume %s", vol))
                g.log.info("Volume deleted successfully : %s", vol)

        # Peer probe from N1 to N2 should succeed
        ret, _, _ = peer_probe(self.servers[0], self.servers[1])
        self.assertEqual(ret, 0, ("peer probe from %s to %s "
                                  "failed", self.servers[0], self.servers[1]))
        g.log.info("peer probe from %s to "
                   "%s succeeded", self.servers[0], self.servers[1])

        # Checking if peer is connected
        counter = 0
        while counter < 30:
            ret = is_peer_connected(self.servers[0], self.servers[1])
            counter += 1
            if ret:
                break
            sleep(3)
        self.assertTrue(ret, "Peer is not in connected state.")
        g.log.info("Peers is in connected state.")

        # Perform peer probe from N3 to N2 should fail
        ret, _, _ = peer_probe(self.servers[2], self.servers[1])
        self.assertNotEqual(
            ret, 0,
            ("peer probe succeeded from %s to %s even though %s "
             "is part of another cluster or has volumes "
             "configured", self.servers[2], self.servers[1], self.servers[1]))
        g.log.info("peer probe failed from %s to "
                   "%s as expected", self.servers[2], self.servers[1])

        # Create a replica volume on N1 and N2 with force
        number_of_brick = 2
        servers_info_from_two_node = {}
        for server in self.servers[0:2]:
            servers_info_from_two_node[server] = self.all_servers_info[server]
        kwargs = {'replica_count': 2}
        self.volname = "new-volume"
        bricks_list = form_bricks_list(self.servers[0], self.volname,
                                       number_of_brick, self.servers[0:2],
                                       servers_info_from_two_node)
        ret, _, _ = volume_create(self.servers[1], self.volname, bricks_list,
                                  True, **kwargs)
        self.assertEqual(ret, 0, "Volume create failed")
        g.log.info("Volume %s created succssfully", self.volname)

        # Perform peer probe from N3 to N1 should fail
        ret, _, _ = peer_probe(self.servers[2], self.servers[0])
        self.assertNotEqual(
            ret, 0,
            ("peer probe succeeded from %s to %s even though %s is "
             "part of another cluster or has volumes "
             "configured", self.servers[2], self.servers[0], self.servers[0]))
        g.log.info("peer probe from %s to "
                   "%s failed as expected", self.servers[2], self.servers[0])

        # Peer probe from N1 to N3 should succeed
        ret, _, _ = peer_probe(self.servers[0], self.servers[2])
        self.assertEqual(ret, 0, ("peer probe from %s to %s "
                                  "failed", self.servers[0], self.servers[2]))
        g.log.info("peer probe from %s to "
                   "%s succeeded", self.servers[0], self.servers[2])

        # Checking if peer is connected
        counter = 0
        while counter < 30:
            ret = is_peer_connected(self.servers[0], self.servers[:3])
            counter += 1
            if ret:
                break
            sleep(3)
        self.assertTrue(ret, "Peer is not in connected state.")
        g.log.info("Peers is in connected state.")

        # Create a replica volume on N1, N2 and N3 with force
        number_of_brick = 3
        server_info_from_three_node = {}
        for server in self.servers[0:3]:
            server_info_from_three_node[server] = self.all_servers_info[server]
        kwargs = {'replica_count': 3}
        self.volname = "new-replica-volume"
        bricks_list = form_bricks_list(self.servers[2], self.volname,
                                       number_of_brick, self.servers[0:3],
                                       server_info_from_three_node)
        ret, _, _ = volume_create(self.servers[1], self.volname, bricks_list,
                                  True, **kwargs)
        self.assertEqual(ret, 0, "Volume create failed")
        g.log.info("creation of replica volume should succeed")

        ret, _, _ = volume_start(self.servers[2], self.volname, True)
        self.assertEqual(ret, 0, ("Failed to start the "
                                  "volume %s", self.volname))
        g.log.info("Volume %s start with force is success", self.volname)

        # Volume delete should fail without stopping volume
        self.assertTrue(
            volume_delete(self.servers[2], self.volname, xfail=True),
            "Unexpected Error: Volume deleted "
            "successfully without stopping volume")
        g.log.info(
            "Expected: volume delete should fail without "
            "stopping volume: %s", self.volname)

        # Volume stop with force
        ret, _, _ = volume_stop(self.mnode, self.volname, True)
        self.assertEqual(ret, 0, ("Failed to stop the volume "
                                  "%s", self.volname))
        g.log.info("Volume stop with force is success")
Example #22
    def test_remove_brick(self):
        """
        In this test case:
        1. Trusted storage Pool of 4 nodes
        2. Create a distributed-replicated volume with 4 bricks
        3. Start the volume
        4. Fuse mount the gluster volume on a node outside the trusted pool
        5. Create some data files
        6. Start remove-brick operation for one replica pair
        7. Restart glusterd on all nodes
        8. Try to commit the remove-brick operation while rebalance
           is in progress, it should fail
        """

        # pylint: disable=too-many-statements
        my_servers = self.servers[0:4]
        my_server_info = {}
        for server in self.servers[0:4]:
            my_server_info[server] = self.all_servers_info[server]
        for index in range(1, 4):
            ret, _, _ = peer_probe(self.servers[0], self.servers[index])
            self.assertEqual(ret, 0, ("peer probe from %s to %s is failed",
                                      self.servers[0], self.servers[index]))
            g.log.info("peer probe is success from %s to "
                       "%s", self.servers[0], self.servers[index])

        # Validate whether the peers are connected. In Jenkins this case was
        # failing because peers were not yet in connected state, hence the
        # retry loop below.
        count = 0
        while count < 30:
            ret = is_peer_connected(self.mnode, my_servers)
            if ret:
                g.log.info("Peers are in connected state")
                break
            sleep(3)
            count = count + 1
        self.assertTrue(ret, "Some peers are not in connected state")

        self.volname = "testvol"
        bricks_list = form_bricks_list(self.mnode, self.volname, 4, my_servers,
                                       my_server_info)
        g.log.info("Creating a volume %s ", self.volname)
        kwargs = {}
        kwargs['replica_count'] = 2
        ret = volume_create(self.mnode,
                            self.volname,
                            bricks_list,
                            force=False,
                            **kwargs)
        self.assertEqual(ret[0], 0, ("Unable"
                                     "to create volume %s" % self.volname))
        g.log.info("Volume created successfully %s", self.volname)

        ret, _, _ = volume_start(self.mnode, self.volname, False)
        self.assertEqual(ret, 0, ("Failed to start the "
                                  "volume %s", self.volname))
        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        # Mounting a volume
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
        g.log.info("Volume mounted successfully : %s", self.volname)

        self.all_mounts_procs = []
        # Creating files
        command = ("cd %s/ ; "
                   "for i in `seq 1 10` ; "
                   "do mkdir l1_dir.$i ; "
                   "for j in `seq 1 5` ; "
                   "do mkdir l1_dir.$i/l2_dir.$j ; "
                   "for k in `seq 1 10` ; "
                   "do dd if=/dev/urandom of=l1_dir.$i/l2_dir.$j/test.$k "
                   "bs=128k count=$k ; "
                   "done ; "
                   "done ; "
                   "done ; " % (self.mounts[0].mountpoint))

        proc = g.run_async(self.mounts[0].client_system,
                           command,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)
        self.io_validation_complete = False
        # Validate IO
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.io_validation_complete = True
        self.assertTrue(ret, "IO failed on some of the clients")

        remove_brick_list = bricks_list[2:4]
        ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
                                 'start')
        self.assertEqual(ret, 0, "Failed to start remove brick operation")
        g.log.info("Remove bricks operation started successfully")
        g.log.info("Restart glusterd on servers %s", self.servers)
        ret = restart_glusterd(self.servers)
        self.assertTrue(
            ret, ("Failed to restart glusterd on servers %s", self.servers))
        g.log.info("Successfully restarted glusterd on servers %s",
                   self.servers)

        ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
                                 'commit')
        self.assertNotEqual(ret, 0, "Remove brick commit ops should be fail")
        g.log.info("Remove bricks commit operation failure is expected")
    def _perform_peer_probe(self, peer):
        """ Performs peer probe to a given node """
        ret, _, err = peer_probe(self.mnode, peer)
        self.assertEqual(
            ret, 0, "Failed to peer probe %s from %s. Error : %s" %
            (peer, self.mnode, err))
    def test_peer_probe_when_glusterd_down(self):
        # pylint: disable=too-many-statements
        '''
        Test script to verify the behavior when we try to peer
        probe a valid node whose glusterd is down
        Also post validate to make sure no core files are created
        under "/", /var/log/core and /tmp  directory

        Ref: BZ#1257394 Provide meaningful error on peer probe and peer detach
        Test Steps:
        1. Check the current peer status
        2. Detach one of the valid nodes which is already part of the cluster
        3. Stop glusterd on that node
        4. Try to attach the above node to the cluster, which must fail with
           a Transport endpoint error
        5. Recheck the test using the hostname, expecting the same result
        6. Start glusterd on that node
        7. Halt/reboot the node
        8. Try to peer probe the halted node, which must fail again
        9. The only error accepted is
           "peer probe: failed: Probe returned with Transport endpoint is not
           connected"
        10. Check peer status and make sure no other nodes are in peer
            rejected state
        '''

        ret, test_timestamp, _ = g.run_local('date +%s')
        test_timestamp = test_timestamp.strip()

        # Detach one of the nodes which is part of the cluster
        g.log.info("detaching server %s ", self.servers[1])
        ret, _, err = peer_detach(self.mnode, self.servers[1])
        msg = 'peer detach: failed: %s is not part of cluster\n' \
              % self.servers[1]
        if ret:
            self.assertEqual(err, msg, "Failed to detach %s "
                             % (self.servers[1]))

        # Bring down glusterd of the server which has been detached
        g.log.info("Stopping glusterd on %s ", self.servers[1])
        ret = stop_glusterd(self.servers[1])
        self.assertTrue(ret, "Fail to stop glusterd on %s " % self.servers[1])

        # Trying to peer probe the node whose glusterd was stopped using IP
        g.log.info("Peer probing %s when glusterd down ", self.servers[1])
        ret, _, err = peer_probe(self.mnode, self.servers[1])
        self.assertNotEqual(ret, 0, "Peer probe should not pass when "
                                    "glusterd is down")
        self.assertEqual(err, "peer probe: failed: Probe returned with "
                              "Transport endpoint is not connected\n")

        # Trying to peer probe the same node with hostname
        g.log.info("Peer probing node %s using hostname with glusterd down ",
                   self.servers[1])
        hostname = g.run(self.servers[1], "hostname")
        ret, _, err = peer_probe(self.mnode, hostname[1].strip())
        self.assertNotEqual(ret, 0, "Peer probe should not pass when "
                                    "glusterd is down")
        self.assertEqual(err, "peer probe: failed: Probe returned with"
                              " Transport endpoint is not connected\n")

        # Start glusterd again for the next set of test steps
        g.log.info("starting glusterd on %s ", self.servers[1])
        ret = start_glusterd(self.servers[1])
        self.assertTrue(ret, "glusterd couldn't start successfully on %s"
                        % self.servers[1])

        # Bring down the network for some time
        network_status = bring_down_network_interface(self.servers[1], 150)

        # Peer probing the node using IP when it is still not online
        g.log.info("Peer probing node %s when network is down",
                   self.servers[1])
        ret, _, err = peer_probe(self.mnode, self.servers[1])
        self.assertNotEqual(ret, 0, "Peer probe passed when it was expected to"
                                    " fail")
        self.assertEqual(err.split("\n")[0], "peer probe: failed: Probe "
                                             "returned with Transport endpoint"
                                             " is not connected")

        # Peer probing the node using hostname when it is still not online
        g.log.info("Peer probing node %s using hostname which is still "
                   "not online ",
                   self.servers[1])
        ret, _, err = peer_probe(self.mnode, hostname[1].strip())
        self.assertNotEqual(ret, 0, "Peer probe should not pass when node "
                                    "has not come online")
        self.assertEqual(err.split("\n")[0], "peer probe: failed: Probe "
                                             "returned with Transport endpoint"
                                             " is not connected")

        ret, _, _ = network_status.async_communicate()
        if ret != 0:
            g.log.error("Failed to perform network interface ops")

        # Peer probe the node must pass
        g.log.info("peer probing node %s", self.servers[1])
        ret, _, err = peer_probe(self.mnode, self.servers[1])
        self.assertEqual(ret, 0, "Peer probe has failed unexpectedly with "
                                 "%s " % err)

        # Checking if core file created in "/", "/tmp" and "/var/log/core"
        ret = is_core_file_created(self.servers, test_timestamp)
        self.assertTrue(ret, "core file found")
    def test_snap_info_from_detached_node(self):
        # pylint: disable=too-many-statements
        """
        Create a volume with single brick
        Create a snapshot
        Activate the snapshot created
        Enable uss on the volume
        Validate snap info on all the nodes
        Peer detach one node
        Validate /var/lib/glusterd/snaps on the detached node
        Probe the detached node
        """

        # Creating volume with single brick on one node
        servers_info_single_node = {
            self.servers[0]: self.all_servers_info[self.servers[0]]
        }
        bricks_list = form_bricks_list(self.mnode, self.volname, 1,
                                       self.servers[0],
                                       servers_info_single_node)
        ret, _, _ = volume_create(self.servers[0], self.volname, bricks_list)
        self.assertEqual(ret, 0, "Volume creation failed")
        g.log.info("Volume %s created successfully", self.volname)

        # Create a snapshot of the volume without volume start should fail
        self.snapname = "snap1"
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                self.snapname,
                                timestamp=False)
        self.assertNotEqual(ret, 0,
                            "Snapshot created without starting the volume")
        g.log.info("Snapshot creation failed as expected")

        # Start the volume
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0,
                         "Failed to start the volume %s" % self.volname)
        g.log.info("Volume start succeeded")

        # Create a snapshot of the volume after volume start
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                self.snapname,
                                timestamp=False)
        self.assertEqual(
            ret, 0, "Snapshot creation failed on the volume %s" % self.volname)
        g.log.info("Snapshot create succeeded")

        # Activate snapshot created
        ret, _, err = snap_activate(self.mnode, self.snapname)
        self.assertEqual(
            ret, 0, "Snapshot activate failed with following error %s" % (err))
        g.log.info("Snapshot activated successfully")

        # Enable uss
        self.vol_options['features.uss'] = 'enable'
        ret = set_volume_options(self.mnode, self.volname, self.vol_options)
        self.assertTrue(
            ret, "gluster volume set %s features.uss "
            "enable failed" % self.volname)
        g.log.info("gluster volume set %s features.uss "
                   "enable successfully", self.volname)

        # Validate that the /var/lib/glusterd/snaps/<snapname> directory
        # exists on all the servers
        self.pathname = "/var/lib/glusterd/snaps/%s" % self.snapname
        for server in self.servers:
            conn = g.rpyc_get_connection(server)
            ret = conn.modules.os.path.isdir(self.pathname)
            self.assertTrue(
                ret, "%s directory doesn't exist on node %s" %
                (self.pathname, server))
            g.log.info("%s path exists on node %s", self.pathname, server)
        g.rpyc_close_deployed_servers()

        # Peer detach one node
        self.random_node_peer_detach = random.choice(self.servers[1:])
        ret = peer_detach_servers(self.mnode,
                                  self.random_node_peer_detach,
                                  validate=True)
        self.assertTrue(
            ret,
            "Peer detach of node: %s failed" % self.random_node_peer_detach)
        g.log.info("Peer detach succeeded")

        # /var/lib/glusterd/snaps/<snapname> directory should not be present
        conn = g.rpyc_get_connection(self.random_node_peer_detach)
        ret = conn.modules.os.path.isdir(self.pathname)
        self.assertFalse(
            ret, "%s directory should not exist on the peer "
            "which is detached from the cluster %s" %
            (self.pathname, self.random_node_peer_detach))
        g.log.info("Expected: %s path doesn't exist on peer detached node %s",
                   self.pathname, self.random_node_peer_detach)
        g.rpyc_close_deployed_servers()

        # Peer probe the detached node
        ret, _, _ = peer_probe(self.mnode, self.random_node_peer_detach)
        self.assertEqual(
            ret, 0,
            "Peer probe of node: %s failed" % self.random_node_peer_detach)
        g.log.info("Peer probe succeeded")

        # Validating peers are in connected state
        count = 0
        while count < 10:
            sleep(2)
            ret = self.validate_peers_are_connected()
            if ret:
                break
            count += 1
        self.assertTrue(ret, "Peers are not in connected state")
        g.log.info("Peer are in connected state")