Example #1
        def mount_clone_and_io(clone, mpoint):
            # define mounts
            self.mount_points = []
            self.mounts_dict_list = []
            for client in self.all_clients_info:
                mount = {
                    'protocol': self.mount_type,
                    'server': self.mnode,
                    'volname': clone,
                    'client': self.all_clients_info[client],
                    'mountpoint': mpoint,
                    'options': ''
                }
                self.mounts_dict_list.append(mount)
            self.mounts1 = create_mount_objs(self.mounts_dict_list)
            g.log.info("Successfully made entry in self.mounts")
            # Mounting a volume
            g.log.info("Starting to mount volume")
            ret, _, _ = mount_volume(clone, self.mount_type, mpoint,
                                     self.mnode, self.clients[0])
            self.assertEqual(ret, 0, "Volume mount failed for clone1")
            g.log.info("%s mounted Successfully", clone)

            # Checking volume mounted or not
            ret = is_mounted(clone, mpoint, self.mnode, self.clients[0],
                             self.mount_type)
            self.assertTrue(ret,
                            "Volume not mounted on mount point: %s" % mpoint)
            g.log.info("Volume %s mounted on %s", clone, mpoint)
            return 0
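
        # Hedged usage sketch (not part of the original snippet): assuming the
        # enclosing test method has already cloned a snapshot into a volume
        # named by the hypothetical attribute self.clone1 and uses the
        # hypothetical mount point "/mnt/clone1", the nested helper above
        # would typically be driven like this:
        ret = mount_clone_and_io(self.clone1, "/mnt/clone1")
        self.assertEqual(ret, 0, "Failed to mount clone1 and verify the mount")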
Example #2
    def _set_option_and_mount_and_unmount_volumes(self,
                                                  option="",
                                                  is_allowed=True):
        """
        Setting volume option and then mounting and unmounting the volume
        """
        # Check if an option is passed
        if option:
            # Setting the option passed as an argument
            ret = set_volume_options(self.mnode, self.volname,
                                     {option: self.mounts[0].client_system})
            self.assertTrue(
                ret, "Failed to set %s option in volume: %s" %
                (option, self.volname))
            g.log.info("Successfully set %s option in volume: %s", option,
                       self.volname)

        # Mounting a volume
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)

        # Checking if volume was successfully mounted or not
        ret = is_mounted(self.volname,
                         mtype=self.mount_type,
                         mpoint=self.mounts[0].mountpoint,
                         mserver=self.mnode,
                         mclient=self.mounts[0].client_system)
        if is_allowed:
            self.assertTrue(ret,
                            "Failed to mount the volume: %s" % self.volname)
        else:
            self.assertFalse(
                ret, "Unexpected: Mounting"
                " the volume %s was successful" % self.volname)

        # Unmount only if the volume is supposed to be mounted
        if is_allowed:
            ret, _, _ = umount_volume(self.mounts[0].client_system,
                                      self.mounts[0].mountpoint,
                                      mtype=self.mount_type)
            self.assertEqual(ret, 0,
                             "Failed to unmount the volume: %s" % self.volname)
    def tearDown(self):
        # Reset firewall services to the zone
        if not self.firewall_added:
            ret = self._add_firewall_services(self.servers[:2])
            if not ret:
                raise ExecutionError("Failed to add firewall services")

        # Reload firewall services
        ret = self._reload_firewall_service(self.servers[:2])
        if not ret:
            raise ExecutionError("Failed to reload firewall services")

        # Cleanup the volumes and unmount them, if mounted
        vol_list = get_volume_list(self.mnode)
        if vol_list is not None:
            for volume in vol_list:
                ret = is_mounted(volume,
                                 mpoint="/mnt/distribute-vol",
                                 mserver=self.mnode,
                                 mclient=self.servers[1],
                                 mtype="glusterfs")
                if ret:
                    ret, _, _ = umount_volume(mclient=self.servers[1],
                                              mpoint="/mnt/distribute-vol")
                    if ret:
                        raise ExecutionError("Failed to unmount volume")

                ret = cleanup_volume(self.mnode, volume)
                if not ret:
                    raise ExecutionError("Failed to cleanup volume")
                g.log.info("Volume cleaned up successfully : %s", volume)

        # Peer probe detached servers
        pool = nodes_from_pool_list(self.mnode)
        for node in pool:
            peer_detach(self.mnode, node)
        ret = peer_probe_servers(self.mnode, self.servers)
        if not ret:
            raise ExecutionError("Failed to probe detached "
                                 "servers %s" % self.servers)
        g.log.info("Peer probe success for detached "
                   "servers %s", self.servers)
        self.get_super_method(self, 'tearDown')()
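
    # Hedged sketches (not shown in this example): the firewall helpers
    # referenced in tearDown above might look roughly like the following,
    # assuming firewalld with its standard 'glusterfs' service definition.
    def _add_firewall_services(self, servers):
        """Possible implementation: permanently add the glusterfs service."""
        for server in servers:
            ret, _, _ = g.run(server, "firewall-cmd --zone=public "
                              "--add-service=glusterfs --permanent")
            if ret:
                return False
        return True

    def _reload_firewall_service(self, servers):
        """Possible implementation: reload firewalld on the given servers."""
        for server in servers:
            ret, _, _ = g.run(server, "firewall-cmd --reload")
            if ret:
                return False
        return True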
Example #4
    def test_validate_authreject_vol(self):
        """
        -Set Authentication Reject for client1
        -Check if bricks are online
        -Mounting the vol on client1
        -Check if bricks are online
        -Mounting the vol on client2
        -Reset the Volume
        -Check if bricks are online
        -Mounting the vol on client1
        """
        # pylint: disable=too-many-statements

        # Obtain hostname of clients
        ret, hostname_client1, _ = g.run(self.clients[0], "hostname")
        self.assertEqual(
            ret, 0,
            ("Failed to obtain hostname of client %s" % self.clients[0]))
        g.log.info("Obtained hostname of client. IP- %s, hostname- %s",
                   self.clients[0], hostname_client1.strip())

        # Set Authentication
        option = {"auth.reject": hostname_client1.strip()}
        ret = set_volume_options(self.mnode, self.volname, option)
        self.assertTrue(
            ret, ("Failed to set authentication with option: %s" % option))
        g.log.info("Authentication Set successfully with option: %s", option)

        # Fetching all the bricks
        self.mountpoint = "/mnt/testvol"
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Brick list is empty")
        g.log.info("Brick List : %s", bricks_list)

        # Check are bricks online
        ret = are_bricks_online(self.mnode, self.volname, bricks_list)
        self.assertTrue(ret, "All bricks are not online")

        # Using this way to check because of bug 1586036
        # Mounting volume
        ret, _, _ = mount_volume(self.volname, self.mount_type,
                                 self.mountpoint, self.mnode, self.clients[0])

        # Checking if volume is mounted
        out = is_mounted(self.volname,
                         self.mountpoint,
                         self.mnode,
                         self.clients[0],
                         self.mount_type,
                         user='******')
        if (ret == 0) and (not out):
            g.log.error("Mount executed successfully due to bug 1586036")
        elif (ret == 1) and (not out):
            g.log.info("Expected: Mounting has failed")
        else:
            raise ExecutionError(
                "Unexpected Mounting of Volume %s successful" % self.volname)

        # Checking client logs for authentication error
        cmd = ("grep AUTH_FAILED /var/log/glusterfs/mnt-" "testvol.log")
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(
            ret, 0, "Mounting has not failed due to"
            "authentication error")
        g.log.info("Mounting has failed due to authentication error")

        # Mounting the vol on client2
        # Check bricks are online
        ret = are_bricks_online(self.mnode, self.volname, bricks_list)
        self.assertTrue(ret, "All bricks are not online")

        # Mounting Volume
        ret, _, _ = mount_volume(self.volname, self.mount_type,
                                 self.mountpoint, self.mnode, self.clients[1])
        self.assertEqual(ret, 0, "Failed to mount volume")
        g.log.info("Mounted Successfully")

        # Checking if volume is mounted
        out = is_mounted(self.volname,
                         self.mountpoint,
                         self.mnode,
                         self.clients[1],
                         self.mount_type,
                         user='******')
        self.assertTrue(out, "Volume %s has failed to mount" % self.volname)

        # Reset Volume
        ret, _, _ = volume_reset(mnode=self.mnode, volname=self.volname)
        self.assertEqual(ret, 0, "Failed to reset volume")
        g.log.info("Volume %s reset operation is successful", self.volname)

        # Checking if bricks are online
        ret = are_bricks_online(self.mnode, self.volname, bricks_list)
        self.assertTrue(ret, "All bricks are not online")

        # Mounting Volume
        ret, _, _ = mount_volume(self.volname, self.mount_type,
                                 self.mountpoint, self.mnode, self.clients[0])
        self.assertEqual(ret, 0, "Failed to mount volume")
        g.log.info("Mounted Successfully")

        # Checking if Volume is mounted
        out = is_mounted(self.volname,
                         self.mountpoint,
                         self.servers[0],
                         self.clients[0],
                         self.mount_type,
                         user='******')
        self.assertTrue(out, "Volume %s has failed to mount" % self.volname)
        g.log.info("Volume is mounted successfully %s", self.volname)
    def test_snap_rebalance(self):
        # pylint: disable=too-many-statements, too-many-locals
        """

        Snapshot rebalance contains tests which verify snapshot clone,
        creating snapshots and performing I/O on mountpoints

        Steps:

        1. Create snapshot of a volume
        2. Activate snapshot
        3. Clone snapshot and activate
        4. Mount cloned volume
        5. Perform I/O on mount point
        6. Calculate arequal for bricks and mountpoints
        7. Add more bricks to cloned volume
        8. Initiate rebalance
        9. Validate arequal of bricks and mountpoints
        """

        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(
            ret, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume %s", self.snap,
                   self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0,
                         ("Failed to Activate snapshot %s" % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # Creating a Clone of snapshot:
        g.log.info("creating Clone Snapshot")
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
        self.assertEqual(ret, 0, ("Failed to clone volume %s" % self.clone))
        g.log.info("clone volume %s created successfully", self.clone)

        # Starting clone volume
        g.log.info("starting clone volume")
        ret, _, _ = volume_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to start %s" % self.clone)
        g.log.info("clone volume %s started successfully", self.clone)

        # Mounting a clone volume
        g.log.info("Mounting created clone volume")
        ret, _, _ = mount_volume(self.clone, self.mount_type, self.mount1,
                                 self.mnode, self.clients[0])
        self.assertEqual(ret, 0,
                         "clone Volume mount failed for %s" % self.clone)
        g.log.info("cloned volume %s mounted Successfully", self.clone)

        # Validate clone volume mounted or not
        g.log.info("Validate clone volume mounted or not")
        ret = is_mounted(self.clone, self.mount1, self.mnode, self.clients[0],
                         self.mount_type)
        self.assertTrue(
            ret, "Cloned Volume not mounted on mount point: %s" % self.mount1)
        g.log.info("Cloned Volume %s mounted on %s", self.clone, self.mount1)

        # write files to mountpoint
        g.log.info("Starting IO on %s mountpoint...", self.mount1)
        all_mounts_procs = []
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mount1))
        proc = g.run(self.clients[0], cmd)
        all_mounts_procs.append(proc)

        self.check_arequal()

        # expanding volume
        g.log.info("Starting to expand volume")
        ret = expand_volume(self.mnode, self.clone, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, "Failed to expand volume %s" % self.clone)
        g.log.info("Expand volume successful")

        ret, _, _ = rebalance_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to start rebalance")
        g.log.info("Successfully started rebalance on the "
                   "volume %s", self.clone)

        # Log Rebalance status
        g.log.info("Log Rebalance status")
        _, _, _ = rebalance_status(self.mnode, self.clone)

        # Wait for rebalance to complete
        g.log.info("Waiting for rebalance to complete")
        ret = wait_for_rebalance_to_complete(self.mnode, self.clone)
        self.assertTrue(ret, ("Rebalance is not yet complete "
                              "on the volume %s", self.clone))
        g.log.info("Rebalance is successfully complete on "
                   "the volume %s", self.clone)

        # Check Rebalance status after rebalance is complete
        g.log.info("Checking Rebalance status")
        ret, _, _ = rebalance_status(self.mnode, self.clone)
        self.assertEqual(ret, 0, ("Failed to get rebalance status for "
                                  "the volume %s", self.clone))
        g.log.info("Successfully got rebalance status of the "
                   "volume %s", self.clone)

        self.check_arequal()
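
    def check_arequal(self):
        """Hedged sketch of the helper used above (its real body is not shown
        in this listing): for every subvolume of the cloned volume, compare
        each brick's arequal checksum against the first brick's, reusing the
        'arequal-checksum' invocation seen in the later examples."""
        subvols = get_subvols(self.mnode, self.clone)
        for subvol_brick_list in subvols['volume_subvols']:
            first_total = None
            for brick in subvol_brick_list:
                node, brick_path = brick.split(':')
                cmd = ('arequal-checksum -p %s '
                       '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
                ret, arequal, _ = g.run(node, cmd)
                self.assertFalse(
                    ret, 'Failed to get arequal on brick %s' % brick)
                total = arequal.splitlines()[-1].split(':')[-1]
                if first_total is None:
                    first_total = total
                self.assertEqual(
                    first_total, total,
                    'Arequal of brick %s differs within its subvol' % brick)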
    def test_subdir_when_renamed(self):

        # pylint: disable=too-many-statements
        """
        Mount the volume
        Create 1 subdir on mountpoint "d1"
        Auth allow - Client1(d1), Client2(full volume)
        Mount the subdir "d1" on client1 and volume on client2
        Start IO's on all the mount points
        Perform rename operation from client2. Rename the subdir
        "d1" to "d1_renamed"
        Unmount volume and subdir from clients
        Try mounting "d1" on client1. This should fail.
        Try mounting "d1_renamed" on client1. This should fail.
        Again set authentication. Auth allow -
        Client1(d1_renamed), Client2(full volume)
        Mount "d1_renamed" on client1 and volume on client2
        """

        # Create  directory d1 on mount point
        ret = mkdir(self.mounts[0].client_system,
                    "%s/d1" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'd1' on"
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        # unmount volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volumes Unmount failed")
        g.log.info("Volumes Unmounted successfully")

        # Set authentication on the subdirectoy "d1" to access by client1
        # and volume to access by client2
        g.log.info(
            'Setting authentication on subdirectory d1 to access '
            'by client %s and on volume to access by client %s',
            self.clients[0], self.clients[1])
        ret = set_auth_allow(self.volname, self.mnode, {
            '/d1': [self.clients[0]],
            '/': [self.clients[1]]
        })
        self.assertTrue(
            ret, 'Failed to set Authentication on volume %s' % self.volname)

        # Creating mount list for mounting subdir mount and volume
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/d1" % self.volname
        self.subdir_mounts[0].client_system = self.clients[0]
        self.subdir_mounts[1].client_system = self.clients[1]

        # Mount Subdirectory d1 on client 1 and volume on client 2
        for mount_obj in self.subdir_mounts:
            mountpoint = mount_obj.mountpoint
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount  %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Successfully mounted sub directory and volume to"
                   "authenticated clients")

        # Start IO on all the mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.subdir_mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.subdir_mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Rename the subdir "d1" to "d1_renamed" from client2
        source_fpath = "%s/d1" % mountpoint
        dest_fpath = "%s/d1_renamed" % mountpoint
        ret = move_file(self.clients[1], source_fpath, dest_fpath)
        self.assertTrue(ret, "Rename subdirectory failed")
        g.log.info('Renamed directory %s to %s', source_fpath, dest_fpath)

        # unmount volume and subdir from client
        ret = self.unmount_volume(self.subdir_mounts)
        self.assertTrue(ret, "Volumes UnMount failed")
        g.log.info("Volumes Unmounted successfully")

        # Try mounting subdir "d1" on client1
        _, _, _ = mount_volume("%s/d1" % self.volname, self.mount_type,
                               mountpoint, self.mnode, self.clients[0])

        ret = is_mounted(self.volname, mountpoint, self.mnode, self.clients[0],
                         self.mount_type)
        self.assertEqual(
            ret, 0, "d1 mount should have failed.But d1 is"
            "successfully mounted on mount point: %s" % mountpoint)
        g.log.info("subdir %s/d1 is not mounted as expected %s", self.volname,
                   mountpoint)

        # Try mounting subdir "d1_renamed" on client1
        _, _, _ = mount_volume("%s/d1_renamed" % self.volname, self.mount_type,
                               mountpoint, self.mnode, self.clients[0])

        ret = is_mounted("%s/d1_renamed" % self.volname, mountpoint,
                         self.mnode, self.clients[0], self.mount_type)
        self.assertEqual(
            ret, 0, "d1_renamed mount should have failed.But"
            "d1_renamed is successfully mounted on : %s" % mountpoint)
        g.log.info("subdir %s/d1_renamed is not mounted as expected %s",
                   self.volname, mountpoint)

        # Set authentication on the subdirectoy "d1_renamed" to access
        # by client1 and volume to access by client2
        g.log.info(
            'Setting authentication on subdirectory d1_renamed to '
            'access by client %s and on volume to access by client %s',
            self.clients[0], self.clients[1])
        ret = set_auth_allow(self.volname, self.mnode, {
            '/d1_renamed': [self.clients[0]],
            '/': [self.clients[1]]
        })
        self.assertTrue(
            ret, 'Failed to set Authentication on volume %s' % self.volname)

        # Overwriting the list of subdir mount, directory d1 to d1_renamed
        self.subdir_mounts[0].volname = "%s/d1_renamed" % self.volname

        # Mount Subdirectory d1_renamed on client 1 and volume on client 2
        for mount_obj in self.subdir_mounts:
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount  %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)

        g.log.info("Successfully mounted sub directory and volume to"
                   "authenticated clients")

        # Get stat of all the files/dirs created from both clients.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")
Example #7
    def test_subdir_with_removebrick(self):

        # pylint: disable=too-many-statements
        """
        Mount the volume
        Create 2 subdirs on the client: subdir1 and subdir2
        Auth allow - Client1(subdir1,subdir2), Client2(subdir1,subdir2)
        Mount the subdirs on their respective clients
        Start IO's on both subdirs
        Perform remove-brick
        Validate on the clients that the subdirs are still mounted after the
        remove-brick operation is performed
        """
        # Create  directories subdir1 and subdir2 on mount point
        ret = mkdir(self.mounts[0].client_system,
                    "%s/subdir1" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'subdir1' in"
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        ret = mkdir(self.mounts[0].client_system,
                    "%s/subdir2" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'subdir2' in"
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        # unmount volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volumes UnMount failed")
        g.log.info("Volumes UnMounted successfully")

        # Set authentication on the subdirectory subdir1
        # and subdir2 to access by 2 clients
        g.log.info(
            'Setting authentication on subdir1 and subdir2 '
            'for clients %s and %s', self.clients[0], self.clients[1])
        ret = set_auth_allow(
            self.volname, self.mnode, {
                '/subdir1': [self.clients[0], self.clients[1]],
                '/subdir2': [self.clients[0], self.clients[1]]
            })
        self.assertTrue(
            ret, 'Failed to set Authentication on volume %s' % self.volname)

        self.mpoint = "/mnt/Mount_Point1"

        # Mount Subdir1 mount on client 1
        _, _, _ = mount_volume("%s/subdir1" % self.volname, self.mount_type,
                               self.mpoint, self.mnode, self.clients[0])

        # Checking subdir1 is mounted or not
        ret = is_mounted("%s/subdir1" % self.volname, self.mpoint, self.mnode,
                         self.clients[0], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir1", self.volname,
                   self.mpoint)

        # Mount Subdir2 mount on client 2
        _, _, _ = mount_volume("%s/subdir2" % self.volname, self.mount_type,
                               self.mpoint, self.mnode, self.clients[1])

        # Checking subdir2 is mounted or not
        ret = is_mounted("%s/subdir2" % self.volname, self.mpoint, self.mnode,
                         self.clients[1], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir2", self.volname,
                   self.mpoint)

        # Start IO on all the subdir mounts.
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/subdir1" % self.volname
        self.subdir_mounts[1].volname = "%s/subdir2" % self.volname
        all_mounts_procs = []
        count = 1
        for mount_obj in self.subdir_mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       self.mpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, self.mpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.subdir_mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Perform remove brick operation when subdir is mounted on client
        g.log.info("Start removing bricks from volume")
        ret = shrink_volume(self.mnode, self.volname, rebalance_timeout=600)
        self.assertTrue(ret, ("Remove brick operation failed on "
                              "%s", self.volname))
        g.log.info("Remove brick operation is successful on "
                   "volume %s", self.volname)

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("All volume %s processes failed to come up "
                              "online", self.volname))
        g.log.info("All volume %s processes came up "
                   "online successfully", self.volname)

        # Log Volume Info and Status after performing remove brick
        g.log.info("Logging volume info and Status after shrinking volume")
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Logging volume info and status failed on "
                              "volume %s", self.volname))
        g.log.info("Successful in logging volume info and status of volume %s",
                   self.volname)

        # Again Checking subdir1 is mounted or not on Client 1
        ret = is_mounted("%s/subdir1" % self.volname, self.mpoint, self.mnode,
                         self.clients[0], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir1", self.volname,
                   self.mpoint)

        # Again Checking subdir2 is mounted or not on Client 2
        ret = is_mounted("%s/subdir2" % self.volname, self.mpoint, self.mnode,
                         self.clients[1], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir2", self.volname,
                   self.mpoint)
    def test_brick_log_messages(self):
        '''
        -> Create volume
        -> Mount volume
        -> Write files on mount point
        -> Delete files from mount point
        -> Check for any error messages in all brick logs
        '''

        # checking volume mounted or not
        for mount_obj in self.mounts:
            ret = is_mounted(self.volname, mount_obj.mountpoint, self.mnode,
                             mount_obj.client_system, self.mount_type)
            self.assertTrue(ret, "Not mounted on %s" % mount_obj.client_system)
            g.log.info("Mounted on %s", mount_obj.client_system)

        # run IOs
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 2 "
                "--dir-length 5 "
                "--max-num-of-dirs 3 "
                "--num-of-files 10 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            self.counter = self.counter + 10

        # Validate IO
        self.assertTrue(validate_io_procs(self.all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # Getting timestamp
        _, timestamp, _ = g.run_local('date +%s')
        timestamp = timestamp.strip()

        # Getting all bricks
        brick_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(brick_list, "Failed to get brick list")
        g.log.info("Successful in getting brick list %s", brick_list)

        # Creating dictionary for each node brick path,
        # here nodes are keys and brick paths are values
        brick_path_dict = {}
        for brick in brick_list:
            node, brick_path = brick.split(r':')
            brick_path_list = brick_path.split(r'/')
            del brick_path_list[0]
            brick_log_path = '-'.join(brick_path_list)
            brick_path_dict[node] = brick_log_path

        for node in brick_path_dict:
            #  Copying brick logs into other file for backup purpose
            ret, _, _ = g.run(
                node, 'cp /var/log/glusterfs/bricks/%s.log '
                '/var/log/glusterfs/bricks/%s_%s.log' %
                (brick_path_dict[node], brick_path_dict[node], timestamp))
            if ret:
                raise ExecutionError("Failed to copy brick logs of %s" % node)
            g.log.info("Brick logs copied successfully on node %s", node)

            # Clearing the existing brick log file
            ret, _, _ = g.run(
                node, 'echo > /var/log/glusterfs/bricks/%s.log' %
                brick_path_dict[node])
            if ret:
                raise ExecutionError("Failed to clear brick log file on %s" %
                                     node)
            g.log.info("Successfully cleared the brick log files on node %s",
                       node)

        # Deleting files from mount point
        ret, _, _ = g.run(self.mounts[0].client_system,
                          'rm -rf %s/*' % self.mounts[0].mountpoint)
        self.assertEqual(
            ret, 0, "Failed to delete files from mountpoint %s" %
            self.mounts[0].mountpoint)
        g.log.info("Files deleted successfully from mountpoint %s",
                   self.mounts[0].mountpoint)

        # Searching for error messages in brick logs after deleting
        # files from mountpoint
        for node in brick_path_dict:
            ret, out, _ = g.run(
                node, "grep ' E ' /var/log/glusterfs/bricks/%s.log | wc -l" %
                brick_path_dict[node])
            self.assertEqual(int(out), 0, "Found Error messages in brick "
                             "log %s" % node)
            g.log.info("No error messages found in brick log %s", node)
Example #9
    def test_rebalance_status_from_newly_probed_node(self):

        # Peer probe first 3 servers
        servers_info_from_three_nodes = {}
        for server in self.servers[0:3]:
            servers_info_from_three_nodes[server] = self.all_servers_info[
                server]
            # Peer probe the first 3 servers
            ret, _, _ = peer_probe(self.mnode, server)
            self.assertEqual(ret, 0, "Peer probe failed to %s" % server)

        self.volume['servers'] = self.servers[0:3]
        # create a volume using the first 3 nodes
        ret = setup_volume(self.mnode,
                           servers_info_from_three_nodes,
                           self.volume,
                           force=True)
        self.assertTrue(
            ret, "Failed to create"
            "and start volume %s" % self.volname)

        # Mounting a volume
        ret = self.mount_volume(self.mounts)
        self.assertTrue(ret, "Volume mount failed for %s" % self.volname)

        # Checking volume mounted or not
        ret = is_mounted(self.volname, self.mounts[0].mountpoint, self.mnode,
                         self.mounts[0].client_system, self.mount_type)
        self.assertTrue(
            ret, "Volume not mounted on mount point: %s" %
            self.mounts[0].mountpoint)
        g.log.info("Volume %s mounted on %s", self.volname,
                   self.mounts[0].mountpoint)

        # run IOs
        g.log.info("Starting IO on all mounts...")
        self.counter = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = (
                "python %s create_deep_dirs_with_files "
                "--dirname-start-num %d "
                "--dir-depth 10 "
                "--dir-length 5 "
                "--max-num-of-dirs 3 "
                "--num-of-files 100 %s" %
                (self.script_upload_path, self.counter, mount_obj.mountpoint))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertEqual(ret, 0,
                             "IO failed on %s" % mount_obj.client_system)
            self.counter = self.counter + 10

        # add a brick to the volume and start rebalance
        brick_to_add = form_bricks_list(self.mnode, self.volname, 1,
                                        self.servers[0:3],
                                        servers_info_from_three_nodes)
        ret, _, _ = add_brick(self.mnode, self.volname, brick_to_add)
        self.assertEqual(ret, 0, "Failed to add a brick to %s" % self.volname)

        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to start rebalance")

        # peer probe a new node from existing cluster
        ret, _, _ = peer_probe(self.mnode, self.servers[3])
        self.assertEqual(ret, 0, "Peer probe failed")

        ret = get_rebalance_status(self.servers[3], self.volname)
        self.assertIsNotNone(ret, "Failed to get rebalance status")
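
        # Hedged follow-up sketch (not part of the original example): once the
        # rebalance status is reachable from the newly probed node, that same
        # node could also be used to wait for the rebalance to finish, reusing
        # the helper seen in the other examples.
        ret = wait_for_rebalance_to_complete(self.servers[3], self.volname)
        self.assertTrue(ret, "Rebalance did not complete when monitored from "
                        "newly probed node %s" % self.servers[3])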
    def test_volume_checksum_after_changing_network_ping_timeout(self):

        # Create Volume
        # Mount the Volume
        # Create some files on mount point
        # calculate the checksum of Mount point
        # Check the default network ping timeout of the volume.
        # Change network ping timeout to some other value
        # calculate checksum again
        # checksum should be same without remounting the volume.

        # Mounting volume as glusterfs
        ret = self.mount_volume(self.mounts)
        self.assertTrue(ret, "volume mount failed for %s" % self.volname)
        g.log.info("Volume mounted successfully : %s", self.volname)

        # Checking volume mounted or not
        ret = is_mounted(self.volname, self.mounts[0].mountpoint, self.mnode,
                         self.mounts[0].client_system, self.mount_type)
        self.assertTrue(ret, "Volume not mounted on mount point: %s"
                        % self.mounts[0].mountpoint)
        g.log.info("Volume %s mounted on %s", self.volname,
                   self.mounts[0].mountpoint)

        # run IOs
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_files -f 10 --base-file-name newfile %s"
                   % (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)

        # Validate IO
        g.log.info("Wait for IO to complete and validate IO ...")
        ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # Checksum calculation of mount point before
        # changing network.ping-timeout
        ret, before_checksum = collect_mounts_arequal(self.mounts)
        self.assertTrue(ret, "checksum failed to calculate for mount point")
        g.log.info("checksum calculated successfully")

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")

        # performing gluster volume get volname all and
        # getting network ping time out value
        volume_options = get_volume_options(self.mnode, self.volname, "all")
        self.assertIsNotNone(volume_options, "gluster volume get %s all "
                                             "command failed" % self.volname)
        g.log.info("gluster volume get %s all command executed "
                   "successfully", self.volname)
        ret = False
        if re.search(r'\b42\b', volume_options['network.ping-timeout']):
            ret = True
        self.assertTrue(ret, "network ping time out value is not correct")
        g.log.info("network ping time out value is correct")

        # Changing network ping time out value to specific volume
        self.networking_ops = {'network.ping-timeout': '12'}
        ret = set_volume_options(self.mnode, self.volname,
                                 self.networking_ops)
        self.assertTrue(ret, "Changing of network.ping-timeout "
                             "failed for :%s" % self.volname)
        g.log.info("Changing of network.ping-timeout "
                   "success for :%s", self.volname)

        # Checksum calculation of mount point after
        # changing network.ping-timeout
        ret, after_checksum = collect_mounts_arequal(self.mounts)
        self.assertTrue(ret, "checksum failed to calculate for mount point")
        g.log.info("checksum calculated successfully")

        # comparing list of checksums of mountpoints before and after
        # network.ping-timeout change
        self.assertItemsEqual(before_checksum, after_checksum,
                              "Checksum not same before and after "
                              "network.ping-timeout change")
        g.log.info("checksum same before and after "
                   "changing network.ping-timeout")

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")
    def test_snap_self_heal(self):
        """
        Steps:

        1. Create a volume
        2. Mount the volume
        3. Create a snapshot of that volume
        4. Activate the snapshot
        5. Clone the snapshot and mount it
        6. Perform I/O
        7. Bring down a few bricks from the volume without
           affecting the volume or cluster
        8. Perform I/O
        9. Bring the offline bricks back online
        10. Validate heal is complete with arequal

        """
        # pylint: disable=too-many-statements, too-many-locals
        # Creating snapshot:
        g.log.info("Starting to Create snapshot")
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(
            ret, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume %s", self.snap,
                   self.volname)

        # Activating snapshot
        g.log.info("Starting to Activate Snapshot")
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0,
                         ("Failed to Activate snapshot %s" % self.snap))
        g.log.info("Snapshot %s activated successfully", self.snap)

        # snapshot list
        ret, _, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, ("Failed to list all the snapshot"))
        g.log.info("Snapshot list command was successful")

        # Creating a Clone volume from snapshot:
        g.log.info("Starting to Clone volume from Snapshot")
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone)
        self.assertEqual(ret, 0, ("Failed to clone %s from snapshot %s" %
                                  (self.clone, self.snap)))
        g.log.info("%s created successfully", self.clone)

        # Start the cloned volume
        g.log.info("Starting the cloned volume")
        ret, _, _ = volume_start(self.mnode, self.clone)
        self.assertEqual(ret, 0, "Failed to start clone %s" % self.clone)
        g.log.info("clone volume %s started successfully", self.clone)

        # Mounting a clone volume
        g.log.info("Mounting a clone volume")
        ret, _, _ = mount_volume(self.clone, self.mount_type, self.mount1,
                                 self.mnode, self.clients[0])
        self.assertEqual(ret, 0,
                         "Failed to mount clone Volume %s" % self.clone)
        g.log.info("Clone volume %s mounted Successfully", self.clone)

        # Checking cloned volume mounted or not
        ret = is_mounted(self.clone, self.mount1, self.mnode, self.clients[0],
                         self.mount_type)
        self.assertTrue(
            ret,
            "Failed to mount clone volume on mount point: %s" % self.mount1)
        g.log.info("clone Volume %s mounted on %s", self.clone, self.mount1)

        # write files on all mounts
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mount1)
        all_mounts_procs = []
        cmd = ("python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mount1))
        proc = g.run(self.clients[0], cmd)
        all_mounts_procs.append(proc)
        g.log.info("Successful in creating I/O on mounts")

        # get the bricks from the volume
        g.log.info("Fetching bricks for the volume : %s", self.clone)
        bricks_list = get_all_bricks(self.mnode, self.clone)
        g.log.info("Brick List : %s", bricks_list)

        # Select bricks to bring offline
        g.log.info("Starting to bring bricks to offline")
        bricks_to_bring_offline_dict = (select_bricks_to_bring_offline(
            self.mnode, self.volname))
        bricks_to_bring_offline = list(filter(
            None, (bricks_to_bring_offline_dict['hot_tier_bricks'] +
                   bricks_to_bring_offline_dict['cold_tier_bricks'] +
                   bricks_to_bring_offline_dict['volume_bricks'])))
        g.log.info("Brick to bring offline: %s ", bricks_to_bring_offline)
        ret = bring_bricks_offline(self.clone, bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring the bricks offline")
        g.log.info("Successful in bringing bricks: %s offline",
                   bricks_to_bring_offline)

        # Offline Bricks list
        offline_bricks = get_offline_bricks_list(self.mnode, self.clone)
        self.assertIsNotNone(
            offline_bricks, "Failed to get offline bricklist"
            "for volume %s" % self.clone)
        for bricks in offline_bricks:
            self.assertIn(bricks, bricks_to_bring_offline,
                          "Failed to validate "
                          "Bricks offline")
        g.log.info("Bricks Offline: %s", offline_bricks)

        # Online Bricks list
        online_bricks = get_online_bricks_list(self.mnode, self.clone)
        self.assertIsNotNone(
            online_bricks, "Failed to get online bricks"
            " for volume %s" % self.clone)
        g.log.info("Bricks Online: %s", online_bricks)

        # write files mountpoint
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mount1)
        all_mounts_procs = []
        cmd = ("python %s create_files "
               "-f 10 --base-file-name file %s" %
               (self.script_upload_path, self.mount1))
        proc = g.run(self.clients[0], cmd)
        all_mounts_procs.append(proc)
        g.log.info("Successful in creating I/O on mounts")

        # Bring all bricks online
        g.log.info("bring all bricks online")
        ret = bring_bricks_online(self.mnode, self.clone,
                                  bricks_to_bring_offline)
        self.assertTrue(ret, "Failed to bring bricks online")
        g.log.info("Successful in bringing all bricks online")

        # Validate Bricks are online
        g.log.info("Validating all bricks are online")
        ret = are_bricks_online(self.mnode, self.clone, bricks_list)
        self.assertTrue(ret, "Failed to bring all the bricks online")
        g.log.info("bricks online: %s", bricks_list)

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.clone)
        self.assertTrue(ret, ("Failed to wait for volume %s processes to "
                              "be online" % self.clone))
        g.log.info(
            "Successful in waiting for volume %s processes to be "
            "online", self.clone)

        # Verify volume's all process are online
        g.log.info("Verifying volume's all process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.clone)
        self.assertTrue(
            ret, ("Volume %s : All process are not online" % self.clone))
        g.log.info("Volume %s : All process are online", self.clone)

        # wait for the heal process to complete
        g.log.info("waiting for heal process to complete")
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, "Failed to complete the heal process")
        g.log.info("Successfully completed heal process")

        # Check areequal
        # get the subvolumes
        g.log.info("Starting to get sub-volumes for volume %s", self.clone)
        subvols = get_subvols(self.mnode, self.clone)
        num_subvols = len(subvols['volume_subvols'])
        g.log.info("Number of subvolumes in volume %s:", num_subvols)

        # Get arequals and compare
        g.log.info("Starting to Compare areequals")
        for i in range(0, num_subvols):
            # Get arequal for first brick
            subvol_brick_list = subvols['volume_subvols'][i]
            node, brick_path = subvol_brick_list[0].split(':')
            command = ('arequal-checksum -p %s '
                       '-i .glusterfs -i .landfill -i .trashcan' % brick_path)
            ret, arequal, _ = g.run(node, command)
            self.assertFalse(ret, 'Failed to get arequal on brick %s'
                             % subvol_brick_list[0])
            first_brick_total = arequal.splitlines()[-1].split(':')[-1]

            # Get arequal for every brick and compare with first brick
            for brick in subvol_brick_list:
                node, brick_path = brick.split(':')
                command = ('arequal-checksum -p %s '
                           '-i .glusterfs -i .landfill -i .trashcan'
                           % brick_path)
                ret, brick_arequal, _ = g.run(node, command)
                self.assertFalse(ret,
                                 'Failed to get arequal on brick %s' % brick)
                g.log.info('Getting arequal for %s is successful', brick)
                brick_total = brick_arequal.splitlines()[-1].split(':')[-1]
                self.assertEqual(
                    first_brick_total, brick_total,
                    'Arequals for subvol and %s are not equal' % brick)
                g.log.info('Arequals for subvol and %s are equal', brick)
        g.log.info('All arequals are equal for distributed-replicated')
    def test_snap_clone_snapd(self):
        """
        Steps:

        1. Create a volume
        2. Create a snapshot and activate it
        3. Clone the snapshot and mount it
        4. Check for snapd daemon
        5. Enable USS and validate snapd
        6. Stop cloned volume
        7. Validate snapd
        8. Start cloned volume
        9. Validate snapd
        10. Create 5 more snapshots
        11. Validate total number of
            snapshots created
        12. Activate 5 snapshots
        13. Enable USS
        14. Validate snapd
        15. Kill snapd on all nodes
        16. Validate snapd running
        17. Force start clone volume
        18. Validate snaps inside .snaps directory
        """
        # pylint: disable=too-many-statements, too-many-locals

        # Starting I/O
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" % (
                       self.script_upload_path,
                       mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate I/O
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # Creating snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
        self.assertEqual(ret, 0, ("Failed to create snapshot for volume %s"
                                  % self.volname))
        g.log.info("Snapshot %s created successfully for "
                   "volume %s", self.snap, self.volname)

        # Activating created snapshots
        ret, _, _ = snap_activate(self.mnode, self.snap)
        self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
                                  % self.snap))
        g.log.info("Snapshot snap%s activated successfully", self.snap)

        # Snapshot list
        self.assertIsNotNone(
            get_snap_list(self.mnode), "Failed to list snapshot")
        g.log.info("Snapshot list command Successful")

        # Creating and starting a Clone of snapshot:
        ret, _, _ = snap_clone(self.mnode, self.snap, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to clone %s" % self.clone_vol1)
        g.log.info("Clone volume %s created successfully", self.clone_vol1)

        # Start the clone volumes
        ret, _, _ = volume_start(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to start %s" % self.clone_vol1)
        g.log.info("%s started successfully", self.clone_vol1)

        # Form server list
        brick_list = get_all_bricks(self.mnode, self.clone_vol1)
        for bricks in brick_list:
            self.server_lists.append(bricks.split(":")[0])
        self.server_list = list(set(self.server_lists))

        # Get volume info
        vol_info = get_volume_info(self.mnode, self.clone_vol1)
        self.assertIsNotNone(vol_info, "Failed to get vol info")
        g.log.info("Successfully in getting vol info")

        # Redefining mounts for cloned volume
        self.mount_points, self.mounts_dict_list = [], []
        for client in self.all_clients_info:
            mount = {
                'protocol': self.mount_type,
                'server': self.mnode,
                'volname': self.volname,
                'client': self.all_clients_info[client],
                'mountpoint': self.mpoint,
                'options': ''
            }
            self.mounts_dict_list.append(mount)
        self.mount1 = create_mount_objs(self.mounts_dict_list)
        self.mount_points.append(self.mpoint)
        g.log.info("Successfully made entry in self.mount1")

        # FUSE mount clone1 volume
        for mount_obj in self.mounts:
            ret, _, _ = mount_volume(self.clone_vol1, self.mount_type,
                                     self.mpoint,
                                     self.mnode, mount_obj.client_system)
            self.assertEqual(ret, 0, "Volume mount failed for clone1")
            g.log.info("%s mounted Successfully", self.clone_vol1)

            # Validate clone volume is mounted or not
            ret = is_mounted(self.clone_vol1, self.mpoint, self.mnode,
                             mount_obj.client_system, self.mount_type)
            self.assertTrue(ret, "Volume not mounted on mount point: "
                            "%s" % self.mpoint)
            g.log.info("Volume %s mounted on %s", self.clone_vol1, self.mpoint)

        # Log Cloned Volume information
        ret = log_volume_info_and_status(self.mnode, self.clone_vol1)
        self.assertTrue("Failed to Log Info and Status of Volume "
                        "%s" % self.clone_vol1)
        g.log.info("Successfully Logged Info and Status")

        # Validate snapd running on all nodes
        self.validate_snapd(check_condition=False)

        # Enable USS
        ret, _, _ = enable_uss(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to enable USS on cloned volume")
        g.log.info("Successfully enabled USS on Cloned volume")

        # Validate USS running
        self.validate_uss()

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Stop cloned volume
        ret, _, _ = volume_stop(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to stop cloned volume "
                         "%s" % self.clone_vol1)
        g.log.info("Successfully Stopped Cloned volume %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd(check_condition=False)

        # Start cloned volume
        ret, _, _ = volume_start(self.mnode, self.clone_vol1)
        self.assertEqual(ret, 0, "Failed to start cloned volume"
                         " %s" % self.clone_vol1)
        g.log.info("Successfully started cloned volume"
                   " %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Create 5 snapshots
        self.snaps_list = [('test_snap_clone_snapd-snap%s' % i)
                           for i in range(0, 5)]
        for snapname in self.snaps_list:
            ret, _, _ = snap_create(self.mnode, self.clone_vol1,
                                    snapname)
            self.assertEqual(ret, 0, ("Failed to create snapshot for volume"
                                      " %s" % self.clone_vol1))
            g.log.info("Snapshot %s created successfully for volume "
                       "%s", snapname, self.clone_vol1)

        # Validate USS running
        self.validate_uss()

        # Check snapshot under .snaps directory
        self.check_snaps()

        # Activate Snapshots
        for snapname in self.snaps_list:
            ret, _, _ = snap_activate(self.mnode, snapname)
            self.assertEqual(ret, 0, ("Failed to activate snapshot %s"
                                      % snapname))
            g.log.info("Snapshot %s activated "
                       "successfully", snapname)

        # Validate USS running
        self.validate_uss()

        # Validate snapshots under .snaps folder
        self.validate_snaps()

        # Kill snapd on all nodes except the management node and validate
        for server in self.servers[1:]:
            ret, _, _ = terminate_snapd_on_node(server)
            self.assertEqual(ret, 0, "Failed to Kill snapd on node %s"
                             % server)
            g.log.info("snapd Killed Successfully on node %s", server)

            # Verify snapd is no longer running
            ret = is_snapd_running(server, self.clone_vol1)
            self.assertFalse(ret, "Unexpected: snapd is still running on "
                             "node %s" % server)
            g.log.info("Expected: snapd is not running on node %s", server)

            # Check snapshots under .snaps folder
            g.log.info("Validating snapshots under .snaps")
            ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
            self.assertEqual(ret, 0, "Target endpoint not connected")
            g.log.info("Successfully listed snapshots under .snaps")

        # Kill snapd on the management node
        ret, _, _ = terminate_snapd_on_node(self.servers[0])
        self.assertEqual(ret, 0, "Failed to Kill snapd on node %s"
                         % self.servers[0])
        g.log.info("snapd Killed Successfully on node %s", self.servers[0])

        # Validate snapd is not running on any node
        self.validate_snapd(check_condition=False)

        # Validating snapshots under .snaps
        ret, _, _ = uss_list_snaps(self.clients[0], self.mpoint)
        self.assertNotEqual(ret, 0, "Unexpected: Successfully listed "
                            "snapshots under .snaps")
        g.log.info("Expected: Target endpoint not connected")

        # Start the cloned volume (force start)
        ret, _, _ = volume_start(self.mnode, self.clone_vol1, force=True)
        self.assertEqual(ret, 0, "Failed to start cloned volume "
                         "%s" % self.clone_vol1)
        g.log.info("Successfully Started Cloned volume %s", self.clone_vol1)

        # Validate snapd running on all nodes
        self.validate_snapd()

        # Validate snapshots under .snaps folder
        self.validate_snaps()
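
    # Note: validate_snapd(), validate_uss(), validate_snaps() and
    # check_snaps() called above are helpers defined elsewhere in this test
    # class and are not part of this snippet. A minimal sketch of the two
    # status helpers is shown below, assuming is_snapd_running() (used above)
    # and is_uss_enabled() from glustolibs.gluster.uss_ops; the real helpers
    # may differ:
    def validate_snapd(self, check_condition=True):
        """Assert snapd is running (or not running) on every server."""
        for server in self.servers:
            ret = is_snapd_running(server, self.clone_vol1)
            if check_condition:
                self.assertTrue(ret, "snapd is not running on %s" % server)
            else:
                self.assertFalse(ret, "snapd is still running on %s" % server)

    def validate_uss(self):
        """Assert USS is enabled on the cloned volume."""
        ret = is_uss_enabled(self.mnode, self.clone_vol1)
        self.assertTrue(ret, "USS is not enabled on %s" % self.clone_vol1)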

    def test_volume_status_fd(self):
        '''
        -> Create volume
        -> Mount the volume on 2 clients
        -> Run I/O's on mountpoint
        -> While I/O's are in progress
        -> Perform gluster volume status fd repeatedly
        -> List all files and dirs listed
        '''

        # Check whether the volume is mounted on all clients
        for mount_obj in self.mounts:
            ret = is_mounted(self.volname, mount_obj.mountpoint, self.mnode,
                             mount_obj.client_system, self.mount_type)
            self.assertTrue(ret, "Not mounted on %s"
                            % mount_obj.client_system)
            g.log.info("Mounted on %s", mount_obj.client_system)

        # run IOs
        g.log.info("Starting IO on all mounts...")
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 15 %s" % (self.script_upload_path,
                                             self.counter,
                                             mount_obj.mountpoint))

            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
            self.counter = self.counter + 10
        self.io_validation_complete = False

        # Run "gluster volume status <volname> fd" repeatedly on randomly
        # chosen cluster servers while I/O is in progress; the command
        # should not hang while I/O is running
        count = 0
        while count < 300:
            ret, _, _ = g.run(random.choice(self.servers),
                              "gluster volume status %s fd" % self.volname)
            self.assertEqual(ret, 0, ("Volume status 'fd' failed on volume %s"
                                      % self.volname))
            g.log.info("Volume status fd is successful for %s", self.volname)
            count += 1

        # Validate IO
        self.assertTrue(
            validate_io_procs(self.all_mounts_procs, self.mounts),
            "IO failed on some of the clients"
        )
        self.io_validation_complete = True

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")
Example #14
0
    def test_validate_authreject_vol(self):
        """
        -Set Authentication
        -For all the clients
        -Fetch the bricks
        -Check if bricks are online
        -Create directory
        -Mount the volume
        -Check if it is mounted
        -Check authentication logs
        -Reset the Volume
        -Check if bricks are online
        -Mounting the vol on client1
        """
        # pylint: disable=too-many-statements
        # Set Authentication
        option = {"auth.reject": "\"*\""}
        ret = set_volume_options(self.mnode, self.volname, option)
        self.assertTrue(ret, "Failed to set authentication")
        g.log.info("Authentication set Successfully")

        for client in self.clients:
            self.mountpoint = '/mnt/testvol'

            # Fetching all the bricks
            g.log.info("Fetching bricks for the volume : %s", self.volname)
            bricks_list = get_all_bricks(self.mnode, self.volname)
            self.assertIsNotNone(bricks_list, "Brick list is empty")
            g.log.info("Brick List : %s", bricks_list)

            # Check if bricks are online
            ret = are_bricks_online(self.mnode, self.volname, bricks_list)
            self.assertTrue(ret, "Not all bricks are online")
            g.log.info("All bricks are online")

            # Creating directory to mount
            cmd = ("mkdir -p /mnt/testvol")
            ret, _, _ = g.run(client, cmd)
            self.assertEqual(ret, 0, "Failed to create directory")

            # Using this way to check because of bug 1586036
            # Mounting volume
            ret, _, _ = mount_volume(self.volname, self.mount_type,
                                     self.mountpoint, self.mnode, client)

            # Checking if volume is mounted
            out = is_mounted(self.volname,
                             self.mountpoint,
                             self.mnode,
                             client,
                             self.mount_type,
                             user='******')
            if (ret == 0) & (not out):
                g.log.error("Mount executed successfully due to bug 1586036")
            elif (ret == 1) & (not out):
                g.log.info("Expected:Mounting has failed successfully")
            else:
                raise ExecutionError("Unexpected Mounting of Volume %s"
                                     "successful" % self.volname)

            # Checking client logs for authentication error
            cmd = ("grep AUTH_FAILED /var/log/glusterfs/mnt-" "testvol.log")
            ret, _, _ = g.run(client, cmd)
            self.assertEqual(
                ret, 0, "Mounting has not failed due to "
                "authentication error")
            g.log.info("Mounting has failed due to authentication error")

        # Reset Volume
        ret, _, _ = volume_reset(mnode=self.mnode, volname=self.volname)
        self.assertEqual(ret, 0, "Failed to reset volume")
        g.log.info("Volume %s reset operation is successful", self.volname)

        # Check if bricks are online and mount the volume on client1
        # Fetching bricks
        g.log.info("Fetching bricks for the volume : %s", self.volname)
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Brick list is empty")
        g.log.info("Brick List : %s", bricks_list)

        # Checking if bricks are online
        ret = are_bricks_online(self.mnode, self.volname, bricks_list)
        self.assertTrue(ret, "All bricks are not online")
        g.log.info("All bricks are online")

        # Creating directory to mount
        cmd = ("mkdir -p /mnt/testvol")
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "Failed to create directory")

        # Mounting Volume
        ret, _, _ = mount_volume(self.volname, self.mount_type,
                                 self.mountpoint, self.servers[0],
                                 self.clients[0])
        self.assertEqual(ret, 0, "Failed to mount volume")
        g.log.info("Mounted Successfully")

        # Checking if Volume is mounted
        out = is_mounted(self.volname,
                         self.mountpoint,
                         self.servers[0],
                         self.clients[0],
                         self.mount_type,
                         user='******')
        self.assertTrue(out, "Volume %s has failed to mount" % self.volname)
        g.log.info("Volume is mounted successfully %s", self.volname)