Example #1
    def test_nfs_ganesha_remove_brick(self):
        """
        Verify remove brick operation while IO is running
        Steps:
        1. Start IO on mount points
        2. Perform remove brick operation
        3. Validate IOs
        """
        # pylint: disable=too-many-statements
        # Start IO on all mount points
        all_mounts_procs, count = [], 1
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" % (self.script_upload_path, count,
                                            mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count += 10

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Perform remove brick operation
        ret = shrink_volume(self.mnode, self.volname)
        self.assertTrue(ret, ("Remove brick operation failed on "
                              "%s", self.volname))
        g.log.info("Remove brick operation is successful on "
                   "volume %s", self.volname)

        # Wait for volume processes to be online
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("All volume %s processes failed to come up "
                              "online", self.volname))
        g.log.info("All volume %s processes came up "
                   "online successfully after remove brick operation",
                   self.volname)

        # Log volume info and status after performing remove brick
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Logging volume info and status failed on "
                              "volume %s", self.volname))
        g.log.info("Successful in logging volume info and status of volume %s",
                   self.volname)

        # Validate IO
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

    def test_new_mount_while_io_in_progress(self):
        """
        Verify new mount will not cause any issues while IO is running
        Steps:
        1. Mount volume on one client
        2. Start IO
        3. Mount volume on new mountpoint
        4. Start IO on new mountpoint
        5. Validate IOs
        """
        # Take 2 mounts if available
        no_of_mount_objects = len(self.mounts)
        if no_of_mount_objects == 1:
            self.mount_obj1 = self.mounts[0]
            self.mount_obj2 = deepcopy(self.mounts[0])
            self.mount_obj2.mountpoint = '%s_new' % self.mount_obj2.mountpoint
        else:
            self.mount_obj1 = self.mounts[0]
            self.mount_obj2 = self.mounts[1]

        self.new_mounts = [self.mount_obj1, self.mount_obj2]

        all_mounts_procs = []
        dirname_start_num = 1

        for mount_object in self.new_mounts:
            # Mount volume
            ret = mount_object.mount()
            self.assertTrue(
                ret, "Unable to mount volume %s on %s" %
                (mount_object.volname, mount_object.client_system))

            # Start IO
            g.log.info("Starting IO on %s:%s", mount_object.client_system,
                       mount_object.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, dirname_start_num,
                    mount_object.mountpoint))
            proc = g.run_async(mount_object.client_system,
                               cmd,
                               user=mount_object.user)
            all_mounts_procs.append(proc)
            dirname_start_num += 10

        # Validate IOs
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.new_mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all IOs")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.new_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

    def test_file_dir_create_ops_on_volume(self):
        """Test File Directory Creation on the volume.
        """
        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
Example #4
    def test_file_dir_create_ops_on_volume(self):
        """Test File Directory Creation on the volume.
        """
        # Mount Volume
        g.log.info("Starting to Mount Volume %s", self.volname)
        ret = self.mount_volume(self.mounts)
        self.assertTrue(ret, ("Failed to Mount Volume %s", self.volname))
        g.log.info("Successful in Mounting Volume %s", self.volname)

        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # UnMount Volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)
Example #5
    def test_root_squash_mount_unmount(self):
        """
        Tests to verify Nfs Ganesha rootsquash functionality when volume
        is remounted
        Steps:
        1. Set permission as 777 for mount point
        2. Create some files and dirs inside mount point
        3. Enable root-squash on volume
        4. Create some more files and dirs inside mount point
        5. Unmount the volume
        6. Remount the volume
        7. Try to edit a file created in step 2
           Editing the file should fail
        8. Try to edit a file created in step 4
           Editing the file should succeed
        9. Create some more files and directories inside mount point
            They should be created as the nobody user
        10. Disable root-squash on volume
        11. Edit any of the files created in step 2.
            Editing the file should now succeed
        """
        # Set mount point permission to 777
        ret = set_file_permissions(self.mounts[0].client_system,
                                   self.mounts[0].mountpoint, 777)
        self.assertTrue(ret, "Failed to set permission for directory")
        g.log.info("Successfully set permissions for directory")

        # Create Directories on Mount point
        cmd = ("for i in {1..20}; do mkdir %s/dir$i; done" %
               self.mounts[0].mountpoint)
        ret, _, err = g.run(self.mounts[0].client_system,
                            cmd,
                            user=self.mounts[0].user)
        self.assertEqual(ret, 0, err)

        # Create multiple files inside directories on mount point.
        for i in range(1, 21):
            cmd = ("for j in {1..20}; do touch %s/dir%s/file$j; done" %
                   (self.mounts[0].mountpoint, i))
            ret, _, err = g.run(self.mounts[0].client_system,
                                cmd,
                                user=self.mounts[0].user)
            self.assertEqual(ret, 0, err)

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successful in getting stats of files/dirs "
                   "from mount point")

        # Enable root-squash on volume
        ret = set_root_squash(self.servers[0], self.volname)
        self.assertTrue(ret, "Failed to enable root-squash on volume")
        g.log.info("root-squash is enable on the volume")

        # Create some more Directories after enabling root-squash
        cmd = ("for i in {1..20}; do mkdir %s/squashed_dir$i; done" %
               self.mounts[0].mountpoint)
        ret, _, err = g.run(self.mounts[0].client_system,
                            cmd,
                            user=self.mounts[0].user)
        self.assertEqual(ret, 0, err)

        # Create some more files inside directories
        for i in range(1, 21):
            cmd = ("for j in {1..20}; do touch "
                   "%s/squashed_dir%s/squashed_file$j; done" %
                   (self.mounts[0].mountpoint, i))
            ret, _, err = g.run(self.mounts[0].client_system,
                                cmd,
                                user=self.mounts[0].user)
            self.assertEqual(ret, 0, err)

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successful in getting stats of files/dirs "
                   "from mount point")

        # Unmount volume
        ret = unmount_mounts(self.mounts)
        self.assertTrue(ret, "Volume unmount failed for %s" % self.volname)

        # Remount volume
        ret = self.mount_volume(self.mounts)
        self.assertTrue(ret, "Volume mount failed for %s" % self.volname)

        # Edit file created by root user
        for mount_obj in self.mounts:
            ret = append_string_to_file(
                mount_obj.client_system,
                "%s/dir10/file10" % mount_obj.mountpoint, 'hello')
            self.assertFalse(
                ret, "Unexpected:nobody user editing file "
                "created by root user should FAIL")
            g.log.info("Successful:nobody user failed to edit file "
                       "created by root user")

        # Edit the file created by nobody user
        for mount_obj in self.mounts:
            ret = append_string_to_file(
                mount_obj.client_system,
                "%s/squashed_dir10/squashed_file10" % mount_obj.mountpoint,
                'hello')
            self.assertTrue(
                ret, "Unexpected:nobody user failed to edit "
                "the file created by nobody user")
            g.log.info("Successful:nobody user successfully edited the "
                       "file created by nobody user")

        # Create some more files on mount point post remount.
        cmd = ("for i in {1..20}; do touch %s/remount_file$i; done" %
               self.mounts[0].mountpoint)
        ret, _, err = g.run(self.mounts[0].client_system,
                            cmd,
                            user=self.mounts[0].user)
        self.assertEqual(ret, 0, err)

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successful in getting stats of files/dirs "
                   "from mount point")

        # Check for owner and group of all the files
        for mount_obj in self.mounts:
            for i in range(1, 21):
                cmd = (
                    "ls -l %s/remount_file%i | awk '{ print $3, $4 }' |sort" %
                    (mount_obj.mountpoint, i))
                ret, out, err = g.run(mount_obj.client_system, cmd)
                self.assertFalse(ret, err)
                # is_rhel7 is assumed to be the glustolibs helper that
                # checks the client distribution; a bare function reference
                # here would always be truthy, so it is called explicitly.
                if is_rhel7(self.clients):
                    self.assertIn("nobody nobody", out,
                                  "Owner and group are not nobody")
                else:
                    self.assertIn("nfsnobody nfsnobody", out,
                                  "Owner and group are not nfsnobody")
                g.log.info("Owner and group of the file are nobody")

        # Disable root-squash
        ret = set_root_squash(self.mnode,
                              self.volname,
                              squash=False,
                              do_refresh_config=True)
        self.assertTrue(ret, "Failed to disable root-squash on volume")
        g.log.info("root-squash is disable on the volume")

        # Edit file created by root user
        for mount_obj in self.mounts:
            ret = append_string_to_file(
                mount_obj.client_system,
                "%s/dir15/file15" % mount_obj.mountpoint, 'hello')
            self.assertTrue(
                ret, "Unexpected:root user should be allowed to "
                "edit the file created by root user")
            g.log.info("Successful:root user successful in editing file "
                       "created by root user")
    def test_subdir_with_addbrick(self):

        # pylint: disable=too-many-statements
        """
        Mount the volume
        Create 2 subdir on mount point, subdir1 and subdir2
        Auth allow - Client1(subdir1,subdir2),Client2(subdir1,subdir2)
        Mount the subdir1 on client 1 and subdir2 on client2
        Start IO's on both subdirs
        Perform add-brick and rebalance
        """

        # Create  directories subdir1 and subdir2 on mount point
        ret = mkdir(self.mounts[0].client_system,
                    "%s/subdir1" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'subdir1' on"
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        ret = mkdir(self.mounts[0].client_system,
                    "%s/subdir2" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'subdir2' on"
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        # unmount volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volumes Unmount failed")
        g.log.info("Volumes Unmounted successfully")

        # Set authentication on the subdirectory subdir1
        # and subdir2 to access by 2 clients
        g.log.info(
            'Setting authentication on subdir1 and subdir2 '
            'for clients %s and %s', self.clients[0], self.clients[1])
        ret = set_auth_allow(
            self.volname, self.mnode, {
                '/subdir1': [self.clients[0], self.clients[1]],
                '/subdir2': [self.clients[0], self.clients[1]]
            })
        self.assertTrue(
            ret, 'Failed to set authentication on volume %s' % self.volname)

        # Creating mount list for subdirectories
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/subdir1" % self.volname
        self.subdir_mounts[1].volname = "%s/subdir2" % self.volname

        # Mount Subdirectory "subdir1" on client 1 and "subdir2" on client 2
        for mount_obj in self.subdir_mounts:
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount  %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Successfully mounted subdirectories on client1"
                   "and clients 2")

        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.subdir_mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.subdir_mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Start add-brick (subvolume-increase)
        g.log.info("Start adding bricks to volume when IO in progress")
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, ("Failed to expand the volume when IO in "
                              "progress on volume %s", self.volname))
        g.log.info(
            "Expanding volume when IO in progress is successful on "
            "volume %s", self.volname)

        # Log Volume Info and Status after expanding the volume
        g.log.info("Logging volume info and Status after expanding volume")
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Logging volume info and status failed on "
                              "volume %s", self.volname))
        g.log.info("Successful in logging volume info and status of volume %s",
                   self.volname)

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("All process  for volume %s are not"
                              "online", self.volname))
        g.log.info("All volume %s processes are now online", self.volname)

        # Start Rebalance
        g.log.info("Starting Rebalance on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
                                  "%s", self.volname))
        g.log.info("Successfully started rebalance on the volume %s",
                   self.volname)

        # Wait for rebalance to complete
        g.log.info("Waiting for rebalance to complete")
        ret = wait_for_rebalance_to_complete(self.mnode, self.volname, 600)
        self.assertTrue(
            ret, "Rebalance did not complete "
            "despite waiting for 10 minutes")
        g.log.info("Rebalance successfully completed on the volume %s",
                   self.volname)

        # Again validate if subdirectories are still mounted post add-brick

        for mount_obj in self.subdir_mounts:
            ret = mount_obj.is_mounted()
            self.assertTrue(
                ret, ("Subdirectory %s is not mounted on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Subdirectory %s is mounted on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Successfully validated that subdirectories are mounted"
                   "on client1 and clients 2 post add-brick operation")
Example #7
    def test_snapshot_while_rebalance(self):
        # pylint: disable=too-many-statements, missing-docstring
        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Create one snapshot of volume using no-timestamp option
        cmd_str = ("gluster snapshot create %s %s %s" %
                   ("snapy", self.volname, "no-timestamp"))
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snapy created successfully "
                   "for volume %s", self.volname)

        # Check for no of snaps using snap_list it should be 1
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            1, len(snap_list), "Expected 1 snapshot, "
            "found %s" % len(snap_list))
        g.log.info("Successfully validated number of snaps.")

        # validate snap name
        self.assertIn("snapy", snap_list, " snap not found")
        g.log.info("Successfully validated names of snap")

        # get the bricks for the volume
        g.log.info("Fetching bricks for the volume : %s", self.volname)
        bricks_list = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List : %s", bricks_list)

        # expanding volume
        g.log.info("Start adding bricks to volume %s", self.volname)
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, ("Failed to add bricks to "
                              "volume %s " % self.volname))
        g.log.info("Add brick successful")

        # Log Volume Info and Status after expanding the volume
        g.log.info("Logging volume info and Status after expanding volume")
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Logging volume info and status failed "
                              "on volume %s", self.volname))
        g.log.info(
            "Successful in logging volume info and status "
            "of volume %s", self.volname)

        # Verify volume's all process are online for 60 sec
        g.log.info("Verifying volume's all process are online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname,
                                                   60)
        self.assertTrue(ret, ("Volume %s : All process are not "
                              "online", self.volname))
        g.log.info("Successfully Verified volume %s "
                   "processes are online", self.volname)

        # Start Rebalance
        g.log.info("Starting Rebalance on the volume")
        ret, _, err = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0,
                         ("Failed to start rebalance on "
                          "the volume %s with error %s" % (self.volname, err)))
        g.log.info("Successfully started rebalance on the "
                   "volume %s", self.volname)

        # Log Rebalance status
        g.log.info("Log Rebalance status")
        ret, _, _ = rebalance_status(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to log rebalance status")
        g.log.info("successfully logged rebalance status")

        # Create one snapshot of volume during rebalance
        cmd_str = ("gluster snapshot create %s %s %s" %
                   ("snapy_rebal", self.volname, "no-timestamp"))
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertNotEqual(ret, 0, ("successfully created 'snapy_rebal'"
                                     " for %s" % self.volname))
        g.log.info("Snapshot 'snapy_rebal' not created as rebalance is in "
                   "progress check log")
        # Check for no of snaps using snap_list it should be 1
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            1, len(snap_list), "Expected 1 snapshot "
            "found %s snapshot" % len(snap_list))
        g.log.info("Successfully validated number of snaps.")

        # Wait for rebalance to complete
        g.log.info("Waiting for rebalance to complete")
        ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
        self.assertTrue(ret, ("Rebalance is not yet complete "
                              "on the volume %s", self.volname))
        g.log.info("Rebalance is successfully complete on "
                   "the volume %s", self.volname)

        # Check Rebalance status after rebalance is complete
        g.log.info("Checking Rebalance status")
        ret, _, _ = rebalance_status(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to get rebalance status for "
                                  "the volume %s", self.volname))
        g.log.info("Successfully got rebalance status of the "
                   "volume %s", self.volname)

        # Create one snapshot of volume post rebalance with same name
        cmd_str = ("gluster snapshot create %s %s %s" %
                   ("snapy_rebal", self.volname, "no-timestamp"))
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info(
            "Snapshot snapy_rebal created successfully "
            "for volume  %s", self.volname)

        # Check for no of snaps using snap_list it should be 2
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            2, len(snap_list), "Expected 2 snapshots, "
            "found %s" % len(snap_list))
        g.log.info("Successfully validated number of snaps.")

        # validate snap name
        self.assertIn("snapy_rebal", snap_list, " snap not found")
        g.log.info("Successfully validated names of snap")
Example #8
    def test_validate_snaps_restore(self):
        # pylint: disable=too-many-statements
        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Setting some volume option related to snapshot
        option_before_restore = {
            'volumeConfig': [{
                'softLimit': '100',
                'effectiveHardLimit': '200',
                'hardLimit': '256'
            }],
            'systemConfig': {
                'softLimit': '90%',
                'activateOnCreate': 'disable',
                'hardLimit': '256',
                'autoDelete': 'disable'
            }
        }
        ret = set_snap_config(self.mnode, option_before_restore)
        self.assertTrue(ret,
                        ("Failed to set snap config options on %s"
                         % self.volname))
        g.log.info("Snapshot config options for %s are set successfully",
                   self.volname)

        # Get brick list before taking snap_restore
        bricks_before_snap_restore = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List before snap restore "
                   "volume: %s", bricks_before_snap_restore)

        # Creating snapshot
        ret = snap_create(self.mnode, self.volname, "snap1")
        self.assertTrue(ret,
                        ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snap1 created successfully for volume  %s",
                   self.volname)

        # Again start IO on all mounts.
        all_mounts_procs = []
        count = 1000
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Reset volume to make sure volume options will reset
        ret = volume_reset(self.mnode, self.volname, force=False)
        self.assertTrue(ret, ("Failed to reset %s" % self.volname))
        g.log.info("Reset Volume %s is Successful", self.volname)

        # Removing one brick
        g.log.info("Starting volume shrink")
        ret = shrink_volume(self.mnode, self.volname, force=True)
        self.assertTrue(ret, ("Failed to shrink the volume on "
                              "volume %s", self.volname))
        g.log.info("Shrinking volume is successful on "
                   "volume %s", self.volname)

        # Restore snapshot
        ret = snap_restore_complete(self.mnode, self.volname, "snap1")
        self.assertTrue(ret, ("Failed to restore snap snap1 on the "
                              "volume %s", self.volname))
        g.log.info("Restore of volume %s from snap1 is successful",
                   self.volname)

        # Validate volume is up and running
        g.log.info("Verifying volume is up and process are online")
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(
            ret, ("Not all processes of volume %s are online", self.volname))
        g.log.info("Volume %s: all processes are online", self.volname)

        # Get volume options post restore
        option_after_restore = get_snap_config(self.mnode)
        # Compare volume options
        self.assertNotEqual(option_before_restore, option_after_restore,
                            "Volume Options are not same after snap restore")

        # Get brick list post restore
        bricks_after_snap_restore = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List after snap restore "
                   "volume: %s", bricks_after_snap_restore)
        # Compare brick_list
        self.assertNotEqual(bricks_before_snap_restore,
                            bricks_after_snap_restore,
                            "Bricks are not same after snap restore")

        # Creating snapshot
        ret = snap_create(self.mnode, self.volname, "snap2")
        self.assertTrue(ret,
                        ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot snap2 created successfully for volume  %s",
                   self.volname)

        # Again start IO on all mounts after restore
        all_mounts_procs = []
        count = 1000
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")
Example #9
    def test_nfs_ganesha_add_brick(self):
        """
        Verify add brick functionality when IO is running
        Steps:
        1. Start IO on mount points
        2. Add bricks to expand the volume
        3. Start rebalance and wait for its completion
        4. Validate IOs
        """
        # Start IO on all mount points
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Start add-brick
        g.log.info("Start adding bricks to volume when IO in progress")
        ret = expand_volume(self.mnode, self.volname, self.servers,
                            self.all_servers_info)
        self.assertTrue(ret, ("Failed to expand the volume when IO in "
                              "progress on volume %s", self.volname))
        g.log.info(
            "Expanding volume when IO in progress is successful on "
            "volume %s", self.volname)

        # Log volume info and status after expanding the volume
        g.log.info("Logging volume info and Status after expanding volume")
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Logging volume info and status failed on "
                              "volume %s", self.volname))
        g.log.info("Successful in logging volume info and status of volume %s",
                   self.volname)

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("All process  for volume %s are not"
                              "online", self.volname))
        g.log.info("All volume %s processes are now online", self.volname)

        # Start rebalance
        g.log.info("Starting Rebalance on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to start rebalance on the volume "
                                  "%s", self.volname))
        g.log.info("Successfully started rebalance on the volume %s",
                   self.volname)

        # Wait for rebalance to complete
        g.log.info("Waiting for rebalance to complete")
        ret = wait_for_rebalance_to_complete(self.mnode, self.volname)
        self.assertTrue(
            ret, "Rebalance did not start "
            "despite waiting for 5 mins")
        g.log.info("Rebalance is successfully complete on the volume %s",
                   self.volname)

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")
    def test_subdir_with_replacebrick(self):

        # pylint: disable=too-many-statements
        """
        Mount the volume
        Create 50 directories on mount point
        Unmount volume
        Auth allow - Client1(subdir25),Client2(subdir15)
        Mount the subdir to their authorized respective clients
        Start IO's on both subdirs
        Perform replace-brick
        Validate on client if subdir's are mounted post replace-brick
        operation is performed
        Stat data on subdirs
        """
        # Create  directories on mount point
        for i in range(0, 50):
            ret = mkdir(self.mounts[0].client_system,
                        "%s/subdir%s" % (self.mounts[0].mountpoint, i))
            self.assertTrue(
                ret,
                ("Failed to create directory %s/subdir%s on"
                 "volume from client %s" %
                 (self.mounts[0].mountpoint, i, self.mounts[0].client_system)))
        g.log.info("Successfully created directories on mount point")

        # unmount volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volumes Unmount failed")
        g.log.info("Volumes Unmounted successfully")

        # Set authentication on subdirectory subdir25 so it is accessible
        # only by client1, and on subdir15 so it is accessible only by
        # client2
        g.log.info(
            'Setting authentication on subdir25 and subdir15 '
            'for clients %s and %s', self.clients[0], self.clients[1])
        ret = set_auth_allow(
            self.volname, self.mnode, {
                '/subdir25': [self.mounts[0].client_system],
                '/subdir15': [self.mounts[1].client_system]
            })
        self.assertTrue(
            ret, 'Failed to set authentication on volume %s' % self.volname)

        # Creating mount list for mounting selected subdirs on authorized
        # clients
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/subdir25" % self.volname
        self.subdir_mounts[1].volname = "%s/subdir15" % self.volname

        # Mount Subdirectory subdir25 on client 1 and subdir15 on client 2
        for mount_obj in self.subdir_mounts:
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount  %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Successfully mounted sub directories on"
                   "authenticated clients")

        # Start IO on all the subdir mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.subdir_mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Log Volume Info and Status before replacing brick from the volume.
        g.log.info(
            "Logging volume info and Status before replacing brick "
            "from the volume %s", self.volname)
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Logging volume info and status failed on "
                              "volume %s", self.volname))
        g.log.info("Successful in logging volume info and status of volume %s",
                   self.volname)

        # Replace brick from a sub-volume
        g.log.info("Replace a brick from the volume")
        ret = replace_brick_from_volume(self.mnode, self.volname, self.servers,
                                        self.all_servers_info)
        self.assertTrue(ret, "Failed to replace  brick from the volume")
        g.log.info("Successfully replaced brick from the volume")

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("All volume %s processes failed to come up "
                              "online", self.volname))
        g.log.info("All volume %s processes came up "
                   "online successfully", self.volname)

        # Log Volume Info and Status after replacing the brick
        g.log.info(
            "Logging volume info and Status after replacing brick "
            "from the volume %s", self.volname)
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed Logging volume info and status on "
                              "volume %s", self.volname))
        g.log.info("Successful in logging volume info and status of volume %s",
                   self.volname)

        # Wait for self-heal to complete
        g.log.info("Wait for self-heal to complete")
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal has not yet completed')
        g.log.info("self-heal is successful after replace-brick operation")

        # Again validate if subdirectories are still mounted post replace-brick
        for mount_obj in self.subdir_mounts:
            ret = mount_obj.is_mounted()
            self.assertTrue(
                ret, ("Subdirectory %s is not mounted on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Subdirectory %s is mounted on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Successfully validated that subdirectories are mounted"
                   "on client1 and clients 2 post replace-brick operation")

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.subdir_mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")
    def test_nfs_ganesha_replace_brick(self):
        """
        Verify replace brick operation while IO is running
        Steps:
        1. Start IO on mount points
        2. Perform replace brick operation
        3. Validate IOs
        4. Get stat of files and dirs
        """
        # pylint: disable=too-many-statements
        # Start IO on all mount points
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Perform replace brick operation
        g.log.info("Replace a brick from the volume")
        ret = replace_brick_from_volume(self.mnode, self.volname, self.servers,
                                        self.all_servers_info)
        self.assertTrue(ret, "Failed to replace  brick from the volume")
        g.log.info("Replace brick operation successful")

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online after replace "
                   "brick operation")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("All volume %s processes failed to come up "
                              "online", self.volname))
        g.log.info(
            "All volume %s processes came up "
            "online successfully after replace brick operation", self.volname)

        # Log volume info and status
        g.log.info(
            "Logging volume info and status after replacing brick "
            "from the volume %s", self.volname)
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to log volume info and status of "
                              "volume %s", self.volname))
        g.log.info("Successful in logging volume info and status of volume %s",
                   self.volname)

        # Wait for self-heal to complete
        g.log.info("Wait for self-heal to complete")
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal has not yet completed')
        g.log.info("Self-heal is successful after replace-brick operation")

        # Validate IOs
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")
Example #12
    def test_uss_snap_restore(self):
        """
        Description:
            This test case will validate USS after Snapshot restore.
            The restored snapshot should not be listed under the '.snaps'
            directory.

        * Perform I/O on mounts
        * Enable USS on volume
        * Validate USS is enabled
        * Create a snapshot
        * Activate the snapshot
        * Perform some more I/O
        * Create another snapshot
        * Activate the second snapshot
        * Restore volume to the second snapshot
        * From mount point validate under .snaps
          - first snapshot should be listed
          - second snapshot should not be listed
        """

        # pylint: disable=too-many-statements
        # Perform I/O
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name firstfiles %s" %
               (self.script_upload_path, self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system,
                           cmd,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Wait for IO to complete and validate IO
        self.assertTrue(
            wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]),
            "IO failed on %s" % self.mounts[0])
        g.log.info("IO is successful on all mounts")

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Enable USS
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume")
        g.log.info("Successfully enabled USS on volume")

        # Validate USS is enabled
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, "USS is disabled on volume %s" % self.volname)
        g.log.info("USS enabled on volume %s", self.volname)

        # Create a snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, self.snapshots[0])
        self.assertEqual(ret, 0,
                         ("Failed to create snapshot for %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume  %s",
                   self.snapshots[0], self.volname)

        # Check for number of snaps using snap_list it should be 1 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            1, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snapshots")

        # Activate the snapshot
        ret, _, _ = snap_activate(self.mnode, self.snapshots[0])
        self.assertEqual(
            ret, 0, ("Failed to activate snapshot %s" % self.snapshots[0]))
        g.log.info("Snapshot %s activated successfully", self.snapshots[0])

        # Perform I/O
        self.all_mounts_procs = []
        cmd = ("/usr/bin/env python %s create_files "
               "-f 10 --base-file-name secondfiles %s" %
               (self.script_upload_path, self.mounts[0].mountpoint))
        proc = g.run_async(self.mounts[0].client_system,
                           cmd,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Wait for IO to complete and validate IO
        self.assertTrue(
            wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]),
            "IO failed on %s" % self.mounts[0])
        g.log.info("IO is successful on all mounts")

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Create another snapshot
        ret, _, _ = snap_create(self.mnode, self.volname, self.snapshots[1])
        self.assertEqual(
            ret, 0, ("Failed to create snapshot for volume %s" % self.volname))
        g.log.info("Snapshot %s created successfully for volume  %s",
                   self.snapshots[1], self.volname)

        # Check for number of snaps using snap_list it should be 2 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            2, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snapshots")

        # Activate the second snapshot
        ret, _, _ = snap_activate(self.mnode, self.snapshots[1])
        self.assertEqual(
            ret, 0, ("Failed to activate snapshot %s" % self.snapshots[1]))
        g.log.info("Snapshot %s activated successfully", self.snapshots[1])

        # Restore volume to the second snapshot
        ret = snap_restore_complete(self.mnode, self.volname,
                                    self.snapshots[1])
        self.assertTrue(ret, ("Failed to restore snap %s on the "
                              "volume %s" % (self.snapshots[1], self.volname)))
        g.log.info("Restore of volume is successful from %s on "
                   "volume %s", self.snapshots[1], self.volname)

        # Verify all volume processes are online
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(ret, "Failed: All volume processes are not online")
        g.log.info("All volume processes are online")
        ret = is_snapd_running(self.mnode, self.volname)
        self.assertTrue(
            ret, "Failed: snapd is not running for volume %s" % self.volname)
        g.log.info("Successful: snapd is running")

        # List activated snapshots under the .snaps directory
        snap_dir_list = get_uss_list_snaps(self.mounts[0].client_system,
                                           self.mounts[0].mountpoint)
        self.assertIsNotNone(
            snap_dir_list, "Failed to list snapshots under .snaps directory")
        g.log.info("Successfully gathered list of snapshots under the .snaps"
                   " directory")

        # Check for first snapshot as it should get listed here
        self.assertIn(self.snapshots[0], snap_dir_list,
                      ("Unexpected : %s not listed under .snaps "
                       "directory" % self.snapshots[0]))
        g.log.info("Activated Snapshot %s listed Successfully",
                   self.snapshots[0])

        # Check for second snapshot as it should not get listed here
        self.assertNotIn(self.snapshots[1], snap_dir_list,
                         ("Unexpected : %s listed in .snaps "
                          "directory" % self.snapshots[1]))
        g.log.info("Restored Snapshot %s not listed ", self.snapshots[1])
Example #13
    def test_volume_sanity(self):
        """
        This test verifies that files/directories creation or
        deletion doesn't leave behind any stale spaces
        """
        self.all_mounts_procs = []
        # Mount Volume
        g.log.info("Starting to Mount Volume %s", self.volname)
        ret = self.mount_volume(self.mounts)
        self.assertTrue(ret, ("Failed to Mount Volume %s" % self.volname))
        g.log.info("Successful in Mounting Volume %s", self.volname)

        # Get the list of directories under .glusterfs before
        # creating any files
        before_creating_files = self._check_any_stale_space_present()

        # Creating files on client side
        for mount_object in self.mounts:
            g.log.info("Generating data for %s:%s", mount_object.client_system,
                       mount_object.mountpoint)
            # Create files
            g.log.info('Creating files...')
            command = (
                "python %s create_files -f 100 --fixed-file-size 1k %s" %
                (self.script_upload_path, mount_object.mountpoint))

            proc = g.run_async(mount_object.client_system,
                               command,
                               user=mount_object.user)
            self.all_mounts_procs.append(proc)

        # Creating directories in the mount point
        for mount_object in self.mounts:
            g.log.info("Creating Directories on %s:%s",
                       mount_object.client_system, mount_object.mountpoint)
            cmd = ("python %s create_deep_dir -d 0 -l 10 %s" %
                   (self.script_upload_path, mount_object.mountpoint))

            proc = g.run_async(mount_object.client_system,
                               cmd,
                               user=mount_object.user)
            self.all_mounts_procs.append(proc)

        # Creating hard links
        for mount_object in self.mounts:
            g.log.info("Creating hard links on %s:%s",
                       mount_object.client_system, mount_object.mountpoint)
            cmd = ("python %s create_hard_links --dest-dir %s %s" %
                   (self.script_upload_path, mount_object.mountpoint,
                    mount_object.mountpoint))
            proc = g.run_async(mount_object.client_system,
                               cmd,
                               user=mount_object.user)
            self.all_mounts_procs.append(proc)

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Remove the files, directories and hard-links which created
        for mount_object in self.mounts:
            cmd = ("cd %s; rm -rf *" % mount_object.mountpoint)
            g.log.info("Running cmd %s nodes %s", cmd,
                       mount_object.client_system)
            ret, _, _ = g.run(mount_object.client_system, cmd)
            self.assertEqual(
                ret, 0, "Failed to delete the files/dir on"
                " %s" % mount_object.client_system)
        g.log.info("Succesfully deleted all the files")

        # Get the list of directories under .glusterfs after
        # deleting files on client side
        after_del_files = self._check_any_stale_space_present()

        # Compare the output before and after running io's
        self.assertListEqual(
            before_creating_files, after_del_files,
            "Both list are not equal.\n Before creating"
            " file:%s\n After deleting file: "
            "%s" % (before_creating_files, after_del_files))
        g.log.info(
            "Both list are equal. Before creating file:%s "
            "After deleting file :%s", before_creating_files, after_del_files)

        # UnMount Volume
        g.log.info("Starting to Unmount Volume %s", self.volname)
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
        g.log.info("Successfully Unmounted Volume %s", self.volname)
    def test_validate_snaps_256(self):
        """
        Validate snapshot creation for 256 snapshots

        * Perform some IO
        * Set snapshot config option snap-max-hard-limit to 256
        * Create 256 snapshots
        * Verify 256 created successfully
        * Create 257th snapshot - creation should fail as it will
          exceed the hard-limit
        * Verify snapshot list for 256 snapshots

        """
        # pylint: disable=too-many-statements
        # Start IO on the first mount
        cmd = (
            "/usr/bin/env python %s create_files "
            "-f 10 --base-file-name firstfiles %s"
            % (self.script_upload_path,
               self.mounts[0].mountpoint))
        proc = g.run_async(
            self.mounts[0].client_system, cmd, user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Wait for IO to complete
        self.assertTrue(
            wait_for_io_to_complete(self.all_mounts_procs, self.mounts[0]),
            "IO failed on %s" % self.mounts[0])
        g.log.info("IO is successful on all mounts")

        # Perform stat on all the files/dirs created
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully performed stat on all files/dirs created")

        # Set config option snap-max-hard-limit to 256
        # This is to make sure to override
        max_hard_limit = {'snap-max-hard-limit': '256'}
        ret, _, _ = set_snap_config(self.mnode, max_hard_limit)
        self.assertEqual(ret, 0, "Failed to set snapshot config option "
                         "snap-max-hard-limit to 256")
        g.log.info("Successfully set snapshot config option "
                   "snap-max-hard-limit to 256")

        # Create 256 snapshots
        for snapname in self.snapshots:
            ret, _, _ = snap_create(self.mnode, self.volname, snapname)
            self.assertEqual(ret, 0, ("Failed to create snapshot %s for %s"
                                      % (snapname, self.volname)))
            sleep(1)
        g.log.info("Snapshots created successfully for volume %s",
                   self.volname)

        # Validate snapshot list for 256 snapshots
        snap_list = get_snap_list(self.mnode)
        self.assertTrue((len(snap_list) == 256), "Failed: Number of snapshots "
                        "is not consistent for volume %s" % self.volname)
        g.log.info("Successfully validated number of snapshots")

        # Validate snapshot existence using snap-name
        for snapname in self.snapshots:
            self.assertIn(snapname, snap_list,
                          "Failed: Snapshot %s not found" % snapname)
        g.log.info("Successfully validated snapshots existence using "
                   "snap-name")

        # Try to exceed snap-max-hard-limit by creating 257th snapshot
        snap_257 = "snap-test-validate-256-snapshots-%s-257" % (self.volname)
        ret, _, _ = snap_create(self.mnode, self.volname, snap_257)
        self.assertEqual(
            ret, 1, ("Unexpected: Successfully created %s for  volume %s"
                     % (snap_257, self.volname)))
        g.log.info("Snapshot %s not created as it exceeds the "
                   "snap-max-hard-limit", snap_257)

        # Validate snapshot list for 256 snapshots
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(len(snap_list), 256, "Failed: Number of snapshots "
                         "is not consistent for volume %s" % self.volname)
        g.log.info("Successfully validated number of snapshots")
Example #15
    def test_root_squash_enable(self):
        """
        Tests to verify Nfs Ganesha rootsquash functionality with multi
        client
        Steps:
        1. Create some directories on mount point.
        2. Create some files inside those directories
        3. Set permission as 777 for mount point
        4. Enable root-squash on volume
        5. From client 2, edit a file created by the root user
           Editing the file should fail
        6. Create some directories on mount point.
        7. Create some files inside the directories
           Files and directories will be created as the
           nfsnobody user
        8. Edit a file created in step 7
           Editing the file should succeed
        9. Disable root-squash
        10. Edit a file created in step 7
            Editing the file should succeed
        """
        # Create Directories on Mount point
        cmd = ("for i in {1..10}; do mkdir %s/dir$i; done" %
               self.mounts[0].mountpoint)
        ret, _, err = g.run(self.mounts[0].client_system,
                            cmd,
                            user=self.mounts[0].user)
        self.assertEqual(ret, 0, err)

        # Create files inside directories on mount point.
        cmd = ("for i in {1..10}; do touch %s/dir$i/file$i; done" %
               self.mounts[0].mountpoint)
        ret, _, err = g.run(self.mounts[0].client_system,
                            cmd,
                            user=self.mounts[0].user)
        self.assertEqual(ret, 0, err)

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successful in getting stats of files/dirs "
                   "from mount point")

        # Set mount point permission to 777
        ret = set_file_permissions(self.mounts[0].client_system,
                                   self.mounts[0].mountpoint, 777)
        self.assertTrue(ret, "Failed to set permission for directory")
        g.log.info("Successfully set permissions for directory")

        # Enable root-squash on volume
        ret = set_root_squash(self.servers[0], self.volname)
        self.assertTrue(ret, "Failed to enable root-squash on volume")
        g.log.info("root-squash is enable on the volume")

        # Edit file created by root user from client 2
        ret = append_string_to_file(
            self.mounts[1].client_system,
            "%s/dir5/file5" % self.mounts[1].mountpoint, 'hello')
        self.assertFalse(
            ret, "Unexpected:nfsnobody user editing file "
            "created by root user should FAIL")
        g.log.info("Successful:nfsnobody user failed to edit file "
                   "created by root user")

        # Create Directories on Mount point
        cmd = ("for i in {1..10}; do mkdir %s/SquashDir$i; done" %
               self.mounts[0].mountpoint)
        ret, _, err = g.run(self.mounts[0].client_system,
                            cmd,
                            user=self.mounts[0].user)
        self.assertEqual(ret, 0, err)

        # Create files inside directories on mount point
        cmd = ("for i in {1..10}; do touch %s/SquashDir$i/Squashfile$i;"
               "done" % self.mounts[0].mountpoint)
        ret, _, err = g.run(self.mounts[0].client_system,
                            cmd,
                            user=self.mounts[0].user)
        self.assertEqual(ret, 0, err)

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successful in getting stats of files/dirs "
                   "from mount point")

        # Edit the file created by nfsnobody user from client 2
        ret = append_string_to_file(
            self.mounts[1].client_system,
            "%s/SquashDir5/Squashfile5" % self.mounts[1].mountpoint, 'hello')
        self.assertTrue(
            ret, "Unexpected:nfsnobody user failed to edit "
            "the file created by nfsnobody user")
        g.log.info("Successful:nfsnobody user successfully edited the "
                   "file created by nfsnobody user")

        # Disable root-squash
        ret = set_root_squash(self.servers[0],
                              self.volname,
                              squash=False,
                              do_refresh_config=True)
        self.assertTrue(ret, "Failed to disable root-squash on volume")
        g.log.info("root-squash is disabled on the volume")

        # Edit the file created by nfsnobody user from root user
        ret = append_string_to_file(
            self.mounts[1].client_system,
            "%s/SquashDir10/Squashfile10" % self.mounts[1].mountpoint, 'hello')
        self.assertTrue(
            ret, "Unexpected:root user failed to edit "
            "the file created by nfsnobody user")
        g.log.info("Successful:root user successfully edited the "
                   "file created by nfsnobody user")
    def test_node_reboot_subdir_mounted_io_running(self):
        """
        Verify node reboot operation when sub-dirs are mounted and IOs are
        running

        Steps:
        1. Create two sub-directories on mounted volume.
        2. Unmount volume from clients.
        3. Set auth.allow on sub dir d1 for client1 and d2 for client2.
        4. Mount sub-dir d1 on client1 and d2 on client2.
        5. Perform IO on mounts.
        6. Reboot the node from which sub-dirs are
           mounted and wait for node to come up.
        7. Verify if peers are connected.
        8. Check whether bricks are online.
        9. Validate IO process.
        """
        # Creating two sub directories on mounted volume
        ret = mkdir(self.mounts[0].client_system,
                    "%s/d1" % self.mounts[0].mountpoint)
        self.assertTrue(ret, ("Failed to create directory 'd1' in volume %s "
                              "from client %s" %
                              (self.volname, self.mounts[0].client_system)))
        ret = mkdir(self.mounts[0].client_system,
                    "%s/d2" % self.mounts[0].mountpoint)
        self.assertTrue(ret, ("Failed to create directory 'd2' in volume %s "
                              "from client %s" %
                              (self.volname, self.mounts[0].client_system)))

        # Unmounting volumes
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Failed to unmount one or more volumes")
        g.log.info("Successfully unmounted all volumes")

        # Setting authentication for directories
        auth_dict = {
            '/d1': [self.mounts[0].client_system],
            '/d2': [self.mounts[1].client_system]
        }
        ret = set_auth_allow(self.volname, self.mnode, auth_dict)
        self.assertTrue(ret, "Failed to set authentication")
        g.log.info("Successfully set authentication on sub directories")

        # Creating mounts list
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/d1" % self.volname
        self.subdir_mounts[1].volname = "%s/d2" % self.volname

        # Mounting sub directories to authenticated clients
        for mount_obj in self.subdir_mounts:
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount sub directory %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted sub directory %s on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Successfully mounted sub directories to authenticated "
                   "clients")

        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.subdir_mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Reboot node and wait for node to come up.
        ret, _ = reboot_nodes_and_wait_to_come_online(self.mnode)
        self.assertTrue(
            ret, "Node reboot failed. Node %s has not came up" % self.mnode)

        # Check whether peers are in connected state
        ret = self.validate_peers_are_connected()
        self.assertTrue(ret, "All nodes are not in connected state.")

        # Get the bricks list of the volume
        g.log.info("Fetching bricks list of the volume : %s", self.volname)
        bricks_list = get_all_bricks(self.mnode, self.volname)
        g.log.info("Brick List : %s", bricks_list)

        # Check whether all bricks are online
        g.log.info("Verifying whether all bricks are online.")
        ret = are_bricks_online(self.mnode, self.volname, bricks_list)
        self.assertTrue(ret, "All bricks are not online.")
        g.log.info("All bricks are online.")

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.subdir_mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Unmount sub-directories
        ret = self.unmount_volume(self.subdir_mounts)
        self.assertTrue(ret, "Failed to unmount one or more sub-directories")
        g.log.info("Successfully unmounted all sub-directories")
    def test_rootsquash_enable(self):
        # Start IO on mount point.
        self.all_mounts_procs = []
        cmd = ("for i in {1..10}; do touch %s/file$i; done" %
               self.mounts[0].mountpoint)
        proc = g.run_async(self.mounts[0].client_system,
                           cmd,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Validate IO
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfull in getting stats of files/dirs "
                   "from mount point")

        # Check for owner and group of random file
        for mount_obj in self.mounts:
            cmd = ("ls -l %s/file5 | awk '{ print $3, $4 }' |sort" %
                   mount_obj.mountpoint)
            ret, out, err = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, err)
            self.assertIn("root root", out, "Owner and group is not ROOT")
            g.log.info("Owner and group of file is ROOT")

        # Set mount point permission to 777
        for mount_obj in self.mounts:
            cmd = ("chmod 777 %s" % mount_obj.mountpoint)
            ret, _, err = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, err)
            g.log.info("Mount point permission changed to 777")

        # Enable root-squash on volume
        ret = set_root_squash(self.servers[0], self.volname)
        self.assertTrue(ret, "Failed to enable root-squash on volume")
        g.log.info("root-squash is enable on the volume")

        # Start IO on mount point.
        self.all_mounts_procs = []
        cmd = ("for i in {1..10}; do touch %s/Squashfile$i; done" %
               self.mounts[0].mountpoint)
        proc = g.run_async(self.mounts[0].client_system,
                           cmd,
                           user=self.mounts[0].user)
        self.all_mounts_procs.append(proc)

        # Validate IO
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfull in getting stats of files/dirs "
                   "from mount point")

        # Check for owner and group of random file
        for mount_obj in self.mounts:
            cmd = ("ls -l %s/Squashfile5 | awk '{print $3, $4}' | sort" %
                   mount_obj.mountpoint)
            ret, out, err = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, err)
            self.assertIn("nfsnobody nfsnobody", out,
                          "Owner and group of file is NOT NFSNOBODY")
            g.log.info("Owner and group of file is NFSNOBODY")

        # Edit file created by root user
        for mount_obj in self.mounts:
            cmd = ("echo hello > %s/file10" % mount_obj.mountpoint)
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertEqual(
                ret, 1, "nfsnobody user editing file created by "
                "root user should FAIL")
            g.log.info("nfsnobody user failed to edit file "
                       "created by root user")
Example #18
    def test_root_squash_enable(self):
        """
        Tests to verify Nfs Ganesha rootsquash functionality when volume
        is restarted
        Steps:
        1. Create some files and dirs inside mount point
        2. Set permission as 777 for mount point
        3. Enable root-squash on volume
        4. Create some more files and dirs
        5. Restart volume
        6. Try to edit a file created in step 1
           Editing the file should fail
        7. Try to edit a file created in step 4
           Editing the file should succeed
        """
        # Start IO on mount point.
        cmd = ("for i in {1..10}; do touch %s/file$i; done" %
               self.mounts[0].mountpoint)
        ret, _, err = g.run(self.mounts[0].client_system,
                            cmd,
                            user=self.mounts[0].user)
        self.assertEqual(ret, 0, err)

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successful in getting stats of files/dirs "
                   "from mount point")

        # Set mount point permission to 777
        ret = set_file_permissions(self.mounts[0].client_system,
                                   self.mounts[0].mountpoint, 777)
        self.assertTrue(ret, "Failed to set permission for directory")
        g.log.info("Successfully set permissions for directory")

        # Enable root-squash on volume
        ret = set_root_squash(self.servers[0], self.volname)
        self.assertTrue(ret, "Failed to enable root-squash on volume")
        g.log.info("root-squash is enable on the volume")

        # Start IO on mount point.
        cmd = ("for i in {1..10}; do touch %s/Squashfile$i; done" %
               self.mounts[0].mountpoint)
        ret, _, err = g.run(self.mounts[0].client_system,
                            cmd,
                            user=self.mounts[0].user)
        self.assertEqual(ret, 0, err)

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successful in getting stats of files/dirs "
                   "from mount point")

        # Stopping volume
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to stop volume %s" % self.volname))
        g.log.info("Successful in stopping volume %s", self.volname)

        # Waiting for few seconds for volume unexport. Max wait time is
        # 120 seconds.
        ret = wait_for_nfs_ganesha_volume_to_get_unexported(
            self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to unexport volume %s after "
                              "stopping volume" % self.volname))
        g.log.info("Volume is unexported successfully")

        # Starting volume
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to start volume %s" % self.volname))
        g.log.info("Successful in starting volume %s", self.volname)

        # Waiting for few seconds for volume export. Max wait time is
        # 120 seconds.
        ret = wait_for_nfs_ganesha_volume_to_get_exported(
            self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to export volume %s after "
                              "starting volume" % self.volname))
        g.log.info("Volume is exported successfully")

        # Edit file created by root user
        for mount_obj in self.mounts:
            ret = append_string_to_file(mount_obj.client_system,
                                        "%s/file10" % mount_obj.mountpoint,
                                        'hello')
            self.assertFalse(
                ret, "Unexpected:nfsnobody user editing file "
                "created by root user should FAIL")
            g.log.info("Successful:nfsnobody user failed to edit file "
                       "created by root user")

        # Edit the file created by nfsnobody user
        for mount_obj in self.mounts:
            ret = append_string_to_file(
                mount_obj.client_system,
                "%s/Squashfile5" % mount_obj.mountpoint, 'hello')
            self.assertTrue(
                ret, "Unexpected:nfsnobody user failed to edit "
                "the file created by nfsnobody user")
            g.log.info("Successful:nfsnobody user successfully edited the "
                       "file created by nfsnobody user")
Example #19
    def test_dir_gfid_heal_on_all_subvols(self):
        """
        - Create a volume and mount it.
        - Create a directory on mount and check whether all the bricks have
          the same gfid.
        - Now delete gfid attr from all but one backend bricks,
        - Do lookup from the mount.
        - Check whether all the bricks have the same gfid assigned.
        """

        # Create a directory on the mount
        cmd = ("/usr/bin/env python %s create_deep_dir -d 0 -l 0 "
               "%s/dir1" %
               (self.script_upload_path, self.mounts[0].mountpoint))
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "Failed to create directory on mountpoint")
        g.log.info("Directory created successfully on mountpoint")

        # Verify gfids are same on all the bricks and get dir1 gfid
        bricks_list = get_all_bricks(self.mnode, self.volname)[1:]
        dir_gfid = self.verify_gfid_and_retun_gfid("dir1")

        # Delete gfid attr from all but one backend bricks
        for brick in bricks_list:
            brick_node, brick_path = brick.split(":")
            ret = delete_fattr(brick_node, '%s/dir1' % (brick_path),
                               'trusted.gfid')
            self.assertTrue(
                ret, 'Failed to delete gfid for brick '
                'path %s:%s/dir1' % (brick_node, brick_path))
            g.log.info("Successfully deleted gfid xattr for %s:%s/dir1",
                       brick_node, brick_path)
        g.log.info(
            "Successfully deleted gfid xattr for dir1 on the "
            "following bricks %s", str(bricks_list))

        # Trigger heal from mount point
        sleep(10)
        for mount_obj in self.mounts:
            g.log.info("Triggering heal for %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            command = ('cd %s; ls -l' % mount_obj.mountpoint)
            ret, _, _ = g.run(mount_obj.client_system, command)
            self.assertFalse(
                ret, 'Failed to run lookup '
                'on %s ' % mount_obj.client_system)
            sleep(10)

        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Failed to stat lookup on clients")
        g.log.info('stat lookup on clients succeeded')

        # Verify that all gfids for dir1 are same and get the gfid
        dir_gfid_new = self.verify_gfid_and_retun_gfid("dir1")
        self.assertTrue(
            all(gfid in dir_gfid for gfid in dir_gfid_new),
            'Previous gfid and new gfid are not equal, '
            'which is not expected, previous gfid %s '
            'and new gfid %s' % (dir_gfid, dir_gfid_new))
        g.log.info('gfid heal was successful from client lookup and all '
                   'backend bricks have same gfid xattr, no gfid mismatch')
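
verify_gfid_and_retun_gfid is a helper on the test class and is not shown here. A plausible sketch of the underlying check, assuming it reads the trusted.gfid xattr of the directory on every brick (an assumption, not the author's helper):

from glusto.core import Glusto as g
from glustolibs.gluster.brick_libs import get_all_bricks


def collect_dir_gfids(mnode, volname, dirname):
    # Hypothetical stand-in: read trusted.gfid for <dirname> on each brick
    # and return the hex values so the caller can compare them.
    gfids = []
    for brick in get_all_bricks(mnode, volname):
        brick_node, brick_path = brick.split(":")
        cmd = ("getfattr -n trusted.gfid -e hex --only-values %s/%s"
               % (brick_path, dirname))
        ret, out, _ = g.run(brick_node, cmd)
        if ret == 0:
            gfids.append(out.strip())
    return gfids
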
    def test_validate_snaps_256(self):

        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Set config for 256 snaps (to make sure to override the default)
        cmd_str = ("gluster snapshot config snap-max-hard-limit 256"
                   " --mode=script")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0, "Failed to set snap-max-hard-limit to 256.")
        g.log.info("snap-max-hard-limit successfully set to 256.")

        # Create 256 snaps
        for i in range(1, 257, 1):
            cmd_str = "gluster snapshot create %s %s %s" % (
                "snapy%s" % i, self.volname, "no-timestamp")
            ret, _, _ = g.run(self.mnode, cmd_str)
            self.assertEqual(
                ret, 0, ("Failed to create snapshot for %s" % self.volname))
            g.log.info("Snapshot %s created successfully for volume  %s",
                       "snapy%s" % i, self.volname)

        # Check for no. of snaps using snap_list it should be 256
        snap_list = get_snap_list(self.mnode)
        self.assertTrue((len(snap_list) == 256), "No of snaps not consistent "
                        "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")

        # Validate all 256 snap names created during
        for i in range(1, 257, 1):
            self.assertTrue(("snapy%s" % i in snap_list), "%s snap not "
                            "found " % ("snapy%s" % i))
        g.log.info("Successfully validated names of snap")

        # Try to create 257th snapshot
        cmd_str = "gluster snapshot create %s %s %s" % ("snap", self.volname,
                                                        "no-timestamp")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 1, ("Unexpected: Successfully created 'snap'"
                                  " for volume %s" % self.volname))
        g.log.info("Snapshot 'snap' not created as it is 257th snap")

        # Check for no. of snaps using snap_list it should be 256
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            256, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")
Example #21
    def test_subdir_with_removebrick(self):

        # pylint: disable=too-many-statements
        """
        Mount the volume
        Create 2 subdir on client subdir1 and subdir2
        Auth allow - Client1(subdir1,subdir2),Client2(subdir1,subdir2)
        Mount the subdir to their respective clients
        Start IO's on both subdirs
        Perform remove-brick
        Validate on client if subdir's are mounted post remove-brick
        operation is performed
        """
        # Create  directories subdir1 and subdir2 on mount point
        ret = mkdir(self.mounts[0].client_system,
                    "%s/subdir1" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'subdir1' in"
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        ret = mkdir(self.mounts[0].client_system,
                    "%s/subdir2" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'subdir2' in"
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        # unmount volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volumes UnMount failed")
        g.log.info("Volumes UnMounted successfully")

        # Set authentication on the subdirectory subdir1
        # and subdir2 to access by 2 clients
        g.log.info(
            'Setting authentication on subdir1 and subdir2 '
            'for clients %s and %s', self.clients[0], self.clients[1])
        ret = set_auth_allow(
            self.volname, self.mnode, {
                '/subdir1': [self.clients[0], self.clients[1]],
                '/subdir2': [self.clients[0], self.clients[1]]
            })
        self.assertTrue(
            ret, 'Failed to set authentication on volume %s' % self.volname)

        self.mpoint = "/mnt/Mount_Point1"

        # Mount Subdir1 mount on client 1
        _, _, _ = mount_volume("%s/subdir1" % self.volname, self.mount_type,
                               self.mpoint, self.mnode, self.clients[0])

        # Checking subdir1 is mounted or not
        ret = is_mounted("%s/subdir1" % self.volname, self.mpoint, self.mnode,
                         self.clients[0], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir1", self.volname,
                   self.mpoint)

        # Mount Subdir2 mount on client 2
        _, _, _ = mount_volume("%s/subdir2" % self.volname, self.mount_type,
                               self.mpoint, self.mnode, self.clients[1])

        # Checking subdir2 is mounted or not
        ret = is_mounted("%s/subdir2" % self.volname, self.mpoint, self.mnode,
                         self.clients[1], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir2", self.volname,
                   self.mpoint)

        # Start IO on all the subdir mounts.
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/subdir1" % self.volname
        self.subdir_mounts[1].volname = "%s/subdir2" % self.volname
        all_mounts_procs = []
        count = 1
        for mount_obj in self.subdir_mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       self.mpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, self.mpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.subdir_mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Perform remove brick operation when subdir is mounted on client
        g.log.info("Start removing bricks from volume")
        ret = shrink_volume(self.mnode, self.volname, rebalance_timeout=600)
        self.assertTrue(ret, ("Remove brick operation failed on "
                              "%s", self.volname))
        g.log.info("Remove brick operation is successful on "
                   "volume %s", self.volname)

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("All volume %s processes failed to come up "
                              "online", self.volname))
        g.log.info("All volume %s processes came up "
                   "online successfully", self.volname)

        # Log Volume Info and Status after performing remove brick
        g.log.info("Logging volume info and Status after shrinking volume")
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Logging volume info and status failed on "
                              "volume %s", self.volname))
        g.log.info("Successful in logging volume info and status of volume %s",
                   self.volname)

        # Again Checking subdir1 is mounted or not on Client 1
        ret = is_mounted("%s/subdir1" % self.volname, self.mpoint, self.mnode,
                         self.clients[0], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir1", self.volname,
                   self.mpoint)

        # Again Checking subdir2 is mounted or not on Client 2
        ret = is_mounted("%s/subdir2" % self.volname, self.mpoint, self.mnode,
                         self.clients[1], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir2", self.volname,
                   self.mpoint)
Example #22
    def test_new_volume_while_io_in_progress(self):
        """
        Create, export and mount new volume while IO running on mount of
        another volume
        Steps:
        1. Start IO on mount points
        2. Create another volume 'volume_new'
        3. Export volume_new through nfs-ganesha
        4. Mount the volume on clients
        """
        # pylint: disable=too-many-statements, too-many-locals
        # Start IO on all mount points
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        self.volname_new = '%s_new' % self.volname
        kwargs = {}
        dict_index = 0

        # Creating mounts list for mounting new volume
        self.mounts_new = []
        for mount_obj in self.mounts:
            self.mounts_new.append(deepcopy(mount_obj))
        for mount_obj in self.mounts_new:
            mount_obj.volname = self.volname_new
            mount_obj.mountpoint = '%s_new' % mount_obj.mountpoint

        # Fetch details for creating a replicate volume.
        replica_count = (
            self.default_volume_type_config['replicated']['replica_count'])
        servers_bricks_dict = get_servers_bricks_dict(self.all_servers,
                                                      self.all_servers_info)
        bricks_list = []
        kwargs['replica_count'] = replica_count
        kwargs['transport_type'] = (
            self.default_volume_type_config['replicated']['transport'])

        for num in range(0, replica_count):
            # Current_server is the server on which brick path will be created
            current_server = list(servers_bricks_dict.keys())[dict_index]
            current_server_unused_bricks_list = (list(
                servers_bricks_dict.values())[dict_index])
            if current_server_unused_bricks_list:
                brick_path = (
                    "%s:%s/%s_brick%s" %
                    (current_server, current_server_unused_bricks_list[0],
                     self.volname_new, num))
                bricks_list.append(brick_path)

                # Remove the added brick from the list
                list(servers_bricks_dict.values())[dict_index].pop(0)

            if dict_index < len(servers_bricks_dict) - 1:
                dict_index = dict_index + 1
            else:
                dict_index = 0

        # Create volume 'volume_new'
        ret, _, _ = volume_create(mnode=self.mnode,
                                  volname=self.volname_new,
                                  bricks_list=bricks_list,
                                  force=False,
                                  **kwargs)
        self.assertEqual(ret, 0,
                         "Unable to create volume %s" % self.volname_new)
        g.log.info("Successfully created volume %s", self.volname_new)

        ret, _, _ = volume_start(self.mnode, self.volname_new)
        self.assertEqual(ret, 0,
                         "Unable to start volume %s" % self.volname_new)

        # Wait for volume processes to be online
        g.log.info("Wait for volume %s processes to be online",
                   self.volname_new)
        ret = wait_for_volume_process_to_be_online(self.mnode,
                                                   self.volname_new)
        self.assertTrue(
            ret, "Wait timeout: Processes of volume %s are "
            "not online." % self.volname_new)
        g.log.info("Volume processes of volume %s are now online",
                   self.volname_new)

        # Export volume as nfs-ganesha export
        ret, _, _ = export_nfs_ganesha_volume(self.mnode, self.volname_new)
        self.assertEqual(
            ret, 0, "Failed to set ganesha.enable 'on' on "
            "volume %s" % self.volname_new)
        g.log.info(
            "Successful in setting ganesha.enable to 'on' on "
            "volume %s", self.volname_new)

        # Verify volume export
        ret = wait_for_nfs_ganesha_volume_to_get_exported(
            self.mnode, self.volname_new)
        self.assertTrue(
            ret, "Failed to export volume %s as nfs-ganesha "
            "export" % self.volname_new)
        g.log.info("Successfully exported volume %s", self.volname_new)

        # Mount the new volume
        for mount_obj in self.mounts_new:
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)

        # Verify mounts
        for mount_obj in self.mounts_new:
            ret = mount_obj.is_mounted()
            self.assertTrue(
                ret, ("Volume %s is not mounted on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Verified: Volume %s is mounted on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Export and mount of new volume %s is success.",
                   self.volname_new)

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all IO")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")
    def test_subdir_when_renamed(self):

        # pylint: disable=too-many-statements
        """
        Mount the volume
        Create 1 subdir on mountpoint "d1"
        Auth allow - Client1(d1),Client2(full volume)
        Mount the subdir "d1" on client1 and volume on client2
        Start IO's on all the mount points
        Perform rename operation from client2. Rename the subdir
        "d1" to "d1_renamed"
        Unmount volume and subdir from clients
        Try mounting "d1" on client 1. This should fail.
        Try mounting "d1_renamed" on client 1. This should fail.
        Again set authentication. Auth allow -
        Client1(d1_renamed), Client2(full volume)
        Mount "d1_renamed" on client1 and volume on client2
        """

        # Create  directory d1 on mount point
        ret = mkdir(self.mounts[0].client_system,
                    "%s/d1" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'd1' on"
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        # unmount volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volumes Unmount failed")
        g.log.info("Volumes Unmounted successfully")

        # Set authentication on the subdirectoy "d1" to access by client1
        # and volume to access by client2
        g.log.info(
            'Setting authentication on subdirectory d1 to access'
            'by client %s and on volume to access by client %s',
            self.clients[0], self.clients[1])
        ret = set_auth_allow(self.volname, self.mnode, {
            '/d1': [self.clients[0]],
            '/': [self.clients[1]]
        })
        self.assertTrue(
            ret, 'Failed to set authentication on volume %s' % self.volname)

        # Creating mount list for mounting subdir mount and volume
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/d1" % self.volname
        self.subdir_mounts[0].client_system = self.clients[0]
        self.subdir_mounts[1].client_system = self.clients[1]

        # Mount Subdirectory d1 on client 1 and volume on client 2
        for mount_obj in self.subdir_mounts:
            mountpoint = mount_obj.mountpoint
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount  %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Successfully mounted sub directory and volume to"
                   "authenticated clients")

        # Start IO on all the mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.subdir_mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.subdir_mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Rename the subdir "d1" to "d1_renamed" from client2
        source_fpath = "%s/d1" % mountpoint
        dest_fpath = "%s/d1_renamed" % mountpoint
        ret = move_file(self.clients[1], source_fpath, dest_fpath)
        self.assertTrue(ret, "Rename subdirectory failed")
        g.log.info('Renamed directory %s to %s', source_fpath, dest_fpath)

        # unmount volume and subdir from client
        ret = self.unmount_volume(self.subdir_mounts)
        self.assertTrue(ret, "Volumes UnMount failed")
        g.log.info("Volumes Unmounted successfully")

        # Try mounting subdir "d1" on client1
        _, _, _ = mount_volume("%s/d1" % self.volname, self.mount_type,
                               mountpoint, self.mnode, self.clients[0])

        ret = is_mounted(self.volname, mountpoint, self.mnode, self.clients[0],
                         self.mount_type)
        self.assertEqual(
            ret, 0, "d1 mount should have failed.But d1 is"
            "successfully mounted on mount point: %s" % mountpoint)
        g.log.info("subdir %s/d1 is not mounted as expected %s", self.volname,
                   mountpoint)

        # Try mounting subdir "d1_renamed" on client1
        _, _, _ = mount_volume("%s/d1_renamed" % self.volname, self.mount_type,
                               mountpoint, self.mnode, self.clients[0])

        ret = is_mounted("%s/d1_renamed" % self.volname, mountpoint,
                         self.mnode, self.clients[0], self.mount_type)
        self.assertEqual(
            ret, 0, "d1_renamed mount should have failed.But"
            "d1_renamed is successfully mounted on : %s" % mountpoint)
        g.log.info("subdir %s/d1_renamed is not mounted as expected %s",
                   self.volname, mountpoint)

        # Set authentication on the subdirectoy "d1_renamed" to access
        # by client1 and volume to access by client2
        g.log.info(
            'Setting authentication on subdirectory d1_renamed to'
            'access by client %s and on volume to access by client %s',
            self.clients[0], self.clients[1])
        ret = set_auth_allow(self.volname, self.mnode, {
            '/d1_renamed': [self.clients[0]],
            '/': [self.clients[1]]
        })
        self.assertTrue(
            ret, 'Failed to set authentication on volume %s' % self.volname)

        # Overwriting the list of subdir mount, directory d1 to d1_renamed
        self.subdir_mounts[0].volname = "%s/d1_renamed" % self.volname

        # Mount Subdirectory d1_renamed on client 1 and volume on client 2
        for mount_obj in self.subdir_mounts:
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount  %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)

        g.log.info("Successfully mounted sub directory and volume to"
                   "authenticated clients")

        # Get stat of all the files/dirs created from both clients.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")
Example #24
    def test_restore_online_vol(self):

        # pylint: disable=too-many-statements
        """
        Steps:
        1. Create volume
        2. Mount volume
        3. Perform I/O on mounts
        4. Create 1 snapshots snapy1
        5. Validate snap created
        6. Perform some more I/O
        7. Create 1 more snapshot snapy2
        8. Restore volume to snapy2
          -- Restore should fail with message
             "volume needs to be stopped before restore"
        """

        # Performing step 3 to 7 in loop here
        self.counter = 1
        for i in range(1, 3):
            # Perform I/O
            g.log.info("Starting IO on all mounts...")
            self.all_mounts_procs = []
            for mount_obj in self.mounts:
                g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                           mount_obj.mountpoint)
                cmd = ("python %s create_deep_dirs_with_files "
                       "--dirname-start-num %d "
                       "--dir-depth 2 "
                       "--dir-length 2 "
                       "--max-num-of-dirs 2 "
                       "--num-of-files 2 %s" %
                       (self.script_upload_path, self.counter,
                        mount_obj.mountpoint))

                proc = g.run_async(mount_obj.client_system,
                                   cmd,
                                   user=mount_obj.user)
                self.all_mounts_procs.append(proc)
            self.io_validation_complete = False

            # Validate IO
            self.assertTrue(
                validate_io_procs(self.all_mounts_procs, self.mounts),
                "IO failed on some of the clients")
            self.io_validation_complete = True

            # Get stat of all the files/dirs created.
            g.log.info("Get stat of all the files/dirs created.")
            ret = get_mounts_stat(self.mounts)
            self.assertTrue(ret, "Stat failed on some of the clients")
            g.log.info("Successfully got stat of all files/dirs created")

            # Create snapshot
            g.log.info("Creating snapshot for volume %s", self.volname)
            ret, _, _ = snap_create(self.mnode, self.volname, "snapy%s" % i)
            self.assertEqual(
                ret, 0, ("Failed to create snapshot for %s" % self.volname))
            g.log.info("Snapshot created successfully for volume  %s",
                       self.volname)

            # Check the number of snaps using snap_list
            snap_list = get_snap_list(self.mnode)
            self.assertEqual(
                i, len(snap_list), "No of snaps not consistent "
                "for volume %s" % self.volname)
            g.log.info("Successfully validated number of snaps.")

            # Increase counter for next iteration
            self.counter = 1000

        # Restore volume to snapshot snapy2, it should fail
        i = 2
        g.log.info("Starting to restore volume to snapy%s", i)
        ret, _, err = snap_restore(self.mnode, "snapy%s" % i)
        errmsg = ("snapshot restore: failed: Volume (%s) has been started. "
                  "Volume needs to be stopped before restoring a snapshot.\n" %
                  self.volname)
        log_msg = ("Expected : %s, but Returned : %s", errmsg, err)
        self.assertEqual(err, errmsg, log_msg)
        g.log.info("Expected : Failed to restore volume to snapy%s", i)
    def test_validate_snaps_create(self):
        # Creating snapshot using gluster snapshot create <snap1> <vol-name>
        cmd_str = "gluster snapshot create %s %s" % ("snap1", self.volname)
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0,
                         "Failed to create snapshot for %s" % self.volname)
        g.log.info("Snapshot snap1 created successfully for volume %s",
                   self.volname)
        """ Create snapshot of volume using
           -- gluster snapshot create <snap2> <vol-name(s)> [description
          <description with words and quotes>]
        """
        desc = 'description this is a snap with "snap2" name and description'
        cmd_str = ("gluster snapshot create %s %s %s" %
                   ("snap2", self.volname, desc))
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0,
                         "Failed to create snapshot for %s" % self.volname)
        g.log.info("Snapshot snap2 created successfully for volume %s",
                   self.volname)

        # Create one more snapshot of volume using force
        cmd_str = ("gluster snapshot create %s %s %s" %
                   ("snap3", self.volname, "force"))
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0,
                         "Failed to create snapshot for %s" % self.volname)
        g.log.info("Snapshot snap3 created successfully for volume %s",
                   self.volname)

        # Create one more snapshot of volume using no-timestamp option
        cmd_str = ("gluster snapshot create %s %s %s" %
                   ("snap4", self.volname, "no-timestamp"))
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0,
                         "Failed to create snapshot for %s" % self.volname)
        g.log.info("Snapshot snap4 created successfully for volume %s",
                   self.volname)
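        # Note: without the no-timestamp option, gluster appends a GMT
        # timestamp to the snapshot name (e.g. snap4_GMT-...); the option
        # keeps the name exactly as given.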

        # Delete all snaps
        ret, _, _ = snap_delete_all(self.mnode)
        self.assertEqual(ret, 0, "Snapshot delete failed.")
        g.log.info("Successfully deleted all snaps")

        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Create 5 snaps while IO is in progress
        for i in range(0, 5):
            cmd_str = "gluster snapshot create %s %s %s" % (
                "snapy%s" % i, self.volname, "no-timestamp")
            ret, _, _ = g.run(self.mnode, cmd_str)
            self.assertEqual(
                ret, 0, "Failed to create snapshot for %s" % self.volname)
            g.log.info("Snapshot snapy%s created successfully for volume %s",
                       i, self.volname)

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Check the number of snaps using snap_list; it should be 5 now
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(
            5, len(snap_list), "No of snaps not consistent "
            "for volume %s" % self.volname)
        g.log.info("Successfully validated number of snaps.")

        # Validate all snaps created during IO
        for i in range(0, 5):
            self.assertIn("snapy%s" % i, snap_list, "%s snap not "
                          "found " % ("snapy%s" % i))
        g.log.info("Sucessfully validated names of snap")
    def test_root_squash_enable(self):
        """
        Tests to verify Nfs Ganesha rootsquash functionality when glusterd
        service is restarted
        Steps:
        1. Create some files and dirs inside mount point
        2. Set permission as 777 for mount point
        3. Enable root-squash on volume
        4. Create some more files and dirs
        5. Restart glusterd on all the nodes
        6. Try to edit the file created in step 1
           Editing should fail
        7. Try to edit the file created in step 4
           Editing should succeed
        """
        # Start IO on mount point.
        cmd = ("for i in {1..10}; do touch %s/file$i; done"
               % self.mounts[0].mountpoint)
        ret, _, err = g.run(self.mounts[0].client_system, cmd,
                            user=self.mounts[0].user)
        self.assertEqual(ret, 0, err)

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successful in getting stats of files/dirs "
                   "from mount point")

        # Set mount point permission to 777
        ret = set_file_permissions(self.mounts[0].client_system,
                                   self.mounts[0].mountpoint, 777)
        self.assertTrue(ret, "Failed to set permission for directory")
        g.log.info("Successfully set permissions for directory")

        # Enable root-squash on volume
        ret = set_root_squash(self.servers[0], self.volname)
        self.assertTrue(ret, "Failed to enable root-squash on volume")
        g.log.info("root-squash is enable on the volume")

        # Start IO on mount point.
        cmd = ("for i in {1..10}; do touch %s/Squashfile$i; done"
               % self.mounts[0].mountpoint)
        ret, _, err = g.run(self.mounts[0].client_system, cmd,
                            user=self.mounts[0].user)
        self.assertEqual(ret, 0, err)

        # Get stat of all the files/dirs created.
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfull in getting stats of files/dirs "
                   "from mount point")

        # Restart glusterd on all servers
        ret = restart_glusterd(self.servers)
        self.assertTrue(ret, ("Failed to restart glusterd on all servers %s",
                              self.servers))
        g.log.info("Successfully restarted glusterd on all servers %s",
                   self.servers)

        # Check if glusterd is running on all servers
        ret = is_glusterd_running(self.servers)
        self.assertEqual(ret, 0, ("Failed:Glusterd is not running on all "
                                  "servers %s",
                                  self.servers))
        g.log.info("Glusterd is running on all the servers %s", self.servers)

        # Checking if peer is connected.
        ret = wait_for_peers_to_connect(self.mnode, self.servers)
        self.assertTrue(ret, "Failed:Peer is not in connected state.")
        g.log.info("Peers are in connected state.")

        # Edit file created by root user
        for mount_obj in self.mounts:
            ret = append_string_to_file(mount_obj.client_system,
                                        "%s/file10" % mount_obj.mountpoint,
                                        'hello')
            self.assertFalse(ret, "Unexpected:nfsnobody user editing file "
                                  "created by root user should FAIL")
            g.log.info("Successful:nfsnobody user failed to edit file "
                       "created by root user")

        # Edit the file created by nfsnobody user
        for mount_obj in self.mounts:
            ret = append_string_to_file(mount_obj.client_system,
                                        "%s/Squashfile5"
                                        % mount_obj.mountpoint,
                                        'hello')
            self.assertTrue(ret, "Unexpected:nfsnobody user failed to edit "
                            "the file created by nfsnobody user")
            g.log.info("Successful:nfsnobody user successfully edited the "
                       "file created by nfsnobody user")
    def test_volume_set_ops_sub_dirs_mounted(self):
        """
        Check volume start/volume stop/volume reset operations while sub-dirs
        are mounted

        Steps:
        1. Create two sub-directories on mounted volume.
        2. Unmount volume from clients.
        3. Mount each sub-directory to two different clients.
        4. Perform IO on mounts.
        5. Perform volume stop operation.
        6. Perform volume start operation.
        7. Perform volume reset operation.
        """
        # Creating two sub directories on mounted volume
        ret = mkdir(self.mounts[0].client_system,
                    "%s/d1" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'd1' in volume %s "
                  "from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        ret = mkdir(self.mounts[0].client_system,
                    "%s/d2" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'd2' in volume %s "
                  "from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))

        # Unmounting volumes
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Failed to un mount one or more volumes")
        g.log.info("Successfully un mounted all volumes")

        # Mounting one sub directory on each client.
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/d1" % self.volname
        self.subdir_mounts[1].volname = "%s/d2" % self.volname
        for mount_obj in self.subdir_mounts:
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount sub directory %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted sub directory %s on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Successfully mounted sub directories to clients.")

        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.subdir_mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.subdir_mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Stop volume
        g.log.info("Stopping volume: %s", self.volname)
        ret, _, _ = volume_stop(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to stop volume: %s" % self.volname)

        # Start volume
        g.log.info("Starting volume again: %s", self.volname)
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to start volume: %s" % self.volname)

        # Reset volume
        g.log.info("Resetting volume: %s", self.volname)
        ret, _, _ = volume_reset(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to reset volume: %s" % self.volname)
    def test_validate_snaps_max_limit(self):
        # pylint: disable=too-many-statements
        # Start IO on all mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" % (
                       self.script_upload_path, count,
                       mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system, cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Set config snap-max-hard-limit to 10 snaps
        cmd_str = ("gluster snapshot config snap-max-hard-limit 10"
                   " --mode=script")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0, "Failed to set snap-max-hard-limit to 10.")
        g.log.info("snap-max-hard-limit successfully set to 10.")

        # set config snap-max-soft-limit to 50%
        cmd_str = ("gluster snapshot config snap-max-soft-limit 50"
                   " --mode=script")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0, "Failed to set snap-max-soft-limit to 50%.")
        g.log.info("snap-max-soft-limit successfully set for 50%.")

        # Create 5 snaps
        for i in range(1, 6):
            cmd_str = "gluster snapshot create %s %s %s" % ("snapy%s" % i,
                                                            self.volname,
                                                            "no-timestamp")
            ret, _, _ = g.run(self.mnode, cmd_str)
            self.assertEqual(ret, 0, ("Failed to create snapshot for %s"
                                      % self.volname))
            g.log.info("Snapshot snapy%s created successfully"
                       " for volume  %s", i, self.volname)

        # Check the number of snaps using snap_list; it should be 5
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(5, len(snap_list), "Expected 5 snapshots. "
                         "Found %s snapshots" % len(snap_list))
        g.log.info("Successfully validated number of snapshots.")

        # Validate the names of all 5 snaps created above
        for i in range(1, 6):
            self.assertTrue(("snapy%s" % i in snap_list), "%s snap not "
                            "found " % ("snapy%s" % i))
        g.log.info("Successfully validated names of snapshots")

        # create 6th snapshot
        cmd_str = "gluster snapshot create %s %s %s" % ("snapy6", self.volname,
                                                        "no-timestamp")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0, ("Failed to create snap6 "
                                  "for %s" % self.volname))
        g.log.info("Snapshot 'snapy6' created as it is 6th snap")

        # set config snap-max-soft-limit to 100%
        cmd_str = ("gluster snapshot config snap-max-soft-limit 100"
                   " --mode=script")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0, "Failed to set snap-max-soft-limit to 100%.")
        g.log.info("snap-max-soft-limit successfully set for 100%.")

        # create 7th snapshot
        cmd_str = "gluster snapshot create %s %s %s" % ("snapy7", self.volname,
                                                        "no-timestamp")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0, ("Failed to create "
                                  "snap7 for %s" % self.volname))
        g.log.info("Snapshot 'snapy7' created as it is 7th snap")

        # Create 3 snaps
        for i in range(8, 11, 1):
            cmd_str = "gluster snapshot create %s %s %s" % ("snapy%s" % i,
                                                            self.volname,
                                                            "no-timestamp")
            ret, _, _ = g.run(self.mnode, cmd_str)
            self.assertEqual(ret, 0, ("Failed to create snapshot for %s"
                                      % self.volname))
            g.log.info("Snapshot snapy%s created successfully "
                       "for volume  %s", i, self.volname)

        # Check the number of snaps using snap_list; it should be 10
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(len(snap_list), 10, "Expected 10 snapshots. "
                         "found %s snapshots" % len(snap_list))
        g.log.info("Successfully validated number of snapshots.")

        # Validate all 10 snap names created
        for i in range(1, 11, 1):
            self.assertTrue(("snapy%s" % i in snap_list), "%s snap not "
                            "found " % ("snapy%s" % i))
        g.log.info("Successfully validated names of snapshots")

        # create 11th snapshot
        cmd_str = "gluster snapshot create %s %s %s" % ("snap", self.volname,
                                                        "no-timestamp")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertNotEqual(ret, 0, ("Unexpected: successfully created 'snap' "
                                     "for %s" % self.volname))
        g.log.info("Expected: Snapshot 'snap' not created as it is 11th snap")

        # Check the number of snaps using snap_list; it should still be 10
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(len(snap_list), 10, "Expected 10 snapshots. "
                         "found %s snapshots" % len(snap_list))
        g.log.info("Successfully validated number of snapshots.")

        # Modify config snap-max-hard-limit to 20 snaps
        cmd_str = ("gluster snapshot config snap-max-hard-limit 20"
                   " --mode=script")
        ret, _, _ = g.run(self.mnode, cmd_str)
        self.assertEqual(ret, 0, "Failed to set snap-max-hard-limit to 20.")
        g.log.info("snap-max-hard-limit successfully set to 20.")

        # Create 10 snaps
        for i in range(11, 21, 1):
            cmd_str = "gluster snapshot create %s %s %s" % ("snapy%s" % i,
                                                            self.volname,
                                                            "no-timestamp")
            ret, _, _ = g.run(self.mnode, cmd_str)
            self.assertEqual(ret, 0, ("Failed to create snapshot for %s"
                                      % self.volname))
            g.log.info("Snapshot snapy%s created successfully for "
                       "volume  %s", i, self.volname)

        # Check the number of snaps using snap_list; it should be 20
        snap_list = get_snap_list(self.mnode)
        self.assertEqual(len(snap_list), 20, "Expected 20 snapshots. "
                         "found %s snapshots" % len(snap_list))
        g.log.info("Successfully validated number of snaps.")