def test_rmdir_child_when_nonhash_vol_down(self):
        """
        Case 1:
        - create parent
        - bring down a non-hashed subvolume for directory child
        - create parent/child
        - rmdir /mnt/parent will fail with ENOTCONN
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        # pylint: disable=unsubscriptable-object

        # Create parent dir
        parent_dir = self.mountpoint + '/parent'
        child_dir = parent_dir + '/child'
        ret = mkdir(self.clients[0], parent_dir)
        self.assertTrue(ret, "mkdir failed")
        g.log.info("mkdir of parent directory %s successful", parent_dir)

        # Find a non hashed subvolume(or brick)
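        # (under DHT the directory exists on every subvolume, but "child"
        # hashes to exactly one of them; a non-hashed subvolume is any other
        # one, and rmdir of the parent still needs it to be online)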
        nonhashed_subvol, count = find_nonhashed_subvol(self.subvols,
                                                        "parent", "child")
        self.assertIsNotNone(nonhashed_subvol,
                             "Error in finding nonhashed value")
        g.log.info("nonhashed_subvol %s", nonhashed_subvol._host)

        # Bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ("Error in bringing down subvolume %s"
                              % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # Create child-dir
        ret = mkdir(self.clients[0], child_dir)
        self.assertTrue(ret, ('mkdir failed for %s ' % child_dir))
        g.log.info("mkdir of child directory %s successful", child_dir)

        # 'rmdir' on parent should fail with ENOTCONN
        ret = rmdir(self.clients[0], parent_dir)
        self.assertFalse(ret, ('Expected rmdir to fail for %s' % parent_dir))
        g.log.info("rmdir of parent directory %s failed as expected",
                   parent_dir)

        # Cleanup
        # Bring up the subvol - restart volume
        ret = volume_start(self.mnode, self.volname, force=True)
        self.assertTrue(ret, "Error in force start the volume")
        g.log.info('Volume restart success')
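        # give the restarted bricks a few seconds to come back online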
        sleep(10)

        # Delete parent_dir
        ret = rmdir(self.clients[0], parent_dir, force=True)
        self.assertTrue(ret, ('rmdir failed for %s ' % parent_dir))
        g.log.info("rmdir of directory %s successful", parent_dir)
    def tearDown(self):
        g.log.info("Tear down")

        for mount_point in self.mounts:
            g.log.debug('Removing temporary folder %s',
                        self.temp_folder)
            rmdir(mount_point.client_system, self.temp_folder,
                  force=True)

        g.log.info("Starting to Unmount Volume and Cleanup Volume")
        ret = self.unmount_volume_and_cleanup_volume(self.mounts)
        if not ret:
            g.log.error("Failed to Setup and Mount Volume")
            raise ExecutionError('Failed to unmount and clean volumes')
        self.get_super_method(self, 'tearDown')()

    @classmethod
    def tearDownClass(cls):
        """
        Clean up the volume and umount volume from client
        """
        # umount all volumes
        for mount_obj in cls.mounts:
            ret, _, _ = umount_volume(
                mount_obj.client_system, mount_obj.mountpoint)
            if ret:
                raise ExecutionError(
                    "Failed to umount on volume %s "
                    % cls.volname)
            g.log.info("Successfully umounted %s on client %s",
                       cls.volname, mount_obj.client_system)
            ret = rmdir(mount_obj.client_system, mount_obj.mountpoint)
            if not ret:
                raise ExecutionError(
                    "Failed to remove the mount directory.")
            g.log.info("Mount directory is removed successfully")

        # stopping all volumes
        g.log.info("Starting to Cleanup all Volumes")
        volume_list = get_volume_list(cls.mnode)
        for volume in volume_list:
            ret = cleanup_volume(cls.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup Volume %s" % volume)
            g.log.info("Volume: %s cleanup is done", volume)
        g.log.info("Successfully Cleanedup all Volumes")

        # calling GlusterBaseClass tearDownClass
        cls.get_super_method(cls, 'tearDownClass')()
    def test_rmdir_parent_pre_nonhash_vol_down(self):
        """
        Case 4:
        - Bring down a non-hashed subvol for parent_dir
        - mkdir parent
        - rmdir parent should fail with ENOTCONN
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        # pylint: disable=unsubscriptable-object

        nonhashed_subvol, count = find_nonhashed_subvol(self.subvols,
                                                        "/", "parent")
        self.assertIsNotNone(nonhashed_subvol,
                             'Error in finding nonhashed subvol')
        g.log.info("nonhashed subvol %s", nonhashed_subvol._host)

        # Bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s'
                              % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        parent_dir = self.mountpoint + '/parent'
        ret = mkdir(self.clients[0], parent_dir)
        self.assertTrue(ret, ('mkdir failed for %s ' % parent_dir))
        g.log.info("mkdir of parent directory %s successful", parent_dir)

        # 'rmdir' on parent should fail with ENOTCONN
        ret = rmdir(self.clients[0], parent_dir)
        self.assertFalse(ret, ('Expected rmdir to fail for %s' % parent_dir))
        g.log.info("rmdir of parent directory %s failed as expected",
                   parent_dir)

        # Cleanup
        # Bring up the subvol - restart volume
        ret = volume_start(self.mnode, self.volname, force=True)
        self.assertTrue(ret, "Error in force start the volume")
        g.log.info('Volume restart success.')
        sleep(10)

        # Delete parent_dir
        ret = rmdir(self.clients[0], parent_dir, force=True)
        self.assertTrue(ret, ('rmdir failed for %s ' % parent_dir))
        g.log.info("rmdir of directory %s successful", parent_dir)
    def tearDown(self):
        """
        If the test method failed before validating IO, tearDown waits for
        the IO to complete and checks its exit status.

        Cleanup and umount volume
        """
        if not self.io_validation_complete:
            g.log.info("Wait for IO to complete as IO validation did not "
                       "succeed in test method")
            ret = wait_for_io_to_complete(self.all_mounts_procs, self.mounts)
            if not ret:
                raise ExecutionError("IO failed on some of the clients")
            g.log.info("IO is successful on all mounts")

            # List all files and dirs created
            g.log.info("List all files and directories:")
            ret = list_all_files_and_dirs_mounts(self.mounts)
            if not ret:
                raise ExecutionError("Failed to list all files and dirs")
            g.log.info("Listing all files and directories is successful")

        # umount all volumes
        for mount_point in self.mount_points:
            ret, _, _ = umount_volume(self.client, mount_point)
            if ret:
                raise ExecutionError("Failed to umount on volume %s " %
                                     self.volname)
            g.log.info("Successfully umounted %s on client %s", self.volname,
                       self.client)
            ret = rmdir(self.client, mount_point)
            if not ret:
                raise ExecutionError(
                    "Failed to remove directory mount directory.")
            g.log.info("Mount directory is removed successfully")

        # stopping all volumes
        volume_list = get_volume_list(self.mnode)
        for volume in volume_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Failed to cleanup Volume %s" % volume)
            g.log.info("Volume: %s cleanup is done", volume)
        g.log.info("Successfully Cleanedup all Volumes")

        # calling GlusterBaseClass tearDown
        self.get_super_method(self, 'tearDown')()

    def _rmdir_on_mountpoint(self, clients, mountpoint):
        """ Perform rm of created files as part of Sanity Check """
        # Skipping the below lines of code as running 'rm -rf' in parallel
        # from multiple clients hits a known bug; refer to BZ-1787328.
        # cmd = 'rm -rf ' + mountpoint
        # results = g.run_parallel(clients, cmd)
        # for client, ret_values in results.items():
        #    ret, out, err = ret_values
        #    self.assertEqual(ret, 0, "rm -rf failed on %s with %s"
        #                     % (client, err))
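        # rmdir() with force=True issues 'rm -rf'; the '/*' glob clears the
        # entries under the mount point while leaving the mount point
        # directory itself in place.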
        ret = rmdir(choice(clients), mountpoint + '/*', force=True)
        self.assertTrue(ret, "rm -rf failed")
        ret, out, err = g.run(choice(clients), 'ls ' + mountpoint)
        self.assertEqual(
            (ret, out, err), (0, '', ''),
            "Some entries still exist even after rm -rf;"
            " the entries are %s and error msg is %s" % (out, err))
        g.log.info("rm -rf was successful")

    def tearDown(self):

        # Setting storage.reserve to Default
        ret = set_volume_options(self.mnode, self.volname,
                                 {'storage.reserve': '0'})
        if not ret:
            raise ExecutionError("Failed to reset storage reserve on %s" %
                                 self.mnode)
        g.log.info("Able to reset storage reserve successfully on %s",
                   self.mnode)

        # Unmounting the volume.
        ret, _, _ = umount_volume(mclient=self.mounts[0].client_system,
                                  mpoint=self.mounts[0].mountpoint)
        if ret:
            raise ExecutionError("Volume %s is not unmounted" % self.volname)
        g.log.info("Volume unmounted successfully : %s", self.volname)
        ret = rmdir(self.mounts[0].client_system, self.mounts[0].mountpoint)
        if not ret:
            raise ExecutionError("Failed to remove directory mount directory.")
        g.log.info("Mount directory is removed successfully")

        # clean up all volumes
        vol_list = get_volume_list(self.mnode)
        if not vol_list:
            raise ExecutionError("Failed to get the volume list")

        for volume in vol_list:
            ret = cleanup_volume(self.mnode, volume)
            if not ret:
                raise ExecutionError("Unable to delete volume % s" % volume)
            g.log.info("Volume deleted successfully : %s", volume)

        # Cleaning the deleted volume bricks
        for brick in self.brick_list:
            node, brick_path = brick.split(':')
            cmd = "rm -rf " + brick_path
            ret, _, _ = g.run(node, cmd)
            if ret:
                raise ExecutionError("Failed to delete the brick dirs of "
                                     "the deleted volume")

        self.get_super_method(self, 'tearDown')()

    def test_afr_reset_brick_heal_full(self):
        """
         1. Create files/dirs from mount point
         2. With IO in progress execute reset-brick start
         3. Now format the disk from back-end, using rm -rf <brick path>
         4. Execute reset brick commit and check for the brick is online.
         5. Issue volume heal using "gluster vol heal <volname> full"
         6. Check arequal for all bricks to verify that all backend bricks,
            including the reset brick, have the same data
        """
        self.all_mounts_procs = []
        for count, mount_obj in enumerate(self.mounts):
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d --dir-depth 3 --dir-length 5 "
                   "--max-num-of-dirs 5 --num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)

        all_bricks = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(all_bricks, "Unable to fetch bricks of volume")
        brick_to_reset = choice(all_bricks)

        # Start reset brick
        ret, _, err = reset_brick(self.mnode,
                                  self.volname,
                                  src_brick=brick_to_reset,
                                  option="start")
        self.assertEqual(ret, 0, err)
        g.log.info("Reset brick: %s started", brick_to_reset)

        # Validate the brick is offline
        ret = are_bricks_offline(self.mnode, self.volname, [brick_to_reset])
        self.assertTrue(ret, "Brick:{} is still online".format(brick_to_reset))

        # rm -rf of the brick directory
        node, brick_path = brick_to_reset.split(":")
        ret = rmdir(node, brick_path, force=True)
        self.assertTrue(
            ret, "Unable to delete the brick {} on "
            "node {}".format(brick_path, node))

        # Reset brick commit
        ret, _, err = reset_brick(self.mnode,
                                  self.volname,
                                  src_brick=brick_to_reset,
                                  option="commit")
        self.assertEqual(ret, 0, err)
        g.log.info("Reset brick committed successfully")

        # Check the brick is online
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(
            ret, "Few volume processess are offline for the "
            "volume: {}".format(self.volname))

        # Trigger full heal
        ret = trigger_heal_full(self.mnode, self.volname)
        self.assertTrue(ret, "Unable  to trigger the heal full command")

        # Wait for the heal completion
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, "Heal didn't complete in 20 mins time")

        # Validate io on the clients
        ret = validate_io_procs(self.all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on the mounts")
        self.all_mounts_procs = []

        # Check arequal of the back-end bricks after heal completion
        all_subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
        for subvol in all_subvols:
            ret, arequal_from_subvol = collect_bricks_arequal(subvol)
            self.assertTrue(
                ret, "Failed to collect arequal across the bricks in the"
                " subvol {}".format(subvol))
            self.assertEqual(
                len(set(arequal_from_subvol)), 1, "Arequal mismatch across "
                "the bricks in the subvol {}".format(subvol))
    def test_limit_usage_deep_dir(self):
        # pylint: disable=too-many-statements
        """
        Verifying directory quota functionality with respect to the
        limit-usage option. Set limits on various directories [breadth]
        and check for the quota list of all the directories.

        * Enable Quota
        * Create 10 directories one inside the other and set limit of 1GB
          on each directory
        * Perform a quota list operation
        * Create some random amount of data inside each directory
        * Perform a quota list operation
        * Remove the quota limit and delete the data
        """
        # Enable Quota
        g.log.info("Enabling quota on the volume %s", self.volname)
        ret, _, _ = quota_enable(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to enable quota on the volume %s",
                                  self.volname))
        g.log.info("Successfully enabled quota on the volume %s", self.volname)

        # Create deep directories in the mount point
        for mount_object in self.mounts:
            g.log.info("Creating directories on %s:%s",
                       mount_object.client_system, mount_object.mountpoint)
            ret = mkdir(mount_object.client_system,
                        "%s/dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9/dir10"
                        % (mount_object.mountpoint), parents=True)
            self.assertTrue(ret, ("Failed to create dir under %s-%s",
                                  mount_object.client_system,
                                  mount_object.mountpoint))
            g.log.info("Successfully created deep directories on %s:%s",
                       mount_object.client_system, mount_object.mountpoint)

        # Validate IO
        self.assertTrue(
            validate_io_procs(self.all_mounts_procs, self.mounts),
            "IO failed on some of the clients"
        )

        # Set soft timeout to 1 second
        g.log.info("Set quota soft timeout:")
        ret, _, _ = quota_set_soft_timeout(self.mnode, self.volname, '1sec')
        self.assertEqual(ret, 0, ("Failed to set soft timeout"))
        g.log.info("Quota soft timeout set successful")

        # Set hard timeout to 0 second
        g.log.info("Set quota hard timeout:")
        ret, _, _ = quota_set_hard_timeout(self.mnode, self.volname, '0sec')
        self.assertEqual(ret, 0, ("Failed to set hard timeout"))
        g.log.info("Quota hard timeout set successful")

        # Get dir list
        g.log.info('Getting dir list in %s', self.volname)
        cmd = ("ls -R %s | grep ':' | tr -d :" % self.mounts[0].mountpoint)
        ret, out, err = g.run(self.mounts[0].client_system, cmd)
        g.log.info('mountpoint %s', self.mounts[0].mountpoint)
        self.assertFalse(ret, err)
        dir_list = out.split()
        for dir_name in dir_list:
            # Parsed to remove the mount point as quota doesn't work when
            # passed with mountpoint.
            tmp_name = dir_name.replace(self.mounts[0].mountpoint, "")
            dir_list[dir_list.index(dir_name)] = '%s' % tmp_name
        # The first entry of 'ls -R' is the mount point itself, which is not
        # needed for setting quota limits.
        dir_list.pop(0)

        # Set limit of 1 GB on every directory created inside the mountpoint
        g.log.info("Set Quota Limit on each directory of the volume %s",
                   self.volname)
        for dir_name in dir_list:
            ret, _, _ = quota_limit_usage(self.mnode, self.volname,
                                          dir_name, '1GB')
            self.assertFalse(ret, "Failed to set Quota for dir %s" %
                             dir_name)
            g.log.info("Set quota for dir %s successfully", dir_name)
        g.log.info("Successfully set the Quota limit on each path of the "
                   "volume %s", self.volname)

        # Validate quota on every Directory of the Volume
        g.log.info("Get Quota list for every directory on the volume %s",
                   self.volname)
        for dir_name in dir_list:
            ret = quota_validate(self.mnode, self.volname, path=dir_name,
                                 hard_limit=1073741824)
            self.assertTrue(ret, "Quota validate Failed for dir %s" %
                            dir_name)

        # Create some data inside each directory and do a quota validate
        self.all_mounts_procs = []
        for mount_object in self.mounts:
            g.log.info("Creating Files on %s:%s", mount_object.client_system,
                       mount_object.mountpoint)
            # Data creation
            # Creates one file of rand[0] size in each dir
            rand = random.sample([1, 10, 512], 1)
            cmd = ("/usr/bin/env python %s create_files "
                   "--fixed-file-size %sk %s/%s" % (
                       self.script_upload_path,
                       rand[0], mount_object.mountpoint, dir_list[0]))

            ret, _, _ = g.run(mount_object.client_system, cmd)
            self.assertFalse(ret, "Failed to create files")

            # quota_validate for each dir
            for dir_num, dir_name in enumerate(dir_list):
                # To calculate the dir usage for quota
                usage = (rand[0] * 1024) + \
                         ((len(dir_list) - (dir_num + 1)) * rand[0] * 1024)
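                # e.g. with rand[0] = 10 and 10 nested dirs, dir1 (dir_num 0)
                # should report 10 * 10240 = 102400 bytes and the deepest dir
                # 10240 bytes, since a directory's quota usage includes
                # everything beneath it (one rand[0] KB file per directory).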
                if usage >= 1073741824:
                    raise ExecutionError("usage crossed hardlimit")
                ret = quota_validate(self.mnode, self.volname, path=dir_name,
                                     hard_limit=1073741824, used_space=usage)
                self.assertTrue(ret, "Quota validate Failed for dir %s" %
                                dir_name)
                g.log.info("Quota list validate  and file created successful "
                           "for %s", dir_name)
            g.log.info("Files created and quota validated successfully")

        # Deleting data and validating quota
        self.all_mounts_procs = []
        # Deleting deep directories in the mount point
        for mount_object in self.mounts:
            ret = rmdir(mount_object.client_system, "%s/dir1/dir2" %
                        (mount_object.mountpoint), force=True)
            self.assertTrue(ret, ("Failed to delete dir under %s/dir1/dir2"
                                  % (mount_object.mountpoint)))
            g.log.info("Successfully deleted deep directories")
            # Quota validate
            # converting into bytes
            usage = (rand[0] * 1024)
            ret = quota_validate(self.mnode, self.volname,
                                 path=dir_list[0],
                                 used_space=usage)
            self.assertTrue(ret, "Quota validate Failed for dir /dir1")
            g.log.info("Quota list validate successful for /dir1")

        # Remove Quota limit
        g.log.info("Get Quota list for every directory on the volume %s",
                   self.volname)
        ret = quota_remove(self.mnode, self.volname, path=dir_list[0])
        self.assertTrue(ret, "Failed to remove Quota for dir %s" % dir_name)
        g.log.info("Quota remove  for dir %s successfully", dir_name)
    def test_rm_file_when_nonhash_vol_down(self):
        """
        Case 3:
        - create parent
        - mkdir parent/child
        - touch parent/child/file
        - bring down a subvol where the file is not present
        - rm -rf parent
            - Only file should be deleted
            - rm -rf of parent should fail with ENOTCONN
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        # pylint: disable=unsubscriptable-object

        # Create parent dir
        parent_dir = self.mountpoint + '/parent'
        child_dir = parent_dir + '/child'
        ret = mkdir(self.clients[0], parent_dir)
        self.assertTrue(ret, ('mkdir failed for %s ' % parent_dir))
        g.log.info("mkdir of parent directory %s successful", parent_dir)

        # Create child dir
        ret = mkdir(self.clients[0], child_dir)
        self.assertTrue(ret, ('mkdir failed for %s ' % child_dir))
        g.log.info("mkdir of child directory %s successful", child_dir)

        # Create a file under child_dir
        file_one = child_dir + '/file_one'
        ret, _, err = g.run(self.clients[0], ("touch %s" % file_one))
        self.assertFalse(ret, ('touch failed for %s err: %s' %
                               (file_one, err)))

        # Find a non hashed subvolume(or brick)
        nonhashed_subvol, count = find_nonhashed_subvol(self.subvols,
                                                        "parent/child",
                                                        "file_one")
        self.assertIsNotNone(nonhashed_subvol,
                             "Error in finding nonhashed value")
        g.log.info("nonhashed_subvol %s", nonhashed_subvol._host)

        # Bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s'
                              % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rm -rf' on parent should fail with ENOTCONN
        ret = rmdir(self.clients[0], parent_dir, force=True)
        self.assertFalse(ret, ('Expected rmdir to fail for %s' % parent_dir))
        g.log.info("rmdir of parent directory %s failed as expected",
                   parent_dir)

        brickobject = create_brickobjectlist(self.subvols, "parent/child")
        self.assertIsNotNone(brickobject,
                             "could not create brickobject list")
        # Make sure file_one is deleted
        for brickdir in brickobject:
            dir_path = "%s/parent/child/file_one" % brickdir.path
            brick_path = dir_path.split(":")
            self.assertFalse(file_exists(brickdir._host, brick_path[1]),
                             ('Expected file %s not to exist on the bricks'
                              % file_one))
        g.log.info("file is deleted as expected")

        # Cleanup
        # Bring up the subvol - restart volume
        ret = volume_start(self.mnode, self.volname, force=True)
        self.assertTrue(ret, "Error in force start the volume")
        g.log.info('Volume restart success.')
        sleep(10)

        # Delete parent_dir
        ret = rmdir(self.clients[0], parent_dir, force=True)
        self.assertTrue(ret, ('rmdir failed for %s ' % parent_dir))
        g.log.info("rmdir of directory %s successful", parent_dir)
    def test_rmdir_dir_when_hash_nonhash_vol_down(self):
        """
        Case 2:
        - create dir1 and dir2
        - bring down hashed subvol for dir1
        - bring down a non-hashed subvol for dir2
        - rmdir dir1 should fail with ENOTCONN
        - rmdir dir2 should fail with ENOTCONN
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        # pylint: disable=unsubscriptable-object

        # Create dir1 and dir2
        directory_list = []
        for number in range(1, 3):
            directory_list.append('{}/dir{}'.format(self.mountpoint, number))
            ret = mkdir(self.clients[0], directory_list[-1])
            self.assertTrue(ret, ('mkdir failed for %s '
                                  % directory_list[-1]))
            g.log.info("mkdir of directory %s successful",
                       directory_list[-1])

        # Find a non hashed subvolume(or brick)
        nonhashed_subvol, count = find_nonhashed_subvol(self.subvols, "/",
                                                        "dir1")
        self.assertIsNotNone(nonhashed_subvol,
                             "Error in finding nonhashed value")
        g.log.info("nonhashed_subvol %s", nonhashed_subvol._host)

        # Bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s'
                              % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rmdir' on dir1 should fail with ENOTCONN
        ret = rmdir(self.clients[0], directory_list[0])
        self.assertFalse(ret, ('Expected rmdir to fail for %s'
                               % directory_list[0]))
        g.log.info("rmdir of directory %s failed as expected",
                   directory_list[0])

        # Bring up the subvol - restart volume
        ret = volume_start(self.mnode, self.volname, force=True)
        self.assertTrue(ret, "Error in force start the volume")
        g.log.info('Volume restart success')
        sleep(10)

        # Unmount and mount the volume again to heal dir1
        ret, _, err = umount_volume(self.clients[1], self.mountpoint)
        self.assertFalse(ret, "Error in unmounting the volume: %s" % err)

        ret, _, err = mount_volume(self.volname,
                                   mtype='glusterfs',
                                   mpoint=self.mountpoint,
                                   mserver=self.servers[0],
                                   mclient=self.clients[1])
        self.assertFalse(ret, "Error in creating temp mount")

        ret, _, _ = g.run(self.clients[1], ("ls %s/dir1" % self.mountpoint))
        self.assertEqual(ret, 0, "Error in lookup for dir1")
        g.log.info("lookup successful for dir1")

        # This confirms that healing is done on dir1
        ret = validate_files_in_dir(self.clients[0],
                                    directory_list[0],
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "validate_files_in_dir for dir1 failed")
        g.log.info("healing successful for dir1")

        # Bring down the hashed subvol
        # Find a hashed subvolume(or brick)
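        # The hashed subvolume is the one DHT maps the name "dir2" to; with it
        # offline, rmdir cannot remove the directory entry and fails with
        # ENOTCONN even though dir2 exists on the remaining subvols.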
        hashed_subvol, count = find_hashed_subvol(self.subvols, "/", "dir2")
        self.assertIsNotNone(hashed_subvol,
                             "Error in finding hashed subvol")
        g.log.info("hashed_subvol %s", hashed_subvol._host)

        # Bring hashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s'
                              % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rmdir' on dir2 should fail with ENOTCONN
        ret = rmdir(self.clients[0], directory_list[1])
        self.assertFalse(ret, ('Expected rmdir to fail for %s'
                               % directory_list[1]))
        g.log.info("rmdir of dir2 directory %s failed as expected",
                   directory_list[1])

        # Cleanup
        # Bring up the subvol - restart the volume
        ret = volume_start(self.mnode, self.volname, force=True)
        self.assertTrue(ret, "Error in force start the volume")
        g.log.info('Volume restart success')
        sleep(10)

        # Delete dirs
        for directory in directory_list:
            ret = rmdir(self.clients[0], directory)
            self.assertTrue(ret, ('rmdir failed for %s ' % directory))
            g.log.info("rmdir of directory %s successful", directory)
    def perform_test(self, ctime):
        """
        Testcase steps:
        1. Enable/disable features.ctime based on the function argument.
        2. Create a directory on the mount point.
        3. Kill a brick and create a file inside the directory.
        4. Bring the brick online.
        5. Trigger heal and wait for its completion.
        6. Verify that the atime, mtime and ctime of the directory are the
           same on all bricks of the replica.
        """
        if ctime:
            option = {'features.ctime': 'on'}
        else:
            option = {'features.ctime': 'off'}
        ret = set_volume_options(self.mnode, self.volname, option)
        self.assertTrue(
            ret, 'Failed to set option %s on %s' % (option, self.volname))

        client, m_point = (self.mounts[0].client_system,
                           self.mounts[0].mountpoint)

        dirpath = '{}/dir1'.format(m_point)
        ret = mkdir(client, dirpath)
        self.assertTrue(ret, 'Unable to create a directory from mount point')

        bricks_to_bring_offline = select_volume_bricks_to_bring_offline(
            self.mnode, self.volname)
        self.assertIsNotNone(bricks_to_bring_offline, "List is empty")
        ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
        self.assertTrue(
            ret, 'Failed to bring bricks {} offline'.format(
                bricks_to_bring_offline))
        ret = are_bricks_offline(self.mnode, self.volname,
                                 bricks_to_bring_offline)
        self.assertTrue(
            ret, 'Bricks {} are not offline'.format(bricks_to_bring_offline))

        cmd = 'touch {}/file1'.format(dirpath)
        ret, _, _ = g.run(client, cmd)
        self.assertEqual(ret, 0, 'Unable to create file from mount point')

        ret = bring_bricks_online(
            self.mnode,
            self.volname,
            bricks_to_bring_offline,
            bring_bricks_online_methods=['volume_start_force'])
        self.assertTrue(
            ret,
            'Failed to bring bricks {} online'.format(bricks_to_bring_offline))
        ret = trigger_heal(self.mnode, self.volname)
        self.assertTrue(ret, 'Starting heal failed')
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal has not yet completed')

        if ctime:
            ret = self.are_mdata_xattrs_equal()
            self.assertTrue(ret,
                            "glusterfs.mdata mismatch for {}".format(dirpath))
        else:
            ret = self.are_stat_timestamps_equal()
            self.assertTrue(ret, "stat mismatch for {}".format(dirpath))

        ret = rmdir(client, dirpath, force=True)
        self.assertTrue(ret, 'Unable to delete directory from mount point')
    def test_no_dir(self):
        """
        * Enable quota on the volume
        * Set the quota on the non-existing directory
        * Create the directory as above and set limit
        * Validate the quota on the volume
        * Delete the directory
        * Validate the quota on volume
        * Recreate the directory
        * Validate the quota on volume
        * Check for volume status for all processes being online.
        """
        # Enable Quota
        g.log.info("Enabling quota on the volume %s", self.volname)
        ret, _, _ = quota_enable(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to enable quota on the volume %s",
                                  self.volname))
        g.log.info("Successfully enabled quota on the volume %s", self.volname)

        # Non existent path to set quota limit
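        # (quota paths are given relative to the volume root, not the mount
        # point, which is why "/foo" is used without the mount prefix)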
        path = "/foo"

        # Set Quota limit on /foo of the volume
        g.log.info("Set Quota Limit on the path %s of the volume %s",
                   path, self.volname)
        ret, _, err = quota_limit_usage(self.mnode, self.volname,
                                        path=path, limit="1GB")
        self.assertIn("No such file or directory", err, "Quota limit set "
                      "on path /foo which does not exist")

        mount_obj = self.mounts[0]
        mount_dir = mount_obj.mountpoint
        client = mount_obj.client_system

        # Create the directory on which limit was tried to be set
        ret = mkdir(client, "%s/foo" % (mount_dir))
        self.assertTrue(ret, ("Failed to create dir under %s-%s",
                              client, mount_dir))
        g.log.info("Directory 'foo' created successfully")

        # Set Quota limit on /foo of the volume
        g.log.info("Set Quota Limit on the path %s of the volume %s",
                   path, self.volname)
        ret, _, err = quota_limit_usage(self.mnode, self.volname,
                                        path=path, limit="1GB")
        self.assertEqual(ret, 0, ("Failed to set quota limit on path %s of "
                                  "the volume %s", path, self.volname))
        g.log.info("Successfully set the Quota limit on %s of the volume %s",
                   path, self.volname)

        # Validate quota list
        g.log.info("Get Quota list for foo and see if hardlimit is 1GB")
        ret = quota_validate(self.mnode, self.volname, path=path,
                             hard_limit=1073741824)
        self.assertTrue(ret, "Quota validate Failed for dir foo")

        # Delete the directory
        ret = rmdir(client, "%s/foo" %
                    (mount_dir), force=True)
        self.assertTrue(ret, ("Failed to delete dir /foo"))
        g.log.info("Successfully deleted /foo")

        # Validate quota list
        g.log.info("Get empty quota list")
        quota_list1 = quota_fetch_list(self.mnode, self.volname, path=None)
        self.assertIsNone(quota_list1, ("unexpected quota list entries found"))
        g.log.info("Successfully validated quota limit usage for the "
                   "deleted directory foo")

        # Recreate the same deleted directory
        ret = mkdir(client, "%s/foo" % (mount_dir))
        self.assertTrue(ret, ("Failed to create dir under %s-%s",
                              client, mount_dir))
        g.log.info("Directory 'foo' created successfully")

        # Validate quota list
        g.log.info("Get Quota list for foo and see if hardlimit is N/A")
        ret = quota_validate(self.mnode, self.volname, path=path,
                             hard_limit='N/A')
        self.assertTrue(ret, "Quota validate Failed for dir foo")
        g.log.info("Successfully validated quota limit usage for the "
                   "recreated directory foo")

        # Verify volume's all process are online
        g.log.info("Volume %s: Verifying that all process are online",
                   self.volname)
        ret = verify_all_process_of_volume_are_online(self.mnode,
                                                      self.volname)
        self.assertTrue(ret, ("Volume %s : All process are not online ",
                              self.volname))
        g.log.info("Volume %s: All process are online", self.volname)

    def data_delete(self, dirname):
        """Delete created data"""
        dirname = self.mounts[0].mountpoint + '/' + dirname
        ret = rmdir(self.mounts[0].client_system, dirname, force=True)
        self.assertTrue(ret, 'deletion of data failed')