Example #1
    def test_rm_file_when_nonhash_vol_down(self):
        """
        case -3:
        - create parent
        - mkdir parent/child
        - touch parent/child/file
        - bringdown a subvol where file is not present
        - rm -rf parent
            - Only file should be deleted
            - rm -rf of parent should fail with ENOTCONN
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        # pylint: disable=unsubscriptable-object
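        # DHT background for this case: a regular file lives on exactly one
        # hashed subvolume, while a directory exists on every subvolume. With
        # one subvolume offline, unlinking the file (which hashes elsewhere)
        # can succeed, but removing the parent directories touches the
        # offline subvolume and is expected to fail with ENOTCONN.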

        # Create parent dir
        parent_dir = self.mountpoint + '/parent'
        child_dir = parent_dir + '/child'
        ret = mkdir(self.clients[0], parent_dir)
        self.assertTrue(ret, ('mkdir failed for %s ' % parent_dir))
        g.log.info("mkdir of parent directory %s successful", parent_dir)

        # Create child dir
        ret = mkdir(self.clients[0], child_dir)
        self.assertTrue(ret, ('mkdir failed for %s ' % child_dir))
        g.log.info("mkdir of child directory %s successful", child_dir)

        # Create a file under child_dir
        file_one = child_dir + '/file_one'
        ret, _, err = g.run(self.clients[0], ("touch %s" % file_one))
        self.assertFalse(ret,
                         ('touch failed for %s err: %s' % (file_one, err)))

        # Find a non hashed subvolume(or brick)
        nonhashed_subvol, count = find_nonhashed_subvol(
            self.subvols, "parent/child", "file_one")
        self.assertIsNotNone(nonhashed_subvol,
                             "Error in finding nonhashed subvol")
        g.log.info("nonhashed_subvol %s", nonhashed_subvol._host)

        # Bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(
            ret, ('Error in bringing down subvolume %s' % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rm -rf' on parent should fail with ENOTCONN
        ret = rmdir(self.clients[0], parent_dir)
        self.assertFalse(ret, ('Expected rmdir to fail for %s' % parent_dir))
        g.log.info("rmdir of parent directory %s failed as expected",
                   parent_dir)

        brickobject = create_brickobjectlist(self.subvols, "parent/child")
        self.assertIsNotNone(brickobject, "could not create brickobject list")
        # Make sure file_one is deleted
        for brickdir in brickobject:
            # brickdir.path is of the form 'host:/brick/path'; keep only the
            # filesystem part for the existence check
            file_path = "%s/parent/child/file_one" % brickdir.path
            brick_path = file_path.split(":")
            self.assertTrue(
                (file_exists(brickdir._host, brick_path[1])) == 0,
                ('Expected file %s not to exist on any brick' % file_path))
        g.log.info("file is deleted as expected")

        # Cleanup
        # Bring up the subvol - restart volume
        ret = volume_start(self.mnode, self.volname, force=True)
        self.assertTrue(ret, "Error in force start the volume")
        g.log.info('Volume restart success.')
        sleep(10)

        # Delete parent_dir
        ret = rmdir(self.clients[0], parent_dir, force=True)
        self.assertTrue(ret, ('rmdir failed for %s ' % parent_dir))
        g.log.info("rmdir of directory %s successful", parent_dir)
    def test_directory_custom_extended_attr(self):
        """Test - set custom xattr to directory and link to directory
        """
        # pylint: disable = too-many-statements
        dir_prefix = '{root}/folder_{client_index}'

        for mount_index, mount_point in enumerate(self.mounts):
            folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                            client_index=mount_index)

            # Create a directory from mount point
            g.log.info('Creating directory : %s:%s', mount_point.mountpoint,
                       folder_name)
            ret = mkdir(mount_point.client_system, folder_name)
            self.assertTrue(
                ret, 'Failed to create directory %s on mount point %s' %
                (folder_name, mount_point.mountpoint))

            ret = file_exists(mount_point.client_system, folder_name)
            self.assertTrue(
                ret, 'Created directory %s does not exist on mount '
                'point %s' % (folder_name, mount_point.mountpoint))
            g.log.info('Created directory %s:%s', mount_point.mountpoint,
                       folder_name)

            # Verify that hash layout values are set on each
            # bricks for the dir
            g.log.debug("Verifying hash layout values")
            ret = validate_files_in_dir(mount_point.client_system,
                                        mount_point.mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_DIR)
            self.assertTrue(
                ret, "Expected - Directory is stored "
                "on hashed bricks")
            g.log.info("Hash layout values are set on each bricks")

            # Verify that mount point should not display
            # xattr : trusted.gfid and dht
            g.log.debug("Loading extra attributes")
            ret = get_fattr_list(mount_point.client_system, folder_name)

            self.assertTrue(
                'trusted.gfid' not in ret,
                "Extended attribute trusted.gfid is present on "
                "mount point %s and folder %s" %
                (mount_point.mountpoint, folder_name))
            self.assertTrue(
                'trusted.glusterfs.dht' not in ret,
                "Extended attribute trusted.glusterfs.dht is "
                "present on mount point %s and folder %s" %
                (mount_point.mountpoint, folder_name))

            g.log.info(
                'Extended attributes trusted.gfid and '
                'trusted.glusterfs.dht do not exist on '
                'mount point %s:%s ', mount_point.mountpoint, folder_name)

            # Verify that mount point shows pathinfo xattr
            g.log.debug("Check for xattr trusted.glusterfs.pathinfo on %s:%s",
                        mount_point, folder_name)
            ret = get_fattr(mount_point.client_system, mount_point.mountpoint,
                            'trusted.glusterfs.pathinfo')
            self.assertIsNotNone(
                ret, "trusted.glusterfs.pathinfo is not "
                "present on %s:%s" % (mount_point.mountpoint, folder_name))
            g.log.info(
                'pathinfo xattr is displayed on mount point %s and '
                'dir %s', mount_point.mountpoint, folder_name)
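
            # For reference, the pathinfo xattr expands to something like
            # (exact format varies by gluster version):
            #   trusted.glusterfs.pathinfo="(<DISTRIBUTE:vol-dht>
            #   <POSIX(/brick/path):server:/brick/path/dir>)"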

            # Create a custom xattr for dir
            g.log.info("Set attribute user.foo to %s", folder_name)
            ret = set_fattr(mount_point.client_system, folder_name, 'user.foo',
                            'bar2')
            self.assertTrue(
                ret, "Setup custom attribute on %s:%s failed" %
                (mount_point.client_system, folder_name))

            g.log.info('Custom attribute is set on %s:%s',
                       mount_point.client_system, folder_name)
            # Verify that custom xattr for directory is displayed
            # on mount point and bricks
            g.log.debug('Check xattr user.foo on %s:%s',
                        mount_point.client_system, folder_name)
            ret = get_fattr(mount_point.client_system, folder_name, 'user.foo')
            self.assertEqual(
                ret, 'bar2', "Xattr user.foo is not present on "
                "mount %s and directory %s" %
                (mount_point.client_system, folder_name))

            g.log.info(
                'Custom xattr user.foo is present on mount point'
                ' %s:%s ', mount_point.client_system, folder_name)

            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix.format(root=brick_dir,
                                               client_index=mount_index)

                g.log.debug('Check custom xattr for directory on brick %s:%s',
                            brick_server, brick_path)
                ret = get_fattr(brick_server, brick_path, 'user.foo')
                self.assertEqual(
                    'bar2', ret, "Expected: user.foo should be on brick %s\n"
                    "Actual: value of attribute user.foo is %s" %
                    (brick_path, ret))
                g.log.info('Custom xattr is present on brick %s', brick_path)

            # Delete custom attribute
            ret = delete_fattr(mount_point.client_system, folder_name,
                               'user.foo')
            self.assertTrue(ret, "Failed to delete custom attribute")

            g.log.info('Removed custom attribute from directory %s:%s',
                       mount_point.client_system, folder_name)
            # Verify that custom xattr is not displayed after delete
            # on mount point and on the bricks

            g.log.debug('Check whether custom xattr user.foo is present '
                        'on the mount or on the bricks after deletion')
            self.assertIsNone(
                get_fattr(mount_point.client_system, folder_name, 'user.foo'),
                "Xattr user.foo is still present on mount point"
                " %s:%s after deletion" %
                (mount_point.mountpoint, folder_name))

            g.log.info(
                "Xattr user.foo is not present after deletion"
                " on mount point %s:%s", mount_point.mountpoint, folder_name)

            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix.format(root=brick_dir,
                                               client_index=mount_index)
                self.assertIsNone(
                    get_fattr(brick_server, brick_path, 'user.foo'),
                    "Deleted xattr user.foo is still present on "
                    "brick %s:%s" % (brick, brick_path))
                g.log.info(
                    'Custom attribute is not present after deletion '
                    'from directory on brick %s:%s', brick, brick_path)
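
            # For reference, the shell equivalents of the xattr helpers used
            # above (standard getfattr/setfattr from the attr tools):
            #   setfattr -n user.foo -v bar2 <dir>    # set_fattr
            #   getfattr -n user.foo <dir>            # get_fattr
            #   setfattr -x user.foo <dir>            # delete_fattr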

        # Repeat all of the steps for link of created directory
        for mount_index, mount_point in enumerate(self.mounts):
            linked_folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                                   client_index="%s_linked" %
                                                   mount_index)
            folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                            client_index=mount_index)
            # Create link to created dir
            command = 'ln -s {src} {dst}'.format(dst=linked_folder_name,
                                                 src=folder_name)
            ret, _, _ = g.run(mount_point.client_system, command)
            self.assertEqual(
                0, ret, 'Failed to create link %s to directory %s' %
                (linked_folder_name, folder_name))
            self.assertTrue(
                file_exists(mount_point.client_system, linked_folder_name),
                'Link does not exist on %s:%s' %
                (mount_point.client_system, linked_folder_name))
            g.log.info('Created link %s to directory %s', linked_folder_name,
                       folder_name)

            # Verify that hash layout values are set on each
            # bricks for the link to dir
            g.log.debug("Verifying hash layout values")
            ret = validate_files_in_dir(mount_point.client_system,
                                        mount_point.mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_LINK)
            self.assertTrue(
                ret, "Expected - Link to directory is stored "
                "on hashed bricks")
            g.log.info("Hash layout values are set on each bricks")

            # Verify that mount point should not display xattr :
            # trusted.gfid and dht
            g.log.debug("Loading extra attributes")
            ret = get_fattr_list(mount_point.client_system, linked_folder_name)

            self.assertTrue(
                'trusted.gfid' not in ret,
                "Extended attribute trusted.gfid is present on "
                "mount point %s and folder %s" %
                (mount_point.mountpoint, linked_folder_name))

            self.assertTrue(
                'trusted.glusterfs.dht' not in ret,
                "Extended attribute trusted.glusterfs.dht is "
                "present on mount point %s and folder %s" %
                (mount_point.mountpoint, linked_folder_name))

            g.log.info(
                'Extended attributes trusted.gfid and '
                'trusted.glusterfs.dht do not exist on '
                'mount point %s:%s ', mount_point.mountpoint,
                linked_folder_name)

            # Verify that mount point shows pathinfo xattr
            g.log.debug("Check if pathinfo is presented on %s:%s",
                        mount_point.client_system, linked_folder_name)
            self.assertIsNotNone(
                get_fattr(mount_point.client_system, mount_point.mountpoint,
                          'trusted.glusterfs.pathinfo'),
                "pathinfo is not displayed on mountpoint "
                "%s:%s" % (mount_point.client_system, linked_folder_name))
            g.log.info('pathinfo value is displayed on mount point %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Set custom attribute on the link
            g.log.debug("Set custom xattr user.foo on %s:%s",
                        mount_point.client_system, linked_folder_name)
            self.assertTrue(
                set_fattr(mount_point.client_system, linked_folder_name,
                          'user.foo', 'bar2'),
                "Failed to set xattr user.foo on %s:%s" %
                (mount_point.client_system, linked_folder_name))
            g.log.info('Successfully set custom attribute on %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Verify that custom xattr for directory is displayed
            # on mount point and bricks
            g.log.debug('Check mountpoint and bricks for custom xattr')
            self.assertEqual(
                'bar2',
                get_fattr(mount_point.client_system, linked_folder_name,
                          'user.foo'), 'Custom xattr is not present on '
                'mount point %s:%s' %
                (mount_point.client_system, linked_folder_name))
            g.log.info("Custom xattr is present on mount point %s:%s",
                       mount_point.client_system, linked_folder_name)
            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix. \
                    format(root=brick_dir,
                           client_index="%s_linked" % mount_index)
                # Check if the symlink exists on this brick; '-h' tests for a
                # symbolic link without dereferencing it
                cmd = '[ -h %s ] && echo "yes" || echo "no"' % brick_path
                _, ret, _ = g.run(brick_server, cmd)
                if 'no' in ret:
                    g.log.info("Link %s:%s does not exist", brick_server,
                               brick_path)
                    continue

                self.assertEqual(
                    get_fattr(brick_server, brick_path, 'user.foo'), 'bar2',
                    "Actual: custom attribute not "
                    "found on brick %s:%s" % (brick_server, brick_path))
                g.log.info('Custom xattr for link found on brick %s:%s',
                           brick_server, brick_path)

            # Delete custom attribute
            g.log.debug('Removing custom attribute on mount point %s:%s',
                        mount_point.client_system, linked_folder_name)
            self.assertTrue(
                delete_fattr(mount_point.client_system, linked_folder_name,
                             'user.foo'), 'Failed to delete xattr user.foo')
            g.log.info('Deleted custom xattr from link %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Verify that custom xattr is not displayed after delete
            # on mount point and on the bricks
            g.log.debug(
                "Check if custom xattr is present on %s:%s "
                "after deletion", mount_point.client_system,
                linked_folder_name)
            self.assertIsNone(
                get_fattr(mount_point.client_system, linked_folder_name,
                          'user.foo'),
                "Expected xattr user.foo not to be present on"
                " %s:%s" % (mount_point.client_system, linked_folder_name))
            g.log.info("Custom xattr user.foo is not present on %s:%s",
                       mount_point.client_system, linked_folder_name)
            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix. \
                    format(root=brick_dir,
                           client_index="%s_linked" % mount_index)
                # Check if the symlink exists on this brick; '-h' tests for a
                # symbolic link without dereferencing it
                cmd = '[ -h %s ] && echo "yes" || echo "no"' % brick_path
                _, ret, _ = g.run(brick_server, cmd)
                if 'no' in ret:
                    g.log.info("Link %s:%s does not exist", brick_server,
                               brick_path)
                    continue

                self.assertIsNone(
                    get_fattr(brick_server, brick_path, 'user.foo'),
                    "Extended custom attribute is still present on "
                    "%s:%s after deletion" % (brick_server, brick_path))
                g.log.info(
                    'Custom attribute is not present after deletion '
                    'from link on brick %s:%s', brick_server, brick_path)

        g.log.info('Directory custom extended attribute validation with '
                   'getfattr/setfattr is successful')
    def test_subdir_with_quota_limit(self):

        # pylint: disable=too-many-statements
        """
        Mount the volume
        Create 2 subdir on mount point
        dir1 -> /level1/subdir1, dir2 -> /dlevel1/dlevel2/dlevel3/subdir2
        Auth allow - Client1(/level1/subdir1),
        Client2(/dlevel1/dlevel2/dlevel3/subdir2)
        Mount the subdir1 on client 1 and subdir2 on client2
        Enable Quota
        Verify Quota is enabled on volume
        Set quota limit as 1GB and 2GB on both subdirs respectively
        Perform a quota list operation
        Perform IO on both subdirs until the quota limit is almost hit for
        subdir1
        Again perform a quota list operation
        Run IO on client 1. This should fail
        Run IO on client 2. This should pass
        """

        # Create deep subdirectories  subdir1 and subdir2 on mount point
        ret = mkdir(self.mounts[0].client_system,
                    "%s/level1/subdir1" % self.mounts[0].mountpoint,
                    parents=True)
        self.assertTrue(
            ret, ("Failed to create directory '/level1/subdir1' on "
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        ret = mkdir(self.mounts[0].client_system,
                    "%s/dlevel1/dlevel2/dlevel3/subdir2" %
                    self.mounts[0].mountpoint,
                    parents=True)
        self.assertTrue(
            ret, ("Failed to create directory "
                  "'/dlevel1/dlevel2/dlevel3/subdir2' on "
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        # unmount volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volumes Unmount failed")
        g.log.info("Volumes Unmounted successfully")

        # Set authentication on the subdirectory subdir1
        # and subdir2
        g.log.info(
            'Setting authentication on directories subdir1 and subdir2 '
            'for clients %s and %s', self.clients[0], self.clients[1])
        ret = set_auth_allow(
            self.volname, self.mnode, {
                '/level1/subdir1': [self.clients[0]],
                '/dlevel1/dlevel2/dlevel3/subdir2': [self.clients[1]]
            })
        self.assertTrue(
            ret, 'Failed to set Authentication on volume %s' % self.volname)

        # Creating mount list for subdirectories
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/level1/subdir1" % self.volname
        self.subdir_mounts[1].volname = ("%s/dlevel1/dlevel2/dlevel3/subdir2" %
                                         self.volname)

        # Mount Subdirectory "subdir1" on client 1 and "subdir2" on client 2
        for mount_obj in self.subdir_mounts:
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount  %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Successfully mounted subdirectories on client1"
                   "and clients 2")

        # Enable quota on volume
        g.log.info("Enabling quota on the volume %s", self.volname)
        ret, _, _ = quota_enable(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to enable quota on the volume "
                                  "%s", self.volname))
        g.log.info("Successfully enabled quota on the volume %s", self.volname)

        # Check if quota is enabled
        g.log.info("Validate Quota is enabled on the volume %s", self.volname)
        ret = is_quota_enabled(self.mnode, self.volname)
        self.assertTrue(
            ret, ("Quota is not enabled on the volume %s", self.volname))
        g.log.info("Successfully Validated quota is enabled on volume %s",
                   self.volname)

        # Setting up path to set quota limit

        path1 = "/level1/subdir1"
        path2 = "/dlevel1/dlevel2/dlevel3/subdir2"

        # Set Quota limit on the subdirectory "subdir1"

        g.log.info("Set Quota Limit on the path %s of the volume %s", path1,
                   self.volname)
        ret, _, _ = quota_limit_usage(self.mnode,
                                      self.volname,
                                      path1,
                                      limit="1GB")
        self.assertEqual(ret, 0, ("Failed to set quota limit on path %s of "
                                  " the volume %s", path1, self.volname))
        g.log.info(
            "Successfully set the Quota limit on %s of the volume "
            "%s", path1, self.volname)

        # Set Quota limit on the subdirectory "subdir2"

        g.log.info("Set Quota Limit on the path %s of the volume %s", path2,
                   self.volname)
        ret, _, _ = quota_limit_usage(self.mnode,
                                      self.volname,
                                      path2,
                                      limit="2GB")
        self.assertEqual(ret, 0, ("Failed to set quota limit on path %s of "
                                  " the volume %s", path2, self.volname))
        g.log.info(
            "Successfully set the Quota limit on %s of the volume "
            "%s", path2, self.volname)

        # Get Quota List on the volume

        g.log.info("Get Quota list on the volume %s", self.volname)
        quota_list = quota_fetch_list(self.mnode, self.volname)

        self.assertIsNotNone(quota_list, ("Failed to get the quota list "
                                          "of the volume %s", self.volname))

        # Check for subdir1 path in quota list

        self.assertIn(
            path1, quota_list.keys(),
            ("%s not part of the quota list %s even though "
             "it is set on the volume %s", path1, quota_list, self.volname))

        # Check for subdir2 path in quota list

        self.assertIn(
            path2, quota_list.keys(),
            ("%s not part of the quota list %s even though "
             "it is set on the volume %s", path2, quota_list, self.volname))
        g.log.info("Successfully listed quota list %s of the "
                   "volume %s", quota_list, self.volname)

        # Create near to 1GB of data on both subdir mounts

        for mount_object in self.subdir_mounts:
            g.log.info("Creating Files on %s:%s", mount_object.client_system,
                       mount_object.mountpoint)
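            # Each iteration writes one 1 MB file; 1023 files is just under
            # 1 GB, so subdir1 (1 GB limit) is driven close to its quota
            # while subdir2 (2 GB limit) stays within quota.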
            cmd = ("cd %s ; for i in `seq 1 1023` ;"
                   "do dd if=/dev/urandom of=file$i bs=1M "
                   "count=1;done" % (mount_object.mountpoint))
            ret, _, _ = g.run(mount_object.client_system, cmd)
            self.assertEqual(ret, 0, "Failed to create files on mountpoint")
            g.log.info("Files created successfully on mountpoint")

        # Again Get Quota List on the volume

        g.log.info("Get Quota list on the volume %s", self.volname)
        quota_list = quota_fetch_list(self.mnode, self.volname)

        self.assertIsNotNone(quota_list, ("Failed to get the quota list "
                                          "of the volume %s", self.volname))

        # Check for subdir1 path in quota list

        self.assertIn(
            path1, quota_list.keys(),
            ("%s not part of the quota list %s even though "
             "it is set on the volume %s", path1, quota_list, self.volname))

        # Check for subdir2 path in quota list

        self.assertIn(
            path2, quota_list.keys(),
            ("%s not part of the quota list %s even though "
             "it is set on the volume %s", path2, quota_list, self.volname))
        g.log.info("Successfully listed quota list %s of the "
                   "volume %s", quota_list, self.volname)

        # Again run IO to check that the quota limit is adhered to for subdir1

        # Start IO's on subdir1
        g.log.info("Creating Files on %s:%s", self.clients[0],
                   self.subdir_mounts[0].mountpoint)
        cmd = ("cd %s ; for i in `seq 1024 1500` ;"
               "do dd if=/dev/urandom of=file$i bs=1M "
               "count=1;done" % (self.subdir_mounts[0].mountpoint))
        ret, _, _ = g.run(self.clients[0], cmd)
        if ret == 0:
            raise ExecutionError("IO was expected to fail on subdir1, "
                                 "but it passed")
        g.log.info(
            "IO failed as expected on %s:%s since the quota "
            "limit was already reached", self.clients[0],
            self.subdir_mounts[0].mountpoint)

        # Start IO's on subdir2
        g.log.info("Creating Files on %s:%s", self.clients[1],
                   self.subdir_mounts[1].mountpoint)
        cmd = ("cd %s ; for i in `seq 1024 1500` ;"
               "do dd if=/dev/urandom of=file$i bs=1M "
               "count=1;done" % (self.subdir_mounts[1].mountpoint))
        ret, _, _ = g.run(self.clients[1], cmd)
        self.assertEqual(ret, 0,
                         ("Failed to create files on %s" % self.clients[1]))
        g.log.info("Files created successfully on %s:%s", self.clients[1],
                   self.subdir_mounts[1].mountpoint)
Example #4
    def test_rmdir_dir_when_hash_nonhash_vol_down(self):
        """
        case -2:
        - create dir1 and dir2
        - bring down hashed subvol for dir1
        - bring down a non-hashed subvol for dir2
        - rmdir dir1 should fail with ENOTCONN
        - rmdir dir2 should fail with ENOTCONN
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        # pylint: disable=unsubscriptable-object
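        # DHT note: rmdir removes a directory from the non-hashed subvolumes
        # first and from the hashed subvolume last, so taking either a hashed
        # or a non-hashed subvolume offline is expected to make rmdir fail
        # with ENOTCONN.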

        # Create dir1 and dir2
        directory_list = []
        for number in range(1, 3):
            directory_list.append('{}/dir{}'.format(self.mountpoint, number))
            ret = mkdir(self.clients[0], directory_list[-1])
            self.assertTrue(ret, ('mkdir failed for %s ' % directory_list[-1]))
            g.log.info("mkdir of directory %s successful", directory_list[-1])

        # Find a non hashed subvolume(or brick)
        nonhashed_subvol, count = find_nonhashed_subvol(
            self.subvols, "/", "dir1")
        self.assertIsNotNone(nonhashed_subvol,
                             "Error in finding nonhashed subvol")
        g.log.info("nonhashed_subvol %s", nonhashed_subvol._host)

        # Bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(
            ret, ('Error in bringing down subvolume %s' % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rmdir' on dir1 should fail with ENOTCONN
        ret = rmdir(self.clients[0], directory_list[0])
        self.assertFalse(ret,
                         ('Expected rmdir to fail for %s' % directory_list[0]))
        g.log.info("rmdir of directory %s failed as expected",
                   directory_list[0])

        # Bring up the subvol - restart volume
        ret = volume_start(self.mnode, self.volname, force=True)
        self.assertTrue(ret, "Error in force start the volume")
        g.log.info('Volume restart success')
        sleep(10)

        # Unmount and remount the volume to trigger healing of dir1
        ret, _, err = umount_volume(self.clients[1], self.mountpoint)
        self.assertFalse(ret, "Error in unmounting the volume: %s" % err)

        ret, _, err = mount_volume(self.volname,
                                   mtype='glusterfs',
                                   mpoint=self.mountpoint,
                                   mserver=self.servers[0],
                                   mclient=self.clients[1])
        self.assertFalse(ret, "Error in remounting the volume: %s" % err)

        ret, _, _ = g.run(self.clients[1], ("ls %s/dir1" % self.mountpoint))
        self.assertEqual(ret, 0, "Error in lookup for dir1")
        g.log.info("lookup successful for dir1")

        # This confirms that healing is done on dir1
        ret = validate_files_in_dir(self.clients[0],
                                    directory_list[0],
                                    test_type=LAYOUT_IS_COMPLETE,
                                    file_type=FILETYPE_DIRS)
        self.assertTrue(ret, "validate_files_in_dir for dir1 failed")
        g.log.info("healing successful for dir1")

        # Bring down the hashed subvol
        # Find a hashed subvolume(or brick)
        hashed_subvol, count = find_hashed_subvol(self.subvols, "/", "dir2")
        self.assertIsNotNone(hashed_subvol, "Error in finding nonhashed value")
        g.log.info("hashed_subvol %s", hashed_subvol._host)

        # Bring hashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(
            ret, ('Error in bringing down subvolume %s' % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rmdir' on dir2 should fail with ENOTCONN
        ret = rmdir(self.clients[0], directory_list[1])
        self.assertFalse(ret,
                         ('Expected rmdir to fail for %s' % directory_list[1]))
        g.log.info("rmdir of dir2 directory %s failed as expected",
                   directory_list[1])

        # Cleanup
        # Bring up the subvol - restart the volume
        ret = volume_start(self.mnode, self.volname, force=True)
        self.assertTrue(ret, "Error in force start the volume")
        g.log.info('Volume restart success')
        sleep(10)

        # Delete dirs
        for directory in directory_list:
            ret = rmdir(self.clients[0], directory)
            self.assertTrue(ret, ('rmdir failed for %s ' % directory))
            g.log.info("rmdir of directory %s successful", directory)
    def test_create_link_for_directory(self):
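        """
        Test a symlink to a directory: create test_dir with sub-directories,
        create a symlink to it, then verify the link type and target, inode
        separation, directory listing, xattr filtering, pathinfo and readlink
        behaviour from the mount point.
        """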

        g.log.info("creating a directory at mount point")
        m_point = self.mounts[0].mountpoint
        test_dir_path = 'test_dir'
        fqpath = m_point + '/' + test_dir_path
        flag = mkdir(self.clients[0], fqpath, True)
        self.assertTrue(flag, "failed to create a directory")
        fqpath = m_point + '/' + test_dir_path + '/dir{1..3}'
        flag = mkdir(self.clients[0], fqpath, True)
        self.assertTrue(flag, "failed to create sub directories")
        flag = validate_files_in_dir(self.clients[0],
                                     m_point + '/test_dir',
                                     test_type=k.TEST_LAYOUT_IS_COMPLETE)
        self.assertTrue(flag, "layout of test directory is not complete")
        g.log.info("directories created successfully")

        g.log.info("creating a symlink for test_dir")
        sym_link_path = m_point + '/' + 'test_sym_link'
        command = 'ln -s ' + m_point + '/test_dir ' + sym_link_path
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "failed to create symlink for test_dir")

        command = 'stat ' + sym_link_path
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "stat command didn't return the details "
                         "correctly")
        g.log.info("checking if the link is symbolic")
        self.assertIn('symbolic link', out,
                      "the type of the link is not symbolic")
        g.log.info("the link is symbolic")
        g.log.info("checking if the sym link points to right directory")
        index_start = out.find('->') + 6
        index_end = out.find("\n") - 3
        dir_pointed = out[index_start:index_end]
        flag = False
        if dir_pointed == m_point + '/' + test_dir_path:
            flag = True
        self.assertTrue(flag, "sym link does not point to correct " "location")
        g.log.info("sym link points to right directory")
        g.log.info("The details of the symlink are correct")

        g.log.info("verifying that inode number of the test_dir "
                   "and its sym link are different")
        command = 'ls -id ' + m_point + '/' + \
            test_dir_path + ' ' + sym_link_path
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "inode numbers not retrieved by the "
                         "ls command")
        list_of_inode_numbers = out.split('\n')
        flag = True
        if (list_of_inode_numbers[0].split(' ')[0] ==
                list_of_inode_numbers[1].split(' ')[0]):
            flag = False
        self.assertTrue(
            flag, "the inode numbers of the dir and its sym link "
            "are the same")
        g.log.info("verified: inode numbers of the test_dir "
                   "and its sym link are different")

        g.log.info("listing the contents of the test_dir from its sym " "link")
        command = 'ls ' + sym_link_path
        ret, out1, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "failed to list the contents using the "
                         "sym link")
        command = 'ls ' + m_point + '/' + test_dir_path
        ret, out2, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(
            ret, 0, "failed to list the contents of the "
            "test_dir using ls command")
        flag = False
        if out1 == out2:
            flag = True
        self.assertTrue(
            flag, "the contents listed using the sym link "
            "are not the same")
        g.log.info("the contents listed using the symlink are"
                   " the same as that of the test_dir")

        g.log.info("verifying that mount point doesn't display important "
                   "xattrs using the symlink")
        command = 'getfattr -d -m . -e hex ' + sym_link_path
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "failed to retrieve xattrs")
        list_xattrs = ['trusted.gfid', 'trusted.glusterfs.dht']
        flag = True
        for xattr in list_xattrs:
            if xattr in out:
                flag = False
        self.assertTrue(
            flag, "important xattrs are exposed via the symlink"
            " at the mount point")
        g.log.info("verified: mount point doesn't display important "
                   "xattrs using the symlink")

        g.log.info("verifying that mount point shows path info xattr for the"
                   " test_dir and sym link and is same for both")
        path_info_1 = get_pathinfo(self.mounts[0].client_system,
                                   m_point + '/' + test_dir_path)
        path_info_2 = get_pathinfo(self.mounts[0].client_system, sym_link_path)
        flag = False
        if path_info_1 == path_info_2:
            flag = True
        self.assertTrue(
            flag, "pathinfos for test_dir and its sym link "
            "are not the same")
        g.log.info("pathinfos for test_dir and its sym link are the same")

        g.log.info("verifying readlink on sym link at mount point returns "
                   "the name of the directory")
        command = 'readlink ' + sym_link_path
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "readlink command returned an error")
        flag = False
        if out.rstrip() == m_point + '/' + test_dir_path:
            flag = True
        self.assertTrue(flag, "readlink did not return the path of the "
                        "test_dir")
        g.log.info("readlink successfully returned the path of the test_dir")
    def test_quota_symlink_limit(self):
        """
        Verifying Directory Quota functionality with respect to limit-usage.
        Setting quota limit on a symlink should fail.

        * Enable quota
        * Set a quota limit on the volume
        * Create a directory
        * Create a symlink of the directory
        * Try to set quota limit on the symlink
        """

        # pylint: disable=too-many-statements
        # Enable Quota on the volume
        g.log.info("Enabling quota on the volume %s", self.volname)
        ret, _, _ = quota_enable(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, ("Failed to enable quota on the volume %s", self.volname))
        g.log.info("Successfully enabled quota on the volume %s", self.volname)

        # Path to set the Quota limit
        path = '/'

        # Set Quota limit of 100 MB on the root of the volume
        g.log.info("Set Quota Limit on the volume %s", self.volname)
        ret, _, _ = quota_limit_usage(self.mnode,
                                      self.volname,
                                      path=path,
                                      limit="100MB")
        self.assertEqual(ret, 0, ("Failed to set Quota limit on path %s of "
                                  "the volume %s", path, self.volname))
        g.log.info("Successfully set the Quota limit on the volume %s",
                   self.volname)

        # Create a directory 'foo' from the mount point
        mount_obj = self.mounts[0]
        mount_dir = mount_obj.mountpoint
        client = mount_obj.client_system

        g.log.info("Creating dir named 'foo' from client %s", client)
        ret = mkdir(client, "%s/foo" % mount_dir)
        self.assertTrue(
            ret, "Failed to create dir under %s-%s" % (client, mount_dir))
        g.log.info("Directory 'foo' created successfully")

        # Create a symlink of the directory 'foo' from mount point
        g.log.info("Creating symlink of dir 'foo' from client %s", client)
        cmd = ("cd %s ; ln -s foo bar" % mount_dir)
        ret, _, _ = g.run(client, cmd)
        self.assertEqual(ret, 0, "Failed to create symlink for directory foo")
        g.log.info("Successfully created symlink for the directory foo")

        # Try to set a quota limit on the symlink
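        # A quota limit can only be set on an existing directory inside the
        # volume; a symlink is not a valid quota path, so the command below
        # is expected to exit non-zero.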
        g.log.info("Set Quota Limit on the symlink 'bar' of the volume %s",
                   self.volname)
        ret, _, _ = quota_limit_usage(self.mnode,
                                      self.volname,
                                      path="/bar",
                                      limit="100MB")
        self.assertEqual(ret, 1, ("Failed: Unexpected Quota limit set on the "
                                  "symlink successfully"))
        g.log.info(
            "Successful: Quota limit failed to set on the symlink 'bar'"
            " of the volume %s", self.volname)
Example #7
    def test_no_dir(self):
        """
        * Enable quota on the volume
        * Set the quota on the non-existing directory
        * Create the directory as above and set limit
        * Validate the quota on the volume
        * Delete the directory
        * Validate the quota on volume
        * Recreate the directory
        * Validate the quota on volume
        * Check for volume status for all processes being online.
        """
        # Enable Quota
        g.log.info("Enabling quota on the volume %s", self.volname)
        ret, _, _ = quota_enable(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, ("Failed to enable quota on the volume %s", self.volname))
        g.log.info("Successfully enabled quota on the volume %s", self.volname)

        # Non existent path to set quota limit
        path = "/foo"

        # Set Quota limit on /foo of the volume
        g.log.info("Set Quota Limit on the path %s of the volume %s", path,
                   self.volname)
        ret, _, err = quota_limit_usage(self.mnode,
                                        self.volname,
                                        path=path,
                                        limit="1GB")
        self.assertIn("No such file or directory", err, "Quota limit set "
                      "on path /foo which does not exist")

        mount_obj = self.mounts[0]
        mount_dir = mount_obj.mountpoint
        client = mount_obj.client_system

        # Create the directory on which limit was tried to be set
        ret = mkdir(client, "%s/foo" % (mount_dir))
        self.assertTrue(
            ret, ("Failed to create dir under %s-%s", client, mount_dir))
        g.log.info("Directory 'foo' created successfully")

        # Set Quota limit on /foo of the volume
        g.log.info("Set Quota Limit on the path %s of the volume %s", path,
                   self.volname)
        ret, _, err = quota_limit_usage(self.mnode,
                                        self.volname,
                                        path=path,
                                        limit="1GB")
        self.assertEqual(ret, 0, ("Failed to set quota limit on path %s of "
                                  "the volume %s", path, self.volname))
        g.log.info("Successfully set the Quota limit on %s of the volume %s",
                   path, self.volname)

        # Validate quota list
        g.log.info("Get Quota list for foo and see if hardlimit is 1GB")
        ret = quota_validate(self.mnode,
                             self.volname,
                             path=path,
                             hard_limit=1073741824)
        self.assertTrue(ret, "Quota validate Failed for dir foo")

        # Delete the directory
        ret = rmdir(client, "%s/foo" % (mount_dir), force=True)
        self.assertTrue(ret, ("Failed to delete dir /foo"))
        g.log.info("Successfully deleted /foo")

        # Validate quota list
        g.log.info("Get empty quota list")
        quota_list1 = quota_fetch_list(self.mnode, self.volname, path=None)
        self.assertIsNone(quota_list1, ("unexpected quota list entries found"))
        g.log.info("Successfully validated quota limit usage for the "
                   "deleted directory foo")

        # Recreate the same deleted directory
        ret = mkdir(client, "%s/foo" % (mount_dir))
        self.assertTrue(
            ret, ("Failed to create dir under %s-%s", client, mount_dir))
        g.log.info("Directory 'foo' created successfully")

        # Validate quota list
        g.log.info("Get Quota list for foo and see if hardlimit is N/A")
        ret = quota_validate(self.mnode,
                             self.volname,
                             path=path,
                             hard_limit='N/A')
        self.assertTrue(ret, "Quota validate Failed for dir foo")
        g.log.info("Successfully validated quota limit usage for the "
                   "recreated directory foo")

        # Verify all volume processes are online
        g.log.info("Volume %s: Verifying that all processes are online",
                   self.volname)
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(
            ret, ("Volume %s : All processes are not online", self.volname))
        g.log.info("Volume %s: All processes are online", self.volname)
    def test_subdir_with_removebrick(self):

        # pylint: disable=too-many-statements
        """
        Mount the volume
        Create 2 subdirs on the mount point: subdir1 and subdir2
        Auth allow - Client1(subdir1,subdir2),Client2(subdir1,subdir2)
        Mount the subdirs on their respective clients
        Start IO on both subdirs
        Perform remove-brick
        Validate on the clients that the subdirs remain mounted after the
        remove-brick operation is performed
        """
        # Create  directories subdir1 and subdir2 on mount point
        ret = mkdir(self.mounts[0].client_system,
                    "%s/subdir1" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'subdir1' in "
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        ret = mkdir(self.mounts[0].client_system,
                    "%s/subdir2" % self.mounts[0].mountpoint)
        self.assertTrue(
            ret, ("Failed to create directory 'subdir2' in "
                  "volume %s from client %s" %
                  (self.mounts[0].volname, self.mounts[0].client_system)))
        # unmount volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volumes UnMount failed")
        g.log.info("Volumes UnMounted successfully")

        # Set authentication on the subdirectory subdir1
        # and subdir2 to access by 2 clients
        g.log.info(
            'Setting authentication on subdir1 and subdir2 '
            'for clients %s and %s', self.clients[0], self.clients[1])
        ret = set_auth_allow(
            self.volname, self.mnode, {
                '/subdir1': [self.clients[0], self.clients[1]],
                '/subdir2': [self.clients[0], self.clients[1]]
            })
        self.assertTrue(
            ret, 'Failed to set Authentication on volume %s' % self.volname)

        self.mpoint = "/mnt/Mount_Point1"

        # Mount Subdir1 mount on client 1
        _, _, _ = mount_volume("%s/subdir1" % self.volname, self.mount_type,
                               self.mpoint, self.mnode, self.clients[0])

        # Checking subdir1 is mounted or not
        ret = is_mounted("%s/subdir1" % self.volname, self.mpoint, self.mnode,
                         self.clients[0], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir1", self.volname,
                   self.mpoint)

        # Mount Subdir2 mount on client 2
        _, _, _ = mount_volume("%s/subdir2" % self.volname, self.mount_type,
                               self.mpoint, self.mnode, self.clients[1])

        # Checking subdir2 is mounted or not
        ret = is_mounted("%s/subdir2" % self.volname, self.mpoint, self.mnode,
                         self.clients[1], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir2", self.volname,
                   self.mpoint)

        # Start IO on all the subdir mounts.
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/subdir1" % self.volname
        self.subdir_mounts[1].volname = "%s/subdir2" % self.volname
        all_mounts_procs = []
        count = 1
        for mount_obj in self.subdir_mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       self.mpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, self.mpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.subdir_mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Perform remove brick operation when subdir is mounted on client
        g.log.info("Start removing bricks from volume")
        ret = shrink_volume(self.mnode, self.volname)
        self.assertTrue(ret, ("Remove brick operation failed on "
                              "%s", self.volname))
        g.log.info("Remove brick operation is successful on "
                   "volume %s", self.volname)

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("All volume %s processes failed to come up "
                              "online", self.volname))
        g.log.info("All volume %s processes came up "
                   "online successfully", self.volname)

        # Log Volume Info and Status after performing remove brick
        g.log.info("Logging volume info and Status after shrinking volume")
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Logging volume info and status failed on "
                              "volume %s", self.volname))
        g.log.info("Successful in logging volume info and status of volume %s",
                   self.volname)

        # Again Checking subdir1 is mounted or not on Client 1
        ret = is_mounted("%s/subdir1" % self.volname, self.mpoint, self.mnode,
                         self.clients[0], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir1", self.volname,
                   self.mpoint)

        # Again Checking subdir2 is mounted or not on Client 2
        ret = is_mounted("%s/subdir2" % self.volname, self.mpoint, self.mnode,
                         self.clients[1], self.mount_type)
        self.assertTrue(ret,
                        "Volume not mounted on mount point: %s" % self.mpoint)
        g.log.info("Volume %s mounted on %s/subdir2", self.volname,
                   self.mpoint)
    def test_rebalance_start_not_fail(self):
        """
        1. On Node N1, Add "transport.socket.bind-address N1" in the
            /etc/glusterfs/glusterd.vol
        2. Create a replicate (1X3) and disperse (4+2) volumes with
            name more than 108 chars
        3. Mount both volumes using node 1 where you added the
            "transport.socket.bind-address" and start IO (like untar)
        4. Perform add-brick on replicate volume 3-bricks
        5. Start rebalance on replicated volume
        6. Perform add-brick for disperse volume 6 bricks
        7. Start rebalance of disperse volume
        """
        cmd = ("sed -i 's/end-volume/option "
               "transport.socket.bind-address {}\\n&/g' "
               "/etc/glusterfs/glusterd.vol".format(self.mnode))
        disperse = ("disperse_e4upxjmtre7dl4797wedbp7r3jr8equzvmcae9f55t6z1"
                    "ffhrlk40jtnrzgo4n48fjf6b138cttozw3c6of3ze71n9urnjkshoi")
        replicate = ("replicate_e4upxjmtre7dl4797wedbp7r3jr8equzvmcae9f55t6z1"
                     "ffhrlk40tnrzgo4n48fjf6b138cttozw3c6of3ze71n9urnjskahn")

        # Order matters: zip() below pairs these with
        # ("/mnt/replicated_mount", "/mnt/disperse_mount")
        volnames = (replicate, disperse)
        for volume, vol_name in (("disperse", disperse), ("replicate",
                                                          replicate)):

            bricks_list = form_bricks_list(self.mnode, volume,
                                           6 if volume == "disperse" else 3,
                                           self.servers, self.all_servers_info)
            if volume == "replicate":
                ret, _, _ = volume_create(self.mnode,
                                          replicate,
                                          bricks_list,
                                          replica_count=3)

            else:
                ret, _, _ = volume_create(self.mnode,
                                          disperse,
                                          bricks_list,
                                          force=True,
                                          disperse_count=6,
                                          redundancy_count=2)

            self.assertFalse(
                ret, "Unexpected: Volume create '{}' failed ".format(vol_name))
            ret, _, _ = volume_start(self.mnode, vol_name)
            self.assertFalse(ret, "Failed to start volume")

        # Add entry in 'glusterd.vol'
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertFalse(ret, "Failed to add entry in 'glusterd.vol' file")

        self.list_of_io_processes = []

        # mount volume
        self.mount = ("/mnt/replicated_mount", "/mnt/disperse_mount")
        for mount_dir, volname in zip(self.mount, volnames):
            ret, _, _ = mount_volume(volname, "glusterfs", mount_dir,
                                     self.mnode, self.clients[0])
            self.assertFalse(
                ret, "Failed to mount the volume '{}'".format(mount_dir))

            # Run IO
            # Create a dir to start untar
            self.linux_untar_dir = "{}/{}".format(mount_dir, "linuxuntar")
            ret = mkdir(self.clients[0], self.linux_untar_dir)
            self.assertTrue(ret, "Failed to create dir linuxuntar for untar")

            # Start linux untar on dir linuxuntar
            ret = run_linux_untar(self.clients[:1],
                                  mount_dir,
                                  dirs=tuple(['linuxuntar']))
            self.list_of_io_processes += ret
            self.is_io_running = True

        # Add Brick to replicate Volume
        bricks_list = form_bricks_list(self.mnode, replicate, 3, self.servers,
                                       self.all_servers_info, "replicate")
        ret, _, _ = add_brick(self.mnode, replicate, bricks_list, force=True)
        self.assertFalse(ret, "Failed to add-brick '{}'".format(replicate))

        # Trigger Rebalance on the volume
        ret, _, _ = rebalance_start(self.mnode, replicate)
        self.assertFalse(
            ret,
            "Failed to start rebalance on the volume '{}'".format(replicate))

        # Add Brick to disperse Volume
        bricks_list = form_bricks_list(self.mnode, disperse, 6, self.servers,
                                       self.all_servers_info, "disperse")

        ret, _, _ = add_brick(self.mnode, disperse, bricks_list, force=True)
        self.assertFalse(ret, "Failed to add-brick '{}'".format(disperse))

        # Trigger Rebalance on the volume
        ret, _, _ = rebalance_start(self.mnode, disperse)
        self.assertFalse(
            ret, "Failed to start rebalance on the volume {}".format(disperse))

        # Check if Rebalance is completed on both the volume
        for volume in (replicate, disperse):
            ret = wait_for_rebalance_to_complete(self.mnode,
                                                 volume,
                                                 timeout=600)
            self.assertTrue(
                ret,
                "Rebalance is not Compleated on Volume '{}'".format(volume))
Example #10
    def test_snap_uss(self):
        # pylint: disable=too-many-statements
        """
        Steps:
        1. Create a volume and mount it.
        2. Perform I/O on mounts
        3. create a .snaps directory and create some files
        4. Create Multiple snapshots of volume
        5. Check info of volume
        6. Enable USS for volume
        7. Validate files created under .snaps
        8. Disable USS
        9. Again Validate the files created under .snaps directory
        """
        # write files on all mounts
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mounts)
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("python %s create_files "
                   "-f 10 --base-file-name file %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate IO
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # starting I/O
        g.log.info("Starting IO on all mounts...")
        for mount_obj in self.mounts:
            self.mpoint = "%s/.snaps" % mount_obj.mountpoint
            ret = file_exists(mount_obj.client_system, self.mpoint)
            if not ret:
                ret = mkdir(mount_obj.client_system, self.mpoint)
                self.assertTrue(ret, "Failed to create .snaps directory")
                g.log.info("Successfully created .snaps directory")
                break
            else:
                # Validate USS running
                g.log.info("Validating USS enabled or disabled")
                ret = is_uss_enabled(self.mnode, self.volname)
                if not ret:
                    break
                else:
                    g.log.info("USS is enabled in volume %s", self.volname)
                    ret, _, _ = disable_uss(self.mnode, self.volname)
                    self.assertEqual(
                        ret, 0, "Failed to disable USS on "
                        " volume %s" % self.volname)
                    g.log.info("USS disabled in Volume %s", self.volname)
                    ret = mkdir(mount_obj.client_system, self.mpoint)
                    self.assertTrue(ret, "Failed to create .snaps directory")
                    g.log.info("Successfully created .snaps directory")
            cmd = ("python %s create_files "
                   "-f 10 --base-file-name foo %s" %
                   (self.script_upload_path, self.mpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # List files under created .snaps directory
        g.log.info("Starting to list files under .snaps directory")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(ret, 0, "Failed to list files under .snaps")
            g.log.info("Successfully Created files under .snaps directory")
            before_uss_enable = out.strip().split('\n')
            # deleting the mount path from list
            del before_uss_enable[0]

        # Create Multiple snapshots for volume
        g.log.info("Creating snapshots")
        self.snaps_list = []
        for snap_count in range(1, 5):
            self.snap = "snap%s" % snap_count
            ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
            self.assertEqual(
                ret, 0, "Failed to create snapshot "
                "%s for volume %s" % (self.snap, self.volname))
            self.snaps_list.append(self.snap)
            g.log.info("Snapshot %s created successfully for volume %s",
                       self.snap, self.volname)
        g.log.info("Snapshot Creation Successful")

        # Activate the snapshots
        g.log.info("Activating snapshots")
        for snap_count in range(1, 5):
            self.snap = "snap%s" % snap_count
            ret, _, _ = snap_activate(self.mnode, self.snap)
            self.assertEqual(ret, 0,
                             ("Failed to activate snapshot %s" % self.snap))
            g.log.info("Snapshot snap%s activated successfully", self.snap)

        # snapshot list
        g.log.info("Starting to list snapshots")
        ret, out, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, "Failed to list snapshot")
        snap_count = out.strip().split("\n")
        self.assertEqual(len(snap_count), 4, "Failed to list all snaps")
        g.log.info("Snapshot list Validated successfully")

        # Enable USS
        g.log.info("Enable USS on volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on cloned volume")
        g.log.info("Successfully enabled USS on Cloned volume")

        # Validate USS running
        g.log.info("Validating USS enabled or disabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, ("USS is disabled in volume %s" % self.volname))
        g.log.info("USS enabled in Volume %s", self.volname)

        # Validate snapshots under .snaps folder
        self.validate_snaps()

        # check snapshots are listed
        g.log.info(".snaps Containing:")
        for mount_obj in self.mounts:
            ret, _, _ = uss_list_snaps(mount_obj.client_system,
                                       mount_obj.mountpoint)
            self.assertEqual(ret, 0, "Failed to list snapshot information")
            g.log.info("Successfully Listed snapshots Created")

        # Disable USS running
        g.log.info("Disable USS on volume")
        ret, _, _ = disable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to disable USS on volume")
        g.log.info("Successfully disabled USS on volume")

        # check snapshots are listed
        g.log.info(".snaps Containing:")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(ret, 0, "Failed to list snapshot information")
            g.log.info("Successfully listed snapshots Created")

        # Validate that after disabling USS the files are unchanged
        g.log.info("Validate files after disabling USS")
        after_uss_disable = out.strip().split('\n')
        # deleting the mount path from list
        del after_uss_disable[0]
        for snap_file in before_uss_enable:
            self.assertIn(snap_file, after_uss_disable,
                          "File %s is missing under .snaps after "
                          "disabling USS" % snap_file)
        g.log.info("Validated files under .snaps directory")
    def test_quota_file_larger_than_limit(self):
        # pylint: disable=too-many-statements
        """
        Verifying directory Quota functionality with respect to the
        limit-usage option.

        If a limit is set and a file of size larger than limit is created
        then the file creation will stop when it will reach the limit.

        Quota list will show limit-set and size as same.

        * Enable Quota
        * Create a directory from mount point
        * Set a limit of 10 MB on the directory
        * Set Quota soft-timeout and hard-timeout to 0 seconds
        * Create a file of size larger than the Quota limit
          eg. 20 MB file
        * Perform Quota list operation to check if all the fields are
          appropriate such as hard_limit, available_space, sl_exceeded,
          hl_exceeded, etc.
        """
        # Enable Quota
        g.log.info("Enabling Quota on the volume %s", self.volname)
        ret, _, _ = quota_enable(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, ("Failed to enable Quota on the volume %s", self.volname))
        g.log.info("Successfully enabled Quota on the volume %s", self.volname)

        # Path to set the Quota limit
        path = '/foo'

        # Create a directory 'foo' from the mount point
        mount_obj = self.mounts[0]
        mount_dir = mount_obj.mountpoint
        client = mount_obj.client_system

        g.log.info("Creating dir named 'foo' from client %s", client)
        ret = mkdir(client, "%s/foo" % mount_dir)
        self.assertTrue(
            ret, "Failed to create dir under %s-%s" % (client, mount_dir))
        g.log.info("Directory 'foo' created successfully")

        # Set Quota limit of 10 MB on the directory 'foo' of the volume
        g.log.info("Set Quota Limit on the path %s of the volume %s", path,
                   self.volname)
        ret, _, _ = quota_limit_usage(self.mnode,
                                      self.volname,
                                      path=path,
                                      limit="10MB")
        self.assertEqual(ret, 0, ("Failed to set Quota limit on path %s of "
                                  "the volume %s", path, self.volname))
        g.log.info("Successfully set the Quota limit on %s of the volume %s",
                   path, self.volname)

        # Set Quota soft-timeout to 0 seconds
        g.log.info("Set Quota soft timeout:")
        ret, _, _ = quota_set_soft_timeout(self.mnode, self.volname, '0sec')
        self.assertEqual(ret, 0, ("Failed to set soft timeout"))
        g.log.info("Quota soft timeout set successful")

        # Set Quota hard-timeout to 0 second
        g.log.info("Set Quota hard timeout:")
        ret, _, _ = quota_set_hard_timeout(self.mnode, self.volname, '0sec')
        self.assertEqual(ret, 0, ("Failed to set hard timeout"))
        g.log.info("Quota hard timeout set successful")

        # Validate if the Quota limit set is appropriate
        g.log.info(
            "Validate if the Quota limit set is correct for the "
            "directory %s of the volume %s", path, self.volname)
        ret = quota_validate(self.mnode,
                             self.volname,
                             path=path,
                             hard_limit=10485760)
        self.assertTrue(
            ret, ("Quota Limit of 10 MB was not set properly on "
                  "the directory %s of the volume %s", path, self.volname))
        g.log.info(
            "Successfully Validated Quota Limit of 10 MB is set on the"
            " directory %s of the volume %s", path, self.volname)

        # Create a single file of size 20 MB
        g.log.info("Creating Files on %s:%s", client, mount_dir)
        cmd = ("cd %s/foo ; "
               "dd if=/dev/zero of=20MBfile "
               "bs=1M "
               "count=20" % mount_dir)
        ret, _, _ = g.run(client, cmd)
        self.assertEqual(
            ret, 1, "Unexpected: File creation succeeded even "
            "after exceeding the hard-limit")
        g.log.info("Expected: File creation failed after exceeding "
                   "hard-limit")

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")

        # Check if the file created above exists
        g.log.info("Checking if the file created exists in the volume %s",
                   self.volname)
        ret = file_exists(client, "%s/foo/20MBfile" % mount_dir)
        self.assertTrue(ret,
                        ("File does not exist in the volume %s", self.volname))
        g.log.info(
            "Successfully validated the presence of file in the "
            "volume %s", self.volname)

        # Validate if the Quota limit set is appropriate
        g.log.info(
            "Validate if the Quota list fields are appropriate for the "
            "directory %s of the volume %s", path, self.volname)
        ret = quota_validate(self.mnode,
                             self.volname,
                             path=path,
                             hard_limit=10485760,
                             avail_space=0,
                             sl_exceeded=True,
                             hl_exceeded=True)
        self.assertTrue(ret, ("Failed to validate the Quota limits on "
                              "the volume %s", self.volname))
        g.log.info(
            "Successfully validated Quota limit of 10 MB is set on the"
            " directory %s of the volume %s", path, self.volname)
    def test_quota_volume_subdir_limits(self):
        """
        Verifying directory quota functionality WRT limit-usage on volume
        as well as sub-directories in volume.

        * Enable quota
        * Set a limit of 1 GB on / of volume
        * Create 10 directories on mount point
        * Set a limit of 100 MB on all the sub-directories created
        * Create data inside the sub-directories on mount point till the limits
          are reached
        * Validate if the hard limit and available space fields inside the
          quota list command are appropriate
        """

        # Enable quota on the volume
        g.log.info("Enabling quota on the volume %s", self.volname)
        ret, _, _ = quota_enable(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, ("Failed to enable quota on the volume %s", self.volname))
        g.log.info("Successfully enabled quota on the volume %s", self.volname)

        # Path to set quota limit
        path = "/"

        # Set a limit of 1 GB on the root of the volume
        g.log.info("Set Quota Limit on the path %s of the volume %s", path,
                   self.volname)
        ret, _, _ = quota_limit_usage(self.mnode,
                                      self.volname,
                                      path=path,
                                      limit="1GB")
        self.assertEqual(ret, 0, ("Failed to set quota limit on path %s of "
                                  "the volume %s", path, self.volname))
        g.log.info("Successfully set the Quota limit on %s of the volume %s",
                   path, self.volname)

        # Create 10 directories from the mount point
        mount_obj = self.mounts[0]
        mount_dir = mount_obj.mountpoint
        client = mount_obj.client_system

        g.log.info("Creating directories on %s:%s", client, mount_dir)
        for i in range(1, 11):
            ret = mkdir(client, "%s/foo%s" % (mount_dir, i))
            self.assertTrue(
                ret, ("Failed to create dir under %s-%s", client, mount_dir))
            g.log.info("Directory 'foo%s' created successfully", i)
        g.log.info("Successfully created directories on %s:%s", client,
                   mount_dir)

        # Set a limit of 100 MB on each directory
        g.log.info(
            "Setting a limit of 100 MB on all the directories inside "
            "the volume %s", self.volname)
        for j in range(1, 11):
            dir_name = "/foo" + str(j)
            ret, _, _ = quota_limit_usage(self.mnode,
                                          self.volname,
                                          path=dir_name,
                                          limit="100MB")
            self.assertEqual(ret, 0,
                             ("Failed to set quota limit on path "
                              "%s of the volume %s", dir_name, self.volname))
            g.log.info(
                "Successfully set the Quota limit on /foo%s of "
                "the volume %s", j, self.volname)
        g.log.info(
            "Successfully set the limit of 100 MB on all directories "
            "inside the volume %s", self.volname)

        # Validate if quota limit usage is set properly
        g.log.info("Validate quota limit usage on all directories")
        for k in range(1, 11):
            dir_name = "/foo" + str(k)
            ret = quota_validate(self.mnode,
                                 self.volname,
                                 path=dir_name,
                                 hard_limit=104857600)
            self.assertTrue(ret, ("Failed to validate quota limit usage on the"
                                  "directory %s", dir_name))
            g.log.info(
                "Successfully validated quota limit usage for the "
                "directory %s of volume %s", dir_name, self.volname)

        # Create data inside each directory from mount point
        g.log.info("Creating Files on %s:%s", client, mount_dir)
        for var1 in range(1, 11):
            cmd = ("cd %s/foo%s ; "
                   "for i in `seq 1 100` ; "
                   "do dd if=/dev/zero of=testfile$i "
                   "bs=1M "
                   "count=1 ; "
                   "done" % (mount_dir, var1))
            ret, _, _ = g.run(client, cmd)
            self.assertEqual(ret, 0,
                             ("Failed to create files in /foo%s", var1))
            g.log.info("Files created successfully in /foo%s", var1)
        g.log.info(
            "Files creation is successful on all directories of the "
            "volume %s", self.volname)

        # List the files inside each directory
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")

        # Validate the hard limit and available space fields are appropriate
        g.log.info("Validate quota hard limit and available space on all the "
                   "directories are appropriate")
        for var2 in range(1, 11):
            dir_name = "/foo" + str(var2)
            ret = quota_validate(self.mnode,
                                 self.volname,
                                 path=dir_name,
                                 hard_limit=104857600,
                                 avail_space=0,
                                 sl_exceeded=True,
                                 hl_exceeded=True,
                                 used_space=104857600)
            self.assertTrue(ret,
                            ("Failed to validate quota hard limit and "
                             "available space on the directory %s", dir_name))
            g.log.info(
                "Successfully validated quota hard limit and available"
                " space fields inside quota list for directory %s "
                "of volume %s", dir_name, self.volname)
Example #13
    def test_subdir_with_replacebrick(self):

        # pylint: disable=too-many-statements
        """
        Mount the volume
        Create 50 directories on mount point
        Unmount volume
        Auth allow - Client1(subdir25), Client2(subdir15)
        Mount the subdirs on their respective authorized clients
        Start I/O on both subdirs
        Perform replace-brick
        Validate on the clients that the subdirs are still mounted after
        the replace-brick operation is performed
        Stat data on subdirs
        """
        # Create  directories on mount point
        for i in range(0, 50):
            ret = mkdir(self.mounts[0].client_system,
                        "%s/subdir%s" % (self.mounts[0].mountpoint, i))
            self.assertTrue(
                ret,
                ("Failed to create directory %s/subdir%s on"
                 " volume from client %s" %
                 (self.mounts[0].mountpoint, i, self.mounts[0].client_system)))
        g.log.info("Successfully created directories on mount point")

        # unmount volume
        ret = self.unmount_volume(self.mounts)
        self.assertTrue(ret, "Volumes Unmount failed")
        g.log.info("Volumes Unmounted successfully")

        # Set authentication on the subdirectory subdir25 to access by
        # client1 and subdir15 to access by 2 clients
        g.log.info(
            'Setting authentication on subdir25 and subdir15'
            'for client %s and %s', self.clients[0], self.clients[1])
        ret = set_auth_allow(self.volname, self.mnode, {
            '/subdir25': [self.clients[0]],
            '/subdir15': [self.clients[1]]
        })
        self.assertTrue(
            ret, 'Failed to set authentication on volume %s' % self.volname)

        # Creating mount list for mounting selected subdirs on authorized
        # clients
        self.subdir_mounts = [
            copy.deepcopy(self.mounts[0]),
            copy.deepcopy(self.mounts[1])
        ]
        self.subdir_mounts[0].volname = "%s/subdir25" % self.volname
        self.subdir_mounts[1].volname = "%s/subdir15" % self.volname

        # Mount Subdirectory subdir25 on client 1 and subdir15 on client 2
        for mount_obj in self.subdir_mounts:
            ret = mount_obj.mount()
            self.assertTrue(
                ret, ("Failed to mount %s on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Successfully mounted %s on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Successfully mounted sub directories on"
                   "authenticated clients")

        # Start IO on all the subdir mounts.
        all_mounts_procs = []
        count = 1
        for mount_obj in self.subdir_mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Get stat of all the files/dirs created.
        g.log.info("Get stat of all the files/dirs created.")
        ret = get_mounts_stat(self.subdir_mounts)
        self.assertTrue(ret, "Stat failed on some of the clients")
        g.log.info("Successfully got stat of all files/dirs created")

        # Log Volume Info and Status before replacing brick from the volume.
        g.log.info(
            "Logging volume info and Status before replacing brick "
            "from the volume %s", self.volname)
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Logging volume info and status failed on "
                              "volume %s", self.volname))
        g.log.info("Successful in logging volume info and status of volume %s",
                   self.volname)

        # Replace brick from a sub-volume
        g.log.info("Replace a brick from the volume")
        ret = replace_brick_from_volume(self.mnode, self.volname, self.servers,
                                        self.all_servers_info)
        self.assertTrue(ret, "Failed to replace  brick from the volume")
        g.log.info("Successfully replaced brick from the volume")

        # Wait for volume processes to be online
        g.log.info("Wait for volume processes to be online")
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("All volume %s processes failed to come up "
                              "online", self.volname))
        g.log.info("All volume %s processes came up "
                   "online successfully", self.volname)

        # Log Volume Info and Status after replacing the brick
        g.log.info(
            "Logging volume info and Status after replacing brick "
            "from the volume %s", self.volname)
        ret = log_volume_info_and_status(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed Logging volume info and status on "
                              "volume %s", self.volname))
        g.log.info("Successful in logging volume info and status of volume %s",
                   self.volname)

        # Wait for self-heal to complete
        g.log.info("Wait for self-heal to complete")
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal has not yet completed')
        g.log.info("self-heal is successful after replace-brick operation")

        # Again validate if subdirectories are still mounted post replace-brick
        for mount_obj in self.subdir_mounts:
            ret = mount_obj.is_mounted()
            self.assertTrue(
                ret, ("Subdirectory %s is not mounted on client"
                      " %s" % (mount_obj.volname, mount_obj.client_system)))
            g.log.info("Subdirectory %s is mounted on client %s",
                       mount_obj.volname, mount_obj.client_system)
        g.log.info("Successfully validated that subdirectories are mounted"
                   "on client1 and clients 2 post replace-brick operation")

        # Validate IO
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.subdir_mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")