    def _set_xattr_trusted_foo(self, xattr_val):
        """Sets xattr trusted.foo on all the files"""
        for fname in self.list_of_device_files:
            ret = set_fattr(self.clients[0], fname, 'trusted.foo', xattr_val)
            self.assertTrue(
                ret, "Unable to create custom xattr "
                "for file {}".format(fname))
    def _set_xattr_value(self, fattr_value="bar2"):
        """Set the xattr 'user.foo' on dir1 to the given value"""
        # Set the xattr on dir1
        ret = set_fattr(self.client, '{}/dir1'.format(self.m_point),
                        'user.foo', fattr_value)
        self.assertTrue(ret, "Failed to set the xattr on dir1")
        g.log.info("Successfully set the xattr user.foo with value:"
                   " %s on dir1", fattr_value)
    def set_xattr_user_foo(self, list_of_files, xattr_val):
        """Sets xattr user.foo on all the files."""
        for fname in list_of_files:
            ret = set_fattr(self.client_node, fname, 'user.foo', xattr_val)
            self.assertTrue(
                ret, "Unable to create custom xattr "
                "for file {}".format(fname))
        g.log.info("Successfully created a custom xattr for all files.")
    def test_nuke_happy_path(self):
        """
        Test case:
        1. Create a distributed volume, start and mount it
        2. Create 1000 dirs and 1000 files under a directory say 'dir1'
        3. Set xattr glusterfs.dht.nuke to "test" for dir1
        4. Validate dir1 is not seen from the mount point
        5. Validate if the entry is moved to '/brickpath/.glusterfs/landfill'
           and deleted eventually.
        """
        # Create 1000 dirs and 1000 files under a directory say 'dir1'
        self.dir_1_path = "{}/dir1/".format(self.mounts[0].mountpoint)
        ret = mkdir(self.first_client, self.dir_1_path)
        self.assertTrue(ret, "Failed to create dir1 on mount point")
        cmd = ("cd {};for i in `seq 1 1000`;do mkdir dir$i;touch file$i;done"
               .format(self.dir_1_path))
        ret, _, _ = g.run(self.first_client, cmd)
        self.assertFalse(ret, "I/O failed at dir1 on mount point")

        # Set xattr glusterfs.dht.nuke to "test" for dir1
        ret = set_fattr(self.first_client, self.dir_1_path,
                        'glusterfs.dht.nuke', 'test')
        self.assertTrue(ret, "Failed to set xattr glusterfs.dht.nuke")

        # Validate dir1 is not seen from the mount point
        ret = get_dir_contents(self.first_client, self.mounts[0].mountpoint)
        self.assertEqual([], ret,
                         "UNEXPECTED: Mount point has files; ideally it "
                         "should be empty.")

        # Validate if the entry is moved to '/brickpath/.glusterfs/landfill'
        # and deleted eventually
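        # (Setting glusterfs.dht.nuke moves the directory into the brick's
        # .glusterfs/landfill directory, from where the posix janitor thread
        # is expected to purge it asynchronously.)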
        for brick_path in get_all_bricks(self.mnode, self.volname):
            node, path = brick_path.split(":")
            path = "{}/.glusterfs/landfill/*/".format(path)
            ret = get_dir_contents(node, path)
            # In case the landfill dir has already been cleaned up before
            # checking, stop execution of the loop.
            if ret is None:
                g.log.info("Bricks have been already cleaned up.")
                break
            self.assertIsNotNone(ret,
                                 "Files not present in /.glusterfs/landfill"
                                 " dir")
        g.log.info("Successully nuked dir1.")
    def test_delete_dir_with_self_pointing_linkto_files(self):
        """
        Test case:
        1. Create a pure distribute volume with 2 bricks, start and mount it.
        2. Create dir dir0/dir1/dir2 inside which create 1000 files and rename
           all the files.
        3. Start remove-brick operation on the volume.
        4. Check remove-brick status till status is completed.
        5. When remove-brick status is completed stop it.
        6. Go to brick used for remove brick and perform lookup on the files.
        7. Change the linkto xattr value for every file in brick used for
           remove brick to point to itself.
        8. Perform rm -rf * from mount point.
        """
        # Create dir /dir0/dir1/dir2
        self.dir_path = "{}/dir0/dir1/dir2/".format(self.mounts[0].mountpoint)
        ret = mkdir(self.first_client, self.dir_path, parents=True)
        self.assertTrue(ret, "Failed to create /dir0/dir1/dir2/ dir")

        # Create 1000 files inside /dir0/dir1/dir2
        ret, _, _ = g.run(
            self.first_client, 'cd %s;for i in {1..1000}; do echo "Test file" '
            '> tfile-$i; done' % self.dir_path)
        self.assertFalse(ret,
                         "Failed to create 1000 files inside /dir0/dir1/dir2")

        # Rename 1000 files present inside /dir0/dir1/dir2
        ret, _, _ = g.run(
            self.first_client, "cd %s;for i in {1..1000};do mv tfile-$i "
            "ntfile-$i;done" % self.dir_path)
        self.assertFalse(ret,
                         "Failed to rename 1000 files inside /dir0/dir1/dir2")
        g.log.info("I/O successful on mount point.")

        # Start remove-brick operation on the volume
        brick = form_bricks_list_to_remove_brick(self.mnode,
                                                 self.volname,
                                                 subvol_num=1)
        self.assertIsNotNone(brick, "Brick_list is empty")
        ret, _, _ = remove_brick(self.mnode, self.volname, brick, 'start')
        self.assertFalse(ret, "Failed to start remov-brick on volume")

        # Check remove-brick status till status is completed
        ret = wait_for_remove_brick_to_complete(self.mnode, self.volname,
                                                brick)
        self.assertTrue(ret, "Remove-brick didn't complete on volume")

        # When remove-brick status is completed stop it
        ret, _, _ = remove_brick(self.mnode, self.volname, brick, 'stop')
        self.assertFalse(ret, "Failed to start remov-brick on volume")
        g.log.info("Successfully started and stopped remove-brick")

        # Go to brick used for remove brick and perform lookup on the files
        node, path = brick[0].split(":")
        path = "{}/dir0/dir1/dir2/".format(path)
        ret, _, _ = g.run(node, 'ls {}*'.format(path))
        self.assertFalse(ret, "Failed to do lookup on %s" % brick[0])

        # Change the linkto xattr value for every file in brick used for
        # remove brick to point to itself
        ret = get_dir_contents(node, path)
        self.assertIsNotNone(ret,
                             "Unable to get files present in dir0/dir1/dir2")

        ret = get_dht_linkto_xattr(node, "{}{}".format(path, ret[0]))
        self.assertIsNotNone(ret, "Unable to fetch dht linkto xattr")

        # Change trusted.glusterfs.dht.linkto from dist-client-0 to
        # dist-client-1 or vice versa according to the initial value
        dht_linkto_xattr = ret.split("-")
        if int(dht_linkto_xattr[2]):
            dht_linkto_xattr[2] = "0"
        else:
            dht_linkto_xattr[2] = "1"
        linkto_value = "-".join(dht_linkto_xattr)
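        # For example (matching the naming above), a linkto value of
        # 'dist-client-1' becomes 'dist-client-0' and vice versa, so each
        # linkto file now points back to the subvol it resides on.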

        # Set xattr trusted.glusterfs.dht.linkto on all the linkto files
        ret = set_fattr(node, '{}*'.format(path),
                        'trusted.glusterfs.dht.linkto', linkto_value)
        self.assertTrue(ret, "Failed to change linkto file to point to itself")

        # Perform rm -rf * from mount point
        ret, _, _ = g.run(self.first_client,
                          "rm -rf {}/*".format(self.mounts[0].mountpoint))
        self.assertFalse(ret, "Failed to run rm -rf * on mount point")
        g.log.info("rm -rf * successful on mount point")
    def test_accessing_file_when_dht_layout_is_stale(self):
        '''
        Description : Checks if a file can be opened and accessed if the dht
                      layout has become stale.

        Steps:
        1. Create, start and mount a volume consisting of 2 subvols on
           2 clients
        2. Create a dir `dir` and file `dir/file` from client0
        3. Take note of layouts of `brick1`/dir and `brick2`/dir of the volume
        4. Validate a successful lookup from only one brick path
        5. Re-assign layouts i.e., brick1/dir to brick2/dir and vice-versa
        6. Remove `dir/file` from client0 and recreate the same file from
           client0 and client1
        7. Validate a successful lookup from only one brick path (as the
           layout has changed, the file creation path will change)
        8. Validate the checksums match on both the clients
        '''

        # Will be used in _get_brick_node_and_path
        self.dir_path = '/dir'

        # Will be used in argument to _assert_file_lookup
        file_name = '/file'

        dir_path = self.mounts[0].mountpoint + self.dir_path
        file_path = dir_path + file_name

        client0, client1 = self.clients[0], self.clients[1]
        fattr = 'trusted.glusterfs.dht'
        io_cmd = ('cat /dev/urandom | tr -dc [:space:][:print:] | '
                  'head -c 1K > {}'.format(file_path))
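        # io_cmd writes 1 KB of random printable data to `dir/file`; it is
        # reused later to recreate the file after the layout swap.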

        # Create a dir from client0
        ret = mkdir(self.clients[0], dir_path)
        self.assertTrue(ret, 'Unable to create a directory from mount point')

        # Touch a file with data from client0
        ret, _, _ = g.run(client0, io_cmd)
        self.assertEqual(ret, 0, 'Failed to create a file on mount')

        # Yields `node` and `brick-path` from first brick of each subvol
        gen = self._get_brick_node_and_path()

        # Take note of newly created directory's layout from org_subvol1
        node1, fqpath1 = next(gen)
        layout1 = get_fattr(node1, fqpath1, fattr)
        self.assertIsNotNone(layout1,
                             '{} is not present on {}'.format(fattr, fqpath1))

        # Lookup on file from node1 should fail as `dir/file` will always get
        # hashed to node2 in a 2-brick distribute volume by default
        self._assert_file_lookup(node1,
                                 fqpath1 + file_name,
                                 when='before',
                                 result=False)

        # Take note of newly created directory's layout from org_subvol2
        node2, fqpath2 = next(gen)
        layout2 = get_fattr(node2, fqpath2, fattr)
        self.assertIsNotNone(layout2,
                             '{} is not present on {}'.format(fattr, fqpath2))

        # Lookup on file from node2 should pass
        self._assert_file_lookup(node2,
                                 fqpath2 + file_name,
                                 when='before',
                                 result=True)

        # Set org_subvol2 directory layout to org_subvol1 and vice-versa
        for node, fqpath, layout, vol in ((node1, fqpath1, layout2, (2, 1)),
                                          (node2, fqpath2, layout1, (1, 2))):
            ret = set_fattr(node, fqpath, fattr, layout)
            self.assertTrue(
                ret, 'Failed to set layout of org_subvol{} on '
                'brick {} of org_subvol{}'.format(vol[0], fqpath, vol[1]))

        # Remove file after layout change from client0
        cmd = 'rm -f {}'.format(file_path)
        ret, _, _ = g.run(client0, cmd)
        self.assertEqual(ret, 0, 'Failed to delete file after layout change')

        # Create file with same name as above after layout change from client0
        # and client1
        for client in (client0, client1):
            ret, _, _ = g.run(client, io_cmd)
            self.assertEqual(
                ret, 0, 'Failed to create file from '
                '{} after layout change'.format(client))

        # After layout change lookup on file from node1 should pass
        self._assert_file_lookup(node1,
                                 fqpath1 + file_name,
                                 when='after',
                                 result=True)

        # After layout change lookup on file from node2 should fail
        self._assert_file_lookup(node2,
                                 fqpath2 + file_name,
                                 when='after',
                                 result=False)

        # Take note of checksum from client0 and client1
        checksums = [None] * 2
        for index, mount in enumerate(self.mounts):
            ret, checksums[index] = collect_mounts_arequal(mount, dir_path)
            self.assertTrue(
                ret, 'Failed to get arequal on client {}'.format(
                    mount.client_system))

        # Validate no checksum mismatch
        self.assertEqual(checksums[0], checksums[1],
                         'Checksum mismatch between client0 and client1')

        g.log.info('Pass: Test accessing file on stale layout is complete.')
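
    # NOTE: Hypothetical sketch of the `_assert_file_lookup` helper used by
    # the test above; the original helper is not part of this excerpt, so
    # the actual implementation may differ.
    def _assert_file_lookup(self, node, fqpath, when, result):
        """Assert lookup of `fqpath` on `node` passes/fails as expected"""
        ret, _, _ = g.run(node, 'stat {}'.format(fqpath))
        assert_method = self.assertEqual if result else self.assertNotEqual
        msg = ('Lookup on {} from {} should {} {} layout change'
               .format(fqpath, node, 'pass' if result else 'fail', when))
        assert_method(ret, 0, msg)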
    def test_heal_for_conservative_merge_with_two_bricks_blame(self):
        """
        1) Create 1x3 volume and fuse mount the volume
        2) On mount created a dir dir1
        3) Pkill glusterfsd on node n1 (b2 on node2 and b3 on node3 up)
        4) touch f{1..10} on the mountpoint
        5) b2 and b3 xattrs would be blaming b1 as files are created while
           b1 is down
        6) Reset the b3 xattrs to NOT blame b1 by using setfattr
        7) Now pkill glusterfsd of b2 on node2
        8) Restart glusterd on node1 to bring up b1
        9) Now bricks: b1 online, b2 down, b3 online
        10) touch x{1..10} under dir1 itself
        11) Again reset xattr on node3 of b3 so that it doesn't blame b2,
            as done for b1 in step 6
        12) Do restart glusterd on node2 hosting b2 to bring all bricks online
        13) Check for heal info, split-brain and arequal for the bricks
        """
        # pylint: disable=too-many-locals
        # Create dir `dir1` on the mountpoint
        path = self.mounts[0].mountpoint + "/dir1"
        ret = mkdir(self.mounts[0].client_system, path, parents=True)
        self.assertTrue(ret, "Directory {} creation failed".format(path))

        all_bricks = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(all_bricks, "Unable to fetch bricks of volume")
        brick1, brick2, brick3 = all_bricks

        # Bring first brick offline
        self._bring_brick_offline_and_check(brick1)

        # touch f{1..10} files on the mountpoint
        cmd = ("cd {mpt}; for i in `seq 1 10`; do touch f$i"
               "; done".format(mpt=path))
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Unable to create files on mountpoint")

        # Check b2 and b3 xattrs are blaming b1 and are same
        self.assertEqual(self._get_fattr_for_the_brick(brick2),
                         self._get_fattr_for_the_brick(brick3),
                         "Both the bricks xattrs are not blaming "
                         "brick: {}".format(brick1))

        # Reset the xattrs of dir1 on b3 for brick b1
        first_xattr_to_reset = "trusted.afr.{}-client-0".format(self.volname)
        xattr_value = "0x000000000000000000000000"
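        # The 12-byte trusted.afr.<vol>-client-N value holds the pending
        # data/metadata/entry counters; writing all zeroes clears the blame
        # this brick holds against client-0 (i.e. brick1).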
        host, brick_path = brick3.split(":")
        brick_path = brick_path + "/dir1"
        ret = set_fattr(host, brick_path, first_xattr_to_reset, xattr_value)
        self.assertTrue(ret, "Unable to set xattr for the directory")

        # Kill brick2 on the node2
        self._bring_brick_offline_and_check(brick2)

        # Restart glusterd on node1 to bring the brick1 online
        self.assertTrue(restart_glusterd([brick1.split(":")[0]]), "Unable to "
                        "restart glusterd")
        # checking for peer status post glusterd restart
        self._check_peers_status()

        # Check if the brick b1 on node1 is online or not
        online_bricks = get_online_bricks_list(self.mnode, self.volname)
        self.assertIsNotNone(online_bricks, "Unable to fetch online bricks")
        self.assertIn(brick1, online_bricks, "Brick:{} is still offline after "
                                             "glusterd restart".format(brick1))

        # Create 10 files under dir1 naming x{1..10}
        cmd = ("cd {mpt}; for i in `seq 1 10`; do touch x$i"
               "; done".format(mpt=path))
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Unable to create files on mountpoint")

        # Reset the xattr on brick b3 so that it does not blame brick b2
        second_xattr_to_reset = "trusted.afr.{}-client-1".format(self.volname)
        ret = set_fattr(host, brick_path, second_xattr_to_reset, xattr_value)
        self.assertTrue(ret, "Unable to set xattr for the directory")

        # Bring brick2 online
        self.assertTrue(restart_glusterd([brick2.split(":")[0]]), "Unable to "
                        "restart glusterd")
        self._check_peers_status()

        self.assertTrue(are_bricks_online(self.mnode, self.volname, [brick2]),
                        "Brick {} is not online".format(brick2))

        # Check are there any files in split-brain and heal completion
        self.assertFalse(is_volume_in_split_brain(self.mnode, self.volname),
                         "Some files are in split brain for "
                         "volume: {}".format(self.volname))
        self.assertTrue(monitor_heal_completion(self.mnode, self.volname),
                        "Conservative merge of files failed")

        # Check arequal checksum of all the bricks is same
        ret, arequal_from_the_bricks = collect_bricks_arequal(all_bricks)
        self.assertTrue(ret, "Arequal is collected successfully across the"
                        " bricks in the subvol {}".format(all_bricks))
        self.assertEqual(len(set(arequal_from_the_bricks)), 1, "Arequal is "
                         "same on all the bricks in the subvol")
    def test_custom_xattr_with_subvol_down_dir_exists(self):
        """
        Steps:
        1) Create directories from mount point.
        2) Bring one or more (not all) dht sub-volume(s) down by killing
           processes on that server
        3) Create a custom xattr for dir hashed to down sub-volume and also for
           another dir not hashing to down sub-volumes
           # setfattr -n user.foo -v bar2 <dir>
        4) Verify that custom xattr for directory is displayed on mount point
           and bricks for both directories
           # getfattr -n user.foo <dir>
           # getfattr -n user.foo <brick_path>/<dir>
        5) Modify custom xattr value and verify that custom xattr for directory
           is displayed on mount point and all up bricks
           # setfattr -n user.foo -v ABC <dir>
        6) Verify that custom xattr is not displayed once you remove it on
           mount point and all up bricks
        7) Verify that mount point shows pathinfo xattr for dir hashed to down
           sub-volume and also for dir not hashed to down sub-volumes
           # getfattr -n trusted.glusterfs.pathinfo <dir>
        8) Again create a custom xattr for dir not hashing to down sub-volumes
           # setfattr -n user.foo -v star1 <dir>
        9) Bring up the sub-volumes
        10) Execute lookup on parent directory of both <dir> from mount point
        11) Verify Custom extended attributes for dir1 on all bricks
        """
        # pylint: disable=protected-access
        # Create dir1 on client0
        self._create_dir(dir_name="dir1")

        # Get subvol list
        subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
        self.assertIsNotNone(subvols, "Failed to get subvols")

        # Finding a dir name such that it hashes to a different subvol
        newhash = find_new_hashed(subvols, "/", "dir1")
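        # find_new_hashed returns a candidate name (`newname`) that hashes
        # to a different subvol than "dir1", along with that subvol's index
        # (`subvol_count`).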
        new_name = str(newhash.newname)
        new_subvol_count = newhash.subvol_count

        # Create a dir with the new name
        self._create_dir(dir_name=new_name)

        # Kill the brick/subvol to which the new dir hashes
        ret = bring_bricks_offline(
            self.volname, subvols[new_subvol_count])
        self.assertTrue(ret, 'Error in bringing down subvolume %s'
                        % subvols[new_subvol_count])
        g.log.info('DHT subvol %s is offline', subvols[new_subvol_count])

        # Set the xattr on dir hashing to down subvol
        ret = set_fattr(self.client, '{}/{}'.format(self.m_point, new_name),
                        'user.foo', 'bar2')
        self.assertFalse(ret, "Unexpected: custom xattr set successfully"
                              " for dir hashing to down subvol")
        g.log.info("Expected: Failed to set xattr on dir:%s"
                   " which hashes to down subvol due to error: Transport"
                   " endpoint not connected", new_name)

        # Check if the trusted.glusterfs.pathinfo is displayed
        # for dir hashing to down subvol on the mount point
        ret = get_fattr(self.client, '{}/{}'.format(
            self.m_point, new_name), 'trusted.glusterfs.pathinfo')
        self.assertIsNotNone(ret, "Failed to get the xattr"
                             " on:{}".format(self.client))
        g.log.info("The xattr trusted.glusterfs.pathinfo"
                   " is displayed on mointpoint for %s", new_name)

        # Set the xattr on dir hashing to down subvol
        ret = set_fattr(self.client, '{}/{}'.format(self.m_point, new_name),
                        'user.foo', 'star1')
        self.assertFalse(ret, "Unexpected: custom xattr set successfully"
                              " for dir hashing to down subvol")
        g.log.info("Expected: Tansport endpoint not connected")

        # Calling the local function
        self._create_xattr_check_self_heal()
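
    # NOTE: Hypothetical sketch of the `_create_dir` helper called above;
    # the original helper is not part of this excerpt and may differ.
    def _create_dir(self, dir_name=None):
        """Create a directory on the mount point and verify that it exists"""
        dir_path = '{}/{}'.format(self.m_point, dir_name)
        ret = mkdir(self.client, dir_path)
        self.assertTrue(ret, "mkdir of {} failed".format(dir_path))
        ret = file_exists(self.client, dir_path)
        self.assertTrue(ret, "Directory {} does not exist on the mount"
                        .format(dir_path))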
    def test_directory_custom_extended_attr(self):
        """Test - set custom xattr to directory and link to directory
        """
        # pylint: disable = too-many-statements
        dir_prefix = '{root}/folder_{client_index}'

        for mount_index, mount_point in enumerate(self.mounts):
            folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                            client_index=mount_index)

            # Create a directory from mount point
            g.log.info('Creating directory : %s:%s', mount_point.mountpoint,
                       folder_name)
            ret = mkdir(mount_point.client_system, folder_name)
            self.assertTrue(
                ret, 'Failed to create directory %s on mount point %s' %
                (folder_name, mount_point.mountpoint))

            ret = file_exists(mount_point.client_system, folder_name)
            self.assertTrue(
                ret, 'Created directory %s does not exist on mount '
                'point %s' % (folder_name, mount_point.mountpoint))
            g.log.info('Created directory %s:%s', mount_point.mountpoint,
                       folder_name)

            # Verify that hash layout values are set on each
            # bricks for the dir
            g.log.debug("Verifying hash layout values")
            ret = validate_files_in_dir(mount_point.client_system,
                                        mount_point.mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_DIR)
            self.assertTrue(
                ret, "Expected - Directory is stored "
                "on hashed bricks")
            g.log.info("Hash layout values are set on each bricks")

            # Verify that mount point should not display
            # xattr : trusted.gfid and dht
            g.log.debug("Loading extra attributes")
            ret = get_fattr_list(mount_point.client_system, folder_name)

            self.assertTrue(
                'trusted.gfid' not in ret,
                "Extended attribute trusted.gfid is presented on "
                "mount point %s and folder %s" %
                (mount_point.mountpoint, folder_name))
            self.assertTrue(
                'trusted.glusterfs.dht' not in ret,
                "Extended attribute trusted.glusterfs.dht is "
                "presented on mount point %s and folder %s" %
                (mount_point.mountpoint, folder_name))

            g.log.info(
                'Extended attributes trusted.gfid and '
                'trusted.glusterfs.dht do not exist on '
                'mount point %s:%s', mount_point.mountpoint, folder_name)

            # Verify that mount point shows pathinfo xattr
            g.log.debug("Check for xattr trusted.glusterfs.pathinfo on %s:%s",
                        mount_point, folder_name)
            ret = get_fattr(mount_point.client_system,
                            mount_point.mountpoint,
                            'trusted.glusterfs.pathinfo',
                            encode="text")
            self.assertIsNotNone(
                ret, "trusted.glusterfs.pathinfo is not "
                "presented on %s:%s" % (mount_point.mountpoint, folder_name))
            g.log.info(
                'pathinfo xattr is displayed on mount point %s and '
                'dir %s', mount_point.mountpoint, folder_name)

            # Create a custom xattr for dir
            g.log.info("Set attribute user.foo to %s", folder_name)
            ret = set_fattr(mount_point.client_system, folder_name, 'user.foo',
                            'bar2')
            self.assertTrue(
                ret, "Setup custom attribute on %s:%s failed" %
                (mount_point.client_system, folder_name))

            g.log.info('Set custom attribute is set on %s:%s',
                       mount_point.client_system, folder_name)
            # Verify that custom xattr for directory is displayed
            # on mount point and bricks
            g.log.debug('Check xattr user.foo on %s:%s',
                        mount_point.client_system, folder_name)
            ret = get_fattr(mount_point.client_system,
                            folder_name,
                            'user.foo',
                            encode="text")
            self.assertEqual(
                ret, 'bar2', "Xattr attribute user.foo is not presented on "
                "mount point %s and directory %s" %
                (mount_point.client_system, folder_name))

            g.log.info(
                'Custom xattr user.foo is present on mount point'
                ' %s:%s', mount_point.client_system, folder_name)

            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix.format(root=brick_dir,
                                               client_index=mount_index)

                ret = get_fattr(brick_server,
                                brick_path,
                                'user.foo',
                                encode="text")

                g.log.debug('Check custom xattr for directory on brick %s:%s',
                            brick_server, brick_path)
                self.assertEqual(
                    'bar2', ret, "Expected: user.foo should be on brick %s\n"
                    "Actual: Value of attribute foo.bar %s" %
                    (brick_path, ret))
                g.log.info('Custom xattr is presented on brick %s', brick_path)

            # Delete custom attribute
            ret = delete_fattr(mount_point.client_system, folder_name,
                               'user.foo')
            self.assertTrue(ret, "Failed to delete custom attribute")

            g.log.info('Removed custom attribute from directory %s:%s',
                       mount_point.client_system, folder_name)
            # Verify that custom xattr is not displayed after delete
            # on mount point and on the bricks

            g.log.debug('Checking if custom extended attribute user.foo is '
                        'still present on mount or on bricks after deletion')
            self.assertIsNone(
                get_fattr(mount_point.client_system,
                          folder_name,
                          'user.foo',
                          encode="text"),
                "Xattr user.foo is presented on mount point"
                " %s:%s after deletion" %
                (mount_point.mountpoint, folder_name))

            g.log.info(
                "Xattr user.foo is not presented after deletion"
                " on mount point %s:%s", mount_point.mountpoint, folder_name)

            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix.format(root=brick_dir,
                                               client_index=mount_index)
                self.assertIsNone(
                    get_fattr(brick_server, brick_path, 'user.foo'),
                    "Deleted xattr user.foo is presented on "
                    "brick %s:%s" % (brick, brick_path))
                g.log.info(
                    'Custom attribute is not presented after delete '
                    'from directory on brick %s:%s', brick, brick_path)

        # Repeat all of the steps for link of created directory
        for mount_index, mount_point in enumerate(self.mounts):
            linked_folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                                   client_index="%s_linked" %
                                                   mount_index)
            folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                            client_index=mount_index)
            # Create link to created dir
            command = 'ln -s {src} {dst}'.format(dst=linked_folder_name,
                                                 src=folder_name)
            ret, _, _ = g.run(mount_point.client_system, command)
            self.assertEqual(
                0, ret, 'Failed to create link %s to directory %s' %
                (linked_folder_name, folder_name))
            self.assertTrue(
                file_exists(mount_point.client_system, linked_folder_name),
                'Link does not exist on %s:%s' %
                (mount_point.client_system, linked_folder_name))
            g.log.info('Created link %s to directory %s', linked_folder_name,
                       folder_name)

            # Verify that hash layout values are set on each
            # bricks for the link to dir
            g.log.debug("Verifying hash layout values")
            ret = validate_files_in_dir(mount_point.client_system,
                                        mount_point.mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_LINK)
            self.assertTrue(
                ret, "Expected - Link to directory is stored "
                "on hashed bricks")
            g.log.info("Hash layout values are set on each bricks")

            # Verify that mount point should not display xattr :
            # trusted.gfid and dht
            g.log.debug("Loading extra attributes")
            ret = get_fattr_list(mount_point.client_system, linked_folder_name)

            self.assertTrue(
                'trusted.gfid' not in ret,
                "Extended attribute trudted.gfid is presented on "
                "mount point %s and folder %s" %
                (mount_point.mountpoint, linked_folder_name))

            self.assertTrue(
                'trusted.glusterfs.dht' not in ret,
                "Extended attribute trusted.glusterfs.dht is "
                "presented on mount point %s and folder %s" %
                (mount_point.mountpoint, linked_folder_name))

            g.log.info(
                'Extended attributes trusted.gfid and '
                'trusted.glusterfs.dht do not exist on '
                'mount point %s:%s', mount_point.mountpoint,
                linked_folder_name)

            # Verify that mount point shows pathinfo xattr
            g.log.debug("Check if pathinfo is presented on %s:%s",
                        mount_point.client_system, linked_folder_name)
            self.assertIsNotNone(
                get_fattr(mount_point.client_system, mount_point.mountpoint,
                          'trusted.glusterfs.pathinfo'),
                "pathinfo is not displayed on mountpoint "
                "%s:%s" % (mount_point.client_system, linked_folder_name))
            g.log.info('pathinfo value is displayed on mount point %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Set custom attribute on link
            g.log.debug("Set custom xattr user.foo on %s:%s",
                        mount_point.client_system, linked_folder_name)
            self.assertTrue(
                set_fattr(mount_point.client_system, linked_folder_name,
                          'user.foo', 'bar2'),
                'Failed to set custom xattr user.foo on %s:%s' %
                (mount_point.client_system, linked_folder_name))
            g.log.info('Successfully set custom attribute on %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Verify that custom xattr for directory is displayed
            # on mount point and bricks
            g.log.debug('Check mountpoint and bricks for custom xattr')
            self.assertEqual(
                'bar2',
                get_fattr(mount_point.client_system,
                          linked_folder_name,
                          'user.foo',
                          encode="text"),
                'Custom xattr is not present on '
                'mount point %s:%s' %
                (mount_point.client_system, linked_folder_name))
            g.log.info("Custom xattribute is presented on mount point %s:%s",
                       mount_point.client_system, linked_folder_name)
            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix. \
                    format(root=brick_dir,
                           client_index="%s_linked" % mount_index)
                cmd = '[ -f %s ] && echo "yes" || echo "no"' % brick_path
                # Check if link exists
                _, ret, _ = g.run(brick_server, cmd)
                if 'no' in ret:
                    g.log.info("Link %s:%s does not exists", brick_server,
                               brick_path)
                    continue

                self.assertEqual(
                    get_fattr(brick_server,
                              brick_path,
                              'user.foo',
                              encode="text"), 'bar2',
                    "Actual: custom attribute not "
                    "found on brick %s:%s" % (brick_server, brick_path))
                g.log.info('Custom xattr for link found on brick %s:%s',
                           brick_server, brick_path)

            # Delete custom attribute
            g.log.debug('Removing custom attribute on mount point %s:%s',
                        mount_point.client_system, linked_folder_name)
            self.assertTrue(
                delete_fattr(mount_point.client_system, linked_folder_name,
                             'user.foo'), 'Failed to delete xattr user.foo')
            g.log.info('Deleted custom xattr from link %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Verify that custom xattr is not displayed after delete
            # on mount point and on the bricks
            g.log.debug(
                "Check if custom xattr is presented on %s:%s "
                "after deletion", mount_point.client_system,
                linked_folder_name)
            self.assertIsNone(
                get_fattr(mount_point.client_system,
                          linked_folder_name,
                          'user.foo',
                          encode="text"),
                "Expected: xattr user.foo to be not presented on"
                " %s:%s" % (mount_point.client_system, linked_folder_name))
            g.log.info("Custom xattr user.foo is not presented on %s:%s",
                       mount_point.client_system, linked_folder_name)
            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix. \
                    format(root=brick_dir,
                           client_index="%s_linked" % mount_index)
                cmd = '[ -f %s ] && echo "yes" || echo "no"' % brick_path
                # Check if link exists
                _, ret, _ = g.run(brick_server, cmd)
                if 'no' in ret:
                    g.log.info("Link %s:%s does not exists", brick_server,
                               brick_path)
                    continue

                self.assertIsNone(
                    get_fattr(brick_server,
                              brick_path,
                              'user.foo',
                              encode="text"),
                    "Extended custom attribute is presented on "
                    "%s:%s after deletion" % (brick_server, brick_path))
                g.log.info(
                    'Custom attribute is not present after deletion '
                    'from link on brick %s:%s', brick_server, brick_path)

        g.log.info('Directory custom extended attribute validation with'
                   ' getfattr and setfattr is successful')