Example #1
    def _compare_gfid_xattr_on_files(self, link_file_name, expected=True):
        """Collect and compare the value of trusted.gfid xattr for file
        on backend-bricks"""
        brick_list_test_file = get_pathinfo(
            self.client, "{}/test_file".format(self.m_point))
        xattr_list_test_file = []
        for brick in brick_list_test_file['brickdir_paths']:
            host, path = brick.split(':')
            xattr_test_file = get_fattr(host, path, "trusted.gfid")
            xattr_list_test_file.append(xattr_test_file)

        brick_list_link_file = get_pathinfo(
            self.client, "{}/{}".format(self.m_point, link_file_name))
        xattr_list_link_file = []
        for brick in brick_list_link_file['brickdir_paths']:
            host, path = brick.split(':')
            xattr_link_file = get_fattr(host, path, "trusted.gfid")
            xattr_list_link_file.append(xattr_link_file)

        if expected:
            self.assertEqual(
                xattr_list_test_file, xattr_list_link_file,
                "Unexpected: The xattr trusted.gfid is not the same "
                "for test_file and {}".format(link_file_name))
            g.log.info(
                "The xattr trusted.gfid is the same for test_file"
                " and %s", link_file_name)
        else:
            self.assertNotEqual(
                xattr_list_test_file, xattr_list_link_file,
                "Unexpected: The xattr trusted.gfid is the same "
                "for test_file and {}".format(link_file_name))
            g.log.info(
                "The xattr trusted.gfid is not the same for test_file"
                " and %s", link_file_name)
Example #2
    def check_mount_point_and_bricks_for_xattr(self, list_of_all_files):
        """
        Check xattr on mount point and bricks.
        """
        # Check if xattr is visible from mount point
        for mount_object in self.mounts:
            for fname in list_of_all_files:
                ret = get_fattr(mount_object.client_system, fname, 'user.foo')
                self.assertIsNone(
                    ret, "Custom attribute visible at mount "
                    "point even after deletion")

        # Check if xattr is visible from bricks
        for brick in get_all_bricks(self.mnode, self.volname):
            node, brick_path = brick.split(':')
            files_on_bricks = get_dir_contents(node, brick_path)
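            # Note: fname.split('/')[3] assumes mount paths shaped like
            # /mnt/<mountpoint>/<file>, so index 3 is the bare file name.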
            files = [
                fname.split('/')[3] for fname in self.list_of_files
                if fname.split('/')[3] in files_on_bricks
            ]
            for fname in files:
                ret = get_fattr(node, "{}/{}".format(brick_path, fname),
                                'user.foo')
                self.assertIsNone(
                    ret, "Custom attribute visible on "
                    "brick even after deletion")

        g.log.info("Custom xattr for file is not visible on "
                   "mount point and bricks")
Example #3
    def check_custom_xattr_visible(self, xattr_val):
        """
        Check custom xattr from mount point and on bricks.
        """
        # Check custom xattr from mount point
        for mount_object in self.mounts:
            for fname in self.files_and_soft_links:
                attr_val = get_fattr(mount_object.client_system, fname,
                                     'user.foo')
                self.assertEqual(attr_val, xattr_val,
                                 "Custom xattr not found from mount.")
        g.log.info("Custom xattr found on mount point.")

        # Check custom xattr on bricks
        for brick in get_all_bricks(self.mnode, self.volname):
            node, brick_path = brick.split(':')
            files_on_bricks = get_dir_contents(node, brick_path)
            files = [
                fname.split('/')[3] for fname in self.list_of_files
                if fname.split('/')[3] in files_on_bricks
            ]
            for fname in files:
                attr_val = get_fattr(node, "{}/{}".format(brick_path, fname),
                                     'user.foo')
                self.assertEqual(attr_val, xattr_val,
                                 "Custom xattr not visible on bricks")
        g.log.info("Custom xattr found on bricks.")
    def _check_custom_xattr_trusted_foo(self, xattr_val, visible=True):
        """Check custom xttar from mount point and on bricks."""
        # Check custom xattr from mount point
        for fname in self.list_of_device_files:
            ret = get_fattr(self.clients[0],
                            fname,
                            'trusted.foo',
                            encode='text')
            if visible:
                self.assertEqual(ret, xattr_val,
                                 "Custom xattr not found from mount.")
            else:
                self.assertIsNone(
                    ret, "Custom attribute visible at mount "
                    "point even after deletion")

        # Check custom xattr on bricks
        for brick in get_all_bricks(self.mnode, self.volname):
            node, brick_path = brick.split(':')
            files_on_bricks = get_dir_contents(node, brick_path)
            files = [
                fname for fname in self.file_names if fname in files_on_bricks
            ]
            for fname in files:
                ret = get_fattr(node,
                                "{}/{}".format(brick_path, fname),
                                'trusted.foo',
                                encode='text')
                if visible:
                    self.assertEqual(ret, xattr_val,
                                     "Custom xattr not visible on bricks")
                else:
                    self.assertIsNone(
                        ret, "Custom attribute visible on "
                        "brick even after deletion")
Example #5
    def check_for_trusted_glusterfs_pathinfo(self, list_of_files):
        """
        Check if trusted.glusterfs.pathinfo is visible.
        """
        for fname in list_of_files:
            ret = get_fattr(self.client_node, fname,
                            'trusted.glusterfs.pathinfo')
            self.assertIsNotNone(ret, "pathinfo not visible")
        g.log.info("Mount point shows pathinfo xattr for all files")
    def _check_xattr_value_on_mnt(self, expected_value=None):
        """Check if the expected value for 'user.foo'
        is present for dir1 on mountpoint"""
        ret = get_fattr(self.client, '{}/dir1'.format(self.m_point),
                        'user.foo', encode="text")
        self.assertEqual(ret, expected_value, "Failed to get the xattr"
                         " on:{}".format(self.client))
        g.log.info(
            "The xattr user.foo for dir1 is displayed on mountpoint"
            " and has value:%s", expected_value)
    def _check_trusted_glusterfs_dht_on_all_bricks(self):
        """Check trusted.glusterfs.dht xattr on the backend bricks"""
        bricks = get_all_bricks(self.mnode, self.volname)
        fattr_value = []
        for brick_path in bricks:
            node, path = brick_path.split(":")
            ret = get_fattr(node, path, "trusted.glusterfs.dht")
            fattr_value += [ret]
        self.assertEqual(len(set(fattr_value)), 4,
                         "Value of trusted.glusterfs.dht is not as expected")
        g.log.info("Successfully checked value of trusted.glusterfs.dht.")
    def _check_xattr_value_on_bricks(self, online_bricks, expected_value=None):
        """Check if the expected value for 'user.foo' is present
        for dir1 on backend bricks"""
        for brick in online_bricks:
            host, brick_path = brick.split(':')
            ret = get_fattr(host, '{}/dir1'.format(brick_path),
                            'user.foo', encode="text")
            self.assertEqual(ret, expected_value, "Failed to get the xattr"
                                                  " on:{}".format(brick_path))
            g.log.info("The xattr user.foo is displayed for dir1 on "
                       "brick:%s and has value:%s",
                       brick_path, expected_value)
    def are_mdata_xattrs_equal(self):
        """Check if atime/mtime/ctime in glusterfs.mdata xattr are identical"""
        timestamps = []
        for brick_path in self.bricks_list:
            server, brick = brick_path.split(':')
            fattr = get_fattr(server, '%s/%s' % (brick, "dir1"),
                              'trusted.glusterfs.mdata')
            self.assertIsNotNone(fattr, 'Unable to get mdata xattr')
            timestamps.append(fattr)

        g.log.debug("mdata list = %s", ''.join(map(str, timestamps)))
        return timestamps.count(timestamps[0]) == len(timestamps)
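Since this helper returns a bool rather than asserting, a caller would typically assert on the result, along these lines (hypothetical usage inside a test method):

self.assertTrue(self.are_mdata_xattrs_equal(),
                "trusted.glusterfs.mdata differs across bricks for dir1")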
    def verify_gfid_and_link_count(self, dirname, filename):
        """
        Check that the dir and all files under it have the same gfid on all 3
        bricks and that they have the .glusterfs entry as well.
        """
        dir_gfids = dict()
        file_gfids = dict()
        bricks_list = get_all_bricks(self.mnode, self.volname)
        for brick in bricks_list:
            brick_node, brick_path = brick.split(":")

            ret = get_fattr(brick_node, '%s/%s' % (brick_path, dirname),
                            'trusted.gfid')
            self.assertIsNotNone(
                ret, "trusted.gfid is not presented "
                "on %s/%s" % (brick_path, dirname))
            dir_gfids.setdefault(dirname, []).append(ret)

            ret = get_fattr(brick_node,
                            '%s/%s/%s' % (brick_path, dirname, filename),
                            'trusted.gfid')
            self.assertIsNotNone(
                ret, "trusted.gfid is not presented on "
                "%s/%s/%s" % (brick_path, dirname, filename))
            file_gfids.setdefault(filename, []).append(ret)

            stat_data = get_file_stat(
                brick_node, "%s/%s/%s" % (brick_path, dirname, filename))
            self.assertEqual(stat_data["links"], "2", 'Link count is not 2')

        for key in dir_gfids:
            self.assertTrue(
                all(value == dir_gfids[key][0] for value in dir_gfids[key]),
                'gfids do not '
                'match for %s on all bricks' % dirname)
        for key in file_gfids:
            self.assertTrue(
                all(value == file_gfids[key][0] for value in file_gfids[key]),
                'gfids do not '
                'match for %s/%s on all bricks' % (dirname, filename))
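The link-count-2 check works because, on the brick backend, every regular file is also hard-linked under the hidden .glusterfs directory at a path derived from its gfid. A small sketch of that mapping, assuming the commonly documented .glusterfs/<aa>/<bb>/<uuid> layout:

import uuid

def gfid_to_backend_path(brick_path, gfid_hex):
    """Map a hex-encoded trusted.gfid value (e.g. '0xd0b7...') to the
    .glusterfs hard-link path expected on the brick (assumed layout)."""
    raw = gfid_hex[2:] if gfid_hex.startswith('0x') else gfid_hex
    gfid = str(uuid.UUID(raw))  # canonical 8-4-4-4-12 form
    return '{0}/.glusterfs/{1}/{2}/{3}'.format(
        brick_path, gfid[0:2], gfid[2:4], gfid)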
Example #11
    def verify_gfid(self, dirname):
        dir_gfids = dict()
        bricks_list = get_all_bricks(self.mnode, self.volname)
        for brick in bricks_list:
            brick_node, brick_path = brick.split(":")
            ret = get_fattr(brick_node, '%s/%s' % (brick_path, dirname),
                            'trusted.gfid')
            self.assertIsNotNone(ret, "trusted.gfid is not present on %s/%s"
                                 % (brick_path, dirname))
            dir_gfids.setdefault(dirname, []).append(ret)
            for key in dir_gfids:
                self.assertTrue(all(value == dir_gfids[key][0]
                                    for value in dir_gfids[key]),
                                "gfid mismatch for %s" % dirname)
    def _check_trusted_glusterfs_dht_on_all_bricks(self):
        """Check trusted.glusterfs.dht xattr on the backend bricks"""
        bricks = get_all_bricks(self.mnode, self.volname)
        possible_values = [
            "0x000000000000000000000000ffffffff",
            "0x00000000000000000000000000000000"
        ]
        for brick_path in bricks:
            node, path = brick_path.split(":")
            ret = get_fattr(node, "{}/dir".format(path),
                            "trusted.glusterfs.dht")
            self.assertEqual(
                ret, possible_values[bricks.index(brick_path)],
                "Value of trusted.glusterfs.dht is not as expected")
        g.log.info("Successfully checked value of trusted.glusterfs.dht.")
    def check_xattr(self, list_of_all_dirs):
        """
        Check the custom xattr on backend bricks for the directories.

        Args:
            list_of_all_dirs(list): List of dirs created on mount.

        Returns:
            Success/failure msg.
        """
        for direc in list_of_all_dirs:
            for brick in get_all_bricks(self.mnode, self.volname):
                host, brick_path = brick.split(':')
                brick_dir_path = brick_path + '/' + direc
                ret = get_fattr(host, brick_dir_path, 'user.foo')
                self.assertIsNotNone(ret, "Custom xattr is not displayed on"
                                     " the backend bricks ")
                g.log.info("Custom xattr %s is displayed on the back-end"
                           " bricks", ret)
    def _create_xattr_check_self_heal(self):
        """Create custom xattr and check if its healed"""
        # Set the xattr on the dir1
        self._set_xattr_value(fattr_value="bar2")

        # Get online brick list
        online_bricks = get_online_bricks_list(self.mnode, self.volname)
        self.assertIsNotNone(online_bricks, "Failed to get online bricks")

        # Check if the custom xattr is being displayed on the
        # mount-point for dir1
        self._check_xattr_value_on_mnt(expected_value="bar2")

        # Check if the xattr is being displayed on the online-bricks
        # for dir1
        self._check_xattr_value_on_bricks(online_bricks, expected_value="bar2")

        # Modify custom xattr value on dir1
        self._set_xattr_value(fattr_value="ABC")

        # Lookup on mount point to refresh the value of xattr
        self._perform_lookup()

        # Check if the modified custom xattr is being displayed
        # on the mount-point for dir1
        self._check_xattr_value_on_mnt(expected_value="ABC")

        # Check if the modified custom xattr is being
        # displayed on the bricks for dir1
        self._check_xattr_value_on_bricks(online_bricks, expected_value="ABC")

        # Remove the custom xattr from the mount point for dir1
        ret = delete_fattr(self.client,
                           '{}/dir1'.format(self.m_point), 'user.foo')
        self.assertTrue(ret, "Failed to delete the xattr for "
                             "dir1 on mountpoint")
        g.log.info(
            "Successfully deleted the xattr for dir1 from mountpoint")

        # Lookup on mount point to refresh the value of xattr
        self._perform_lookup()

        # Check that the custom xattr is no longer displayed
        # for dir1 on the mountpoint
        ret = get_fattr(self.client, '{}/dir1'.format(self.m_point),
                        'user.foo', encode="text")
        self.assertIsNone(ret, "Xattr for dir1 is not removed"
                          " on:{}".format(self.client))
        g.log.info("Success: xattr is removed for dir1 on mountpoint")

        # Check that the custom xattr is no longer displayed
        # for dir1 on the backend bricks
        for brick in online_bricks:
            host, brick_path = brick.split(':')
            ret = get_fattr(host, '{}/dir1'.format(brick_path),
                            'user.foo', encode="text")
            self.assertEqual(ret, None, "Xattr for dir1 is not removed"
                                        " on:{}".format(brick_path))
            g.log.info("Xattr for dir1 is removed from "
                       "brick:%s", brick_path)

        # Check if the trusted.glusterfs.pathinfo is displayed
        # for dir1 on mountpoint
        ret = get_fattr(self.client, '{}/dir1'.format(self.m_point),
                        'trusted.glusterfs.pathinfo')
        self.assertIsNotNone(ret, "Failed to get the xattr"
                             " on:{}".format(self.client))
        g.log.info("The xattr trusted.glusterfs.pathinfo"
                   " is displayed on mointpoint for dir1")

        # Set the xattr on the dir1
        self._set_xattr_value(fattr_value="star1")

        # Bring back the bricks online
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertFalse(ret, 'Failed to start volume %s with "force" option'
                         % self.volname)
        g.log.info('Successfully started volume %s with "force" option',
                   self.volname)

        # Execute lookup on the mountpoint
        self._perform_lookup()

        # Get online brick list
        online_bricks = get_online_bricks_list(self.mnode, self.volname)
        self.assertIsNotNone(online_bricks, "Failed to get online bricks")

        # Check if the custom xattr is being displayed
        # on the mount-point for dir1
        self._check_xattr_value_on_mnt(expected_value="star1")

        # Check if the custom xattr is displayed on all the bricks
        self._check_xattr_value_on_bricks(online_bricks,
                                          expected_value="star1")
    def test_directory_custom_extended_attr(self):
        """Test - set custom xattr to directory and link to directory
        """
        # pylint: disable = too-many-statements
        dir_prefix = '{root}/folder_{client_index}'

        for mount_index, mount_point in enumerate(self.mounts):
            folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                            client_index=mount_index)

            # Create a directory from mount point
            g.log.info('Creating directory : %s:%s', mount_point.mountpoint,
                       folder_name)
            ret = mkdir(mount_point.client_system, folder_name)
            self.assertTrue(
                ret, 'Failed to create directory %s on mount point %s' %
                (folder_name, mount_point.mountpoint))

            ret = file_exists(mount_point.client_system, folder_name)
            self.assertTrue(
                ret, 'Created Directory %s does not exist on mount '
                'point %s' % (folder_name, mount_point.mountpoint))
            g.log.info('Created directory %s:%s', mount_point.mountpoint,
                       folder_name)

            # Verify that hash layout values are set on each
            # bricks for the dir
            g.log.debug("Verifying hash layout values")
            ret = validate_files_in_dir(mount_point.client_system,
                                        mount_point.mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_DIR)
            self.assertTrue(
                ret, "Expected - Directory is stored "
                "on hashed bricks")
            g.log.info("Hash layout values are set on each bricks")

            # Verify that mount point should not display
            # xattr : trusted.gfid and dht
            g.log.debug("Loading extra attributes")
            ret = get_fattr_list(mount_point.client_system, folder_name)

            self.assertTrue(
                'trusted.gfid' not in ret,
                "Extended attribute trusted.gfid is presented on "
                "mount point %s and folder %s" %
                (mount_point.mountpoint, folder_name))
            self.assertTrue(
                'trusted.glusterfs.dht' not in ret,
                "Extended attribute trusted.glusterfs.dht is "
                "presented on mount point %s and folder %s" %
                (mount_point.mountpoint, folder_name))

            g.log.info(
                'Extended attributes trusted.gfid and '
                'trusted.glusterfs.dht do not exist on '
                'mount point %s:%s ', mount_point.mountpoint, folder_name)

            # Verify that mount point shows pathinfo xattr
            g.log.debug("Check for xattr trusted.glusterfs.pathinfo on %s:%s",
                        mount_point, folder_name)
            ret = get_fattr(mount_point.client_system,
                            mount_point.mountpoint,
                            'trusted.glusterfs.pathinfo',
                            encode="text")
            self.assertIsNotNone(
                ret, "trusted.glusterfs.pathinfo is not "
                "presented on %s:%s" % (mount_point.mountpoint, folder_name))
            g.log.info(
                'pathinfo xattr is displayed on mount point %s and '
                'dir %s', mount_point.mountpoint, folder_name)

            # Create a custom xattr for dir
            g.log.info("Set attribute user.foo to %s", folder_name)
            ret = set_fattr(mount_point.client_system, folder_name, 'user.foo',
                            'bar2')
            self.assertTrue(
                ret, "Setup custom attribute on %s:%s failed" %
                (mount_point.client_system, folder_name))

            g.log.info('Custom attribute is set on %s:%s',
                       mount_point.client_system, folder_name)
            # Verify that custom xattr for directory is displayed
            # on mount point and bricks
            g.log.debug('Check xattr user.foo on %s:%s',
                        mount_point.client_system, folder_name)
            ret = get_fattr(mount_point.client_system,
                            folder_name,
                            'user.foo',
                            encode="text")
            self.assertEqual(
                ret, 'bar2', "Xattr attribute user.foo is not presented on "
                "mount point %s and directory %s" %
                (mount_point.client_system, folder_name))

            g.log.info(
                'Custom xattr user.foo is presented on mount point'
                ' %s:%s ', mount_point.client_system, folder_name)

            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix.format(root=brick_dir,
                                               client_index=mount_index)

                ret = get_fattr(brick_server,
                                brick_path,
                                'user.foo',
                                encode="text")

                g.log.debug('Check custom xattr for directory on brick %s:%s',
                            brick_server, brick_path)
                self.assertEqual(
                    'bar2', ret, "Expected: user.foo should be on brick %s\n"
                    "Actual: Value of attribute foo.bar %s" %
                    (brick_path, ret))
                g.log.info('Custom xattr is presented on brick %s', brick_path)

            # Delete custom attribute
            ret = delete_fattr(mount_point.client_system, folder_name,
                               'user.foo')
            self.assertTrue(ret, "Failed to delete custom attribute")

            g.log.info('Removed custom attribute from directory %s:%s',
                       mount_point.client_system, folder_name)
            # Verify that custom xattr is not displayed after delete
            # on mount point and on the bricks

            g.log.debug('Looking if custom extra attribute user.foo is '
                        'presented on mount or on bricks after deletion')
            self.assertIsNone(
                get_fattr(mount_point.client_system,
                          folder_name,
                          'user.foo',
                          encode="text"),
                "Xattr user.foo is presented on mount point"
                " %s:%s after deletion" %
                (mount_point.mountpoint, folder_name))

            g.log.info(
                "Xattr user.foo is not presented after deletion"
                " on mount point %s:%s", mount_point.mountpoint, folder_name)

            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix.format(root=brick_dir,
                                               client_index=mount_index)
                self.assertIsNone(
                    get_fattr(brick_server, brick_path, 'user.foo'),
                    "Deleted xattr user.foo is presented on "
                    "brick %s:%s" % (brick, brick_path))
                g.log.info(
                    'Custom attribute is not presented after delete '
                    'from directory on brick %s:%s', brick, brick_path)

        # Repeat all of the steps for link of created directory
        for mount_index, mount_point in enumerate(self.mounts):
            linked_folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                                   client_index="%s_linked" %
                                                   mount_index)
            folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                            client_index=mount_index)
            # Create link to created dir
            command = 'ln -s {src} {dst}'.format(dst=linked_folder_name,
                                                 src=folder_name)
            ret, _, _ = g.run(mount_point.client_system, command)
            self.assertEqual(
                0, ret, 'Failed to create link %s to directory %s' %
                (linked_folder_name, folder_name))
            self.assertTrue(
                file_exists(mount_point.client_system, linked_folder_name),
                'Link does not exist on %s:%s' %
                (mount_point.client_system, linked_folder_name))
            g.log.info('Created link %s to directory %s', linked_folder_name,
                       folder_name)

            # Verify that hash layout values are set on each
            # bricks for the link to dir
            g.log.debug("Verifying hash layout values")
            ret = validate_files_in_dir(mount_point.client_system,
                                        mount_point.mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_LINK)
            self.assertTrue(
                ret, "Expected - Link to directory is stored "
                "on hashed bricks")
            g.log.info("Hash layout values are set on each bricks")

            # Verify that mount point should not display xattr :
            # trusted.gfid and dht
            g.log.debug("Loading extra attributes")
            ret = get_fattr_list(mount_point.client_system, linked_folder_name)

            self.assertTrue(
                'trusted.gfid' not in ret,
                "Extended attribute trudted.gfid is presented on "
                "mount point %s and folder %s" %
                (mount_point.mountpoint, linked_folder_name))

            self.assertTrue(
                'trusted.glusterfs.dht' not in ret,
                "Extended attribute trusted.glusterfs.dht is "
                "presented on mount point %s and folder %s" %
                (mount_point.mountpoint, linked_folder_name))

            g.log.info(
                'Extended attributes trusted.gfid and '
                'trusted.glusterfs.dht do not exist on '
                'mount point %s:%s ', mount_point.mountpoint,
                linked_folder_name)

            # Verify that mount point shows pathinfo xattr
            g.log.debug("Check if pathinfo is presented on %s:%s",
                        mount_point.client_system, linked_folder_name)
            self.assertIsNotNone(
                get_fattr(mount_point.client_system, mount_point.mountpoint,
                          'trusted.glusterfs.pathinfo'),
                "pathinfo is not displayed on mountpoint "
                "%s:%s" % (mount_point.client_system, linked_folder_name))
            g.log.info('pathinfo value is displayed on mount point %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Set custom Attribute to link
            g.log.debug("Set custom xattribute user.foo to %s:%s",
                        mount_point.client_system, linked_folder_name)
            self.assertTrue(
                set_fattr(mount_point.client_system, linked_folder_name,
                          'user.foo', 'bar2'))
            g.log.info('Successfully set custom attribute on %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Verify that custom xattr for directory is displayed
            # on mount point and bricks
            g.log.debug('Check mountpoint and bricks for custom xattr')
            self.assertEqual(
                'bar2',
                get_fattr(mount_point.client_system,
                          linked_folder_name,
                          'user.foo',
                          encode="text"),
                'Custom xattr is not presented on '
                'mount point %s:%s' %
                (mount_point.client_system, linked_folder_name))
            g.log.info("Custom xattr is presented on mount point %s:%s",
                       mount_point.client_system, linked_folder_name)
            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix. \
                    format(root=brick_dir,
                           client_index="%s_linked" % mount_index)
                cmd = '[ -f %s ] && echo "yes" || echo "no"' % brick_path
                # Check if link exists
                _, ret, _ = g.run(brick_server, cmd)
                if 'no' in ret:
                    g.log.info("Link %s:%s does not exists", brick_server,
                               brick_path)
                    continue

                self.assertEqual(
                    get_fattr(brick_server,
                              brick_path,
                              'user.foo',
                              encode="text"), 'bar2',
                    "Actual: custom attribute not "
                    "found on brick %s:%s" % (brick_server, brick_path))
                g.log.info('Custom xattr for link found on brick %s:%s', brick,
                           brick_path)

            # Delete custom attribute
            g.log.debug('Removing custom attribute on mount point %s:%s',
                        mount_point.client_system, linked_folder_name)
            self.assertTrue(
                delete_fattr(mount_point.client_system, linked_folder_name,
                             'user.foo'), 'Failed to delete xattr user.foo')
            g.log.info('Deleted custom xattr from link %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Verify that custom xattr is not displayed after delete
            # on mount point and on the bricks
            g.log.debug(
                "Check if custom xattr is presented on %s:%s "
                "after deletion", mount_point.client_system,
                linked_folder_name)
            self.assertIsNone(
                get_fattr(mount_point.client_system,
                          linked_folder_name,
                          'user.foo',
                          encode="text"),
                "Expected: xattr user.foo to be not presented on"
                " %s:%s" % (mount_point.client_system, linked_folder_name))
            g.log.info("Custom xattr user.foo is not presented on %s:%s",
                       mount_point.client_system, linked_folder_name)
            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix. \
                    format(root=brick_dir,
                           client_index="%s_linked" % mount_index)
                cmd = '[ -f %s ] && echo "yes" || echo "no"' % brick_path
                # Check if link exists
                _, ret, _ = g.run(brick_server, cmd)
                if 'no' in ret:
                    g.log.info("Link %s:%s does not exists", brick_server,
                               brick_path)
                    continue

                self.assertIsNone(
                    get_fattr(brick_server,
                              brick_path,
                              'user.foo',
                              encode="text"),
                    "Extended custom attribute is presented on "
                    "%s:%s after deletion" % (brick_server, brick_path))
                g.log.info(
                    'Custom attribute is not presented after delete '
                    'from link on brick %s:%s', brick_server, brick_path)

        g.log.info('Directory custom extended attribute validation with '
                   'getfattr/setfattr is successful')
    def test_spurious_rebalance(self):
        """
        In this test case:
        1. Trusted storage Pool of 3 nodes
        2. Create a distributed volumes with 3 bricks
        3. Start the volume
        4. Fuse mount the gluster volume on a node outside the trusted pool
        5. Remove a brick from the volume
        6. Check remove-brick status
        7. Stop the remove brick process
        8. Perform fix-layout on the volume
        9. Get the rebalance fix-layout status
        10. Create a directory from mount point
        11. Check trusted.glusterfs.dht extended attribute for the newly
            created directory on the removed brick
        """

        # pylint: disable=too-many-statements
        my_servers = self.servers[0:3]
        my_server_info = {}
        for server in self.servers[0:3]:
            my_server_info[server] = self.all_servers_info[server]
        for index in range(1, 3):
            ret, _, _ = peer_probe(self.servers[0], self.servers[index])
            self.assertEqual(ret, 0, ("peer probe from %s to %s is failed",
                                      self.servers[0], self.servers[index]))
            g.log.info("peer probe is success from %s to "
                       "%s", self.servers[0], self.servers[index])

        self.volname = "testvol"
        bricks_list = form_bricks_list(self.mnode, self.volname, 3, my_servers,
                                       my_server_info)
        g.log.info("Creating a volume %s ", self.volname)
        ret, _, _ = volume_create(self.mnode,
                                  self.volname,
                                  bricks_list,
                                  force=False)
        self.assertEqual(ret, 0, ("Unable"
                                  "to create volume %s" % self.volname))
        g.log.info("Volume created successfully %s", self.volname)

        ret, _, _ = volume_start(self.mnode, self.volname, False)
        self.assertEqual(ret, 0, ("Failed to start the "
                                  "volume %s", self.volname))
        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        # Mounting a volume
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
        g.log.info("Volume mounted successfully : %s", self.volname)
        remove_brick_list = []
        remove_brick_list.append(bricks_list[2])
        ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
                                 'start')
        self.assertEqual(ret, 0, "Failed to start remove brick operation")
        g.log.info("Remove bricks operation started successfully")

        ret, _, _ = remove_brick(self.mnode, self.volname, remove_brick_list,
                                 'stop')
        self.assertEqual(ret, 0, "Failed to stop remove brick operation")
        g.log.info("Remove bricks operation stopped successfully")

        g.log.info("Starting Fix-layoyt on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname, True)
        self.assertEqual(ret, 0, ("Failed to start rebalance for fix-layout"
                                  "on the volume %s", self.volname))
        g.log.info("Successfully started fix-layout on the volume %s",
                   self.volname)

        # Wait for fix-layout to complete
        g.log.info("Waiting for fix-layout to complete")
        ret = wait_for_fix_layout_to_complete(self.mnode, self.volname)
        self.assertTrue(ret, ("Fix-layout is not yet complete on the volume "
                              "%s", self.volname))
        g.log.info("Fix-layout is successfully complete on the volume %s",
                   self.volname)
        ret = mkdir(self.mounts[0].client_system,
                    "%s/dir1" % self.mounts[0].mountpoint)
        self.assertTrue(ret, ("Failed to create directory dir1"))
        g.log.info("directory dir1 is created successfully")

        brick_server, brick_dir = remove_brick_list[0].split(':')
        folder_name = brick_dir + "/dir1"
        g.log.info("Check trusted.glusterfs.dht on host  %s for directory %s",
                   brick_server, folder_name)

        ret = get_fattr(brick_server, folder_name, 'trusted.glusterfs.dht')
        self.assertTrue(
            ret, ("Failed to get trusted.glusterfs.dht for %s" % folder_name))
        g.log.info("get trusted.glusterfs.dht xattr for %s successfully",
                   folder_name)
Example #17
    def test_distribution_hash_value(self):
        """Test case tests DHT of files and directories based on hash value
        """
        # pylint: disable=too-many-locals
        for client_index, mount_obj in enumerate(self.mounts):
            client_host = mount_obj.client_system
            mountpoint = mount_obj.mountpoint

            # Create directory for initial data
            g.log.debug("Creating temporary folder on client's machine %s:%s",
                        client_host, self.temp_folder)
            if not mkdir(client_host, self.temp_folder):
                g.log.error("Failed to create temporary directory "
                            "on client machine %s:%s",
                            client_host, self.temp_folder)
                raise ExecutionError("Failed to create temporary directory "
                                     "on client machine %s:%s" %
                                     (client_host, self.temp_folder))
            g.log.info('Created temporary directory on client machine %s:%s',
                       client_host, self.temp_folder)
            # Prepare a set of data
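            # File names are a..z; roughly half get a random single-letter
            # directory prefix (A-D), the rest land in the folder root.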
            files = ["{prefix}{file_name}_{client_index}".
                     format(file_name=file_name,
                            client_index=client_index,
                            prefix='' if randint(1, 6) % 2
                            else choice('ABCD') + '/')
                     for file_name in map(chr, range(97, 123))]
            ret = self.create_files(client_host, self.temp_folder,
                                    files,
                                    "Lorem Ipsum is simply dummy text of the "
                                    "printing and typesetting industry.")
            self.assertTrue(ret, "Failed creating a set of files and dirs "
                                 "on %s:%s" % (client_host, self.temp_folder))
            g.log.info('Created data set on client machine on folder %s:%s',
                       client_host, self.temp_folder)

            # Copy prepared data to mount point
            cmd = ('cp -vr {source}/* {destination}'.format(
                source=self.temp_folder,
                destination=mountpoint))
            ret, _, _ = g.run(client_host, cmd)
            self.assertEqual(ret, 0, "Copy data to mount point %s:%s Failed")
            g.log.info('Copied prepared data to mount point %s:%s',
                       client_host, mountpoint)

            # Verify that hash layout values are set on each
            # bricks for the dir
            g.log.debug("Verifying DHT layout")
            ret = validate_files_in_dir(client_host, mountpoint,
                                        test_type=TEST_LAYOUT_IS_COMPLETE)
            self.assertTrue(ret, "TEST_LAYOUT_IS_COMPLETE: FAILED")
            g.log.info("TEST_LAYOUT_IS_COMPLETE: PASS on %s:%s ",
                       client_host, mountpoint)

            g.log.debug("Verifying files and directories")
            ret = validate_files_in_dir(client_host, mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_DIRS)
            self.assertTrue(ret, "TEST_FILE_EXISTS_ON_HASHED_BRICKS: FAILED")
            g.log.info("TEST_FILE_EXISTS_ON_HASHED_BRICKS: PASS")

            # Verify "trusted.gfid" extended attribute of the
            # directory/file on all the bricks
            gfids = dict()
            g.log.debug("Check if trusted.gfid is presented on the bricks")
            for brick_item in get_all_bricks(self.mnode, self.volname):
                brick_host, brick_dir = brick_item.split(':')

                for target_destination in files:
                    if not file_exists(brick_host, '{brick_dir}/{dest}'.
                                       format(brick_dir=brick_dir,
                                              dest=target_destination)):
                        continue
                    ret = get_fattr(brick_host, '%s/%s' %
                                    (brick_dir, target_destination),
                                    'trusted.gfid')
                    self.assertIsNotNone(ret,
                                         "trusted.gfid is not presented "
                                         "on %s/%s" % (brick_dir,
                                                       target_destination))
                    g.log.info("Verified trusted.gfid on brick %s:%s",
                               brick_item, target_destination)
                    gfids.setdefault(target_destination, []).append(ret)

            g.log.debug('Check if trusted.gfid is same on all the bricks')
            self.assertTrue(all(len(set(gfids[k])) == 1 for k in gfids),
                            "trusted.gfid should be same on all the bricks")
            g.log.info('trusted.gfid is same on all the bricks')
            # Verify that mount point shows pathinfo xattr.
            g.log.debug("Check if pathinfo is presented on mount point "
                        "%s:%s", client_host, mountpoint)
            ret = get_fattr(client_host, mountpoint,
                            'trusted.glusterfs.pathinfo')
            self.assertIsNotNone(ret, "pathinfo is not presented on mount "
                                      "point %s:%s" % (client_host,
                                                       mountpoint))

            g.log.info('trusted.glusterfs.pathinfo is presented on mount'
                       ' point %s:%s', client_host, mountpoint)

            # Mount point should not display xattr:
            # trusted.gfid and trusted.glusterfs.dht
            g.log.debug("Check if trusted.gfid and trusted.glusterfs.dht are "
                        "not presented on mount point %s:%s", client_host,
                        mountpoint)
            attributes = get_fattr_list(client_host, mountpoint)
            self.assertFalse('trusted.gfid' in attributes,
                             "Expected: Mount point shouldn't display xattr "
                             "{xattr}. Actual: xattr {xattr} is "
                             "presented on mount point".
                             format(xattr='trusted.gfid'))
            self.assertFalse('trusted.glusterfs.dht' in attributes,
                             "Expected: Mount point shouldn't display xattr "
                             "{xattr}. Actual: xattr {xattr} is "
                             "presented on mount point".
                             format(xattr='trusted.glusterfs.dht'))

            g.log.info("trusted.gfid and trusted.glusterfs.dht are not "
                       "presented on mount point %s:%s", client_host,
                       mountpoint)
        g.log.info('Files and dirs are stored on bricks based on hash value')
    def _get_fattr_for_the_brick(self, brick):
        """Get xattr of trusted.afr.volname-client-0 for the given brick"""
        host, fqpath = brick.split(":")
        fqpath = fqpath + "/dir1"
        fattr = "trusted.afr.{}-client-0".format(self.volname)
        return get_fattr(host, fqpath, fattr, encode="hex")
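The hex value returned here is AFR's pending-counter block for the first client. It is commonly described as three big-endian 32-bit counters (data, metadata, entry); a decoding sketch under that assumption:

def decode_afr_pending(afr_hex):
    """Split a hex-encoded trusted.afr.<vol>-client-N value into the
    assumed (data, metadata, entry) pending counters."""
    raw = afr_hex[2:] if afr_hex.startswith('0x') else afr_hex
    words = [int(raw[i:i + 8], 16) for i in range(0, 24, 8)]
    return dict(zip(('data', 'metadata', 'entry'), words))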
    def test_afr_gfid_heal(self):
        """
        Description: This test case runs split-brain resolution
                     on 5 files in split-brain on a 1x2 volume.
                     After resolving split-brain, it makes sure that
                     split-brain resolution fails on files that are
                     no longer in split-brain.
        """

        g.log.info("disabling the self heal daemon")
        ret = disable_self_heal_daemon(self.mnode, self.volname)
        self.assertTrue(ret, "unable to disable self heal daemon")
        g.log.info("Successfully disabled the self heal daemon")

        # getting list of all bricks
        all_bricks = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(all_bricks, "failed to get list of bricks")
        g.log.info("bringing down brick1")
        ret = bring_bricks_offline(self.volname, all_bricks[0:1])
        self.assertTrue(ret, "unable to bring brick1 offline")
        g.log.info("Successfully brought the following brick offline "
                   ": %s", str(all_bricks[0]))
        g.log.info("verifying if brick1 is offline")
        ret = are_bricks_offline(self.mnode, self.volname, all_bricks[0:1])
        self.assertTrue(ret, "brick1 is still online")
        g.log.info("verified: brick1 is offline")

        g.log.info("creating 5 files from mount point")
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("python %s create_files "
                   "-f 5 --base-file-name test_file --fixed-file-size 1k %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
        # Validate I/O
        g.log.info("Wait for IO to complete and validate IO.....")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")
        g.log.info("Successfully created a file from mount point")

        g.log.info("bringing brick 1 back online")
        ret = bring_bricks_online(self.mnode, self.volname, all_bricks[0:1])
        self.assertIsNotNone(ret, "unable to bring brick 1 online")
        g.log.info("Successfully brought the following brick online "
                   ": %s", str(all_bricks[0]))
        g.log.info("verifying if brick1 is online")
        ret = are_bricks_online(self.mnode, self.volname, all_bricks[0:1])
        self.assertTrue(ret, "brick1 is not online")
        g.log.info("verified: brick1 is online")

        g.log.info("bringing down brick2")
        ret = bring_bricks_offline(self.volname, all_bricks[1:2])
        self.assertTrue(ret, "unable to bring brick2 offline")
        g.log.info("Successfully brought the following brick offline "
                   ": %s", str(all_bricks[1]))
        g.log.info("verifying if brick2 is offline")
        ret = are_bricks_offline(self.mnode, self.volname, all_bricks[1:2])
        self.assertTrue(ret, "brick2 is still online")
        g.log.info("verified: brick2 is offline")

        g.log.info("creating 5 new files of same name from mount point")
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("python %s create_files "
                   "-f 5 --base-file-name test_file --fixed-file-size 10k %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
        # Validate I/O
        g.log.info("Wait for IO to complete and validate IO.....")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("IO is successful on all mounts")
        g.log.info("Successfully created a new file of same name "
                   "from mount point")

        g.log.info("bringing brick2 back online")
        ret = bring_bricks_online(self.mnode, self.volname, all_bricks[1:2])
        self.assertIsNotNone(ret, "unable to bring brick2 online")
        g.log.info("Successfully brought the following brick online "
                   ": %s", str(all_bricks[1]))
        g.log.info("verifying if brick2 is online")
        ret = are_bricks_online(self.mnode, self.volname, all_bricks[1:2])
        self.assertTrue(ret, "brick2 is not online")
        g.log.info("verified: brick2 is online")

        g.log.info("enabling the self heal daemon")
        ret = enable_self_heal_daemon(self.mnode, self.volname)
        self.assertTrue(ret, "failed to enable self heal daemon")
        g.log.info("Successfully enabled the self heal daemon")

        g.log.info("checking if volume is in split-brain")
        ret = is_volume_in_split_brain(self.mnode, self.volname)
        self.assertTrue(ret, "unable to create split-brain scenario")
        g.log.info("Successfully created split brain scenario")

        g.log.info("resolving split-brain by choosing first brick as "
                   "the source brick")
        node, brick_path = all_bricks[0].split(':')
        for fcount in range(5):
            command = ("gluster v heal " + self.volname + " split-brain "
                       "source-brick " + all_bricks[0] + ' /test_file' +
                       str(fcount) + '.txt')
            ret, _, _ = g.run(node, command)
            self.assertEqual(ret, 0, "command execution not successful")
        # triggering heal
        ret = trigger_heal(self.mnode, self.volname)
        self.assertTrue(ret, "heal not triggered")
        g.log.info("Successfully triggered heal")
        # waiting for heal to complete
        ret = monitor_heal_completion(self.mnode,
                                      self.volname,
                                      timeout_period=240)
        self.assertTrue(ret, "heal not completed")
        g.log.info("Heal completed successfully")
        # checking if any file is in split-brain
        ret = is_volume_in_split_brain(self.mnode, self.volname)
        self.assertFalse(ret, "file still in split-brain")
        g.log.info("Successfully resolved split brain situation using "
                   "CLI based resolution")

        g.log.info("resolving split-brain on a file not in split-brain")
        node, brick_path = all_bricks[0].split(':')
        command = ("gluster v heal " + self.volname + " split-brain "
                   "source-brick " + all_bricks[1] + " /test_file0.txt")
        ret, _, _ = g.run(node, command)
        self.assertNotEqual(
            ret, 0, "Unexpected: split-brain resolution "
            "command is successful on a file which"
            " is not in split-brain")
        g.log.info("Expected: split-brian resolution command failed on "
                   "a file which is not in split-brain")

        g.log.info("checking the split-brain status of each file")
        for fcount in range(5):
            fpath = (self.mounts[0].mountpoint + '/test_file' + str(fcount) +
                     '.txt')
            status = get_fattr(self.mounts[0].client_system, fpath,
                               'replica.split-brain-status')
            compare_string = ("The file is not under data or metadata "
                              "split-brain")
            self.assertEqual(
                status.rstrip('\x00'), compare_string,
                "file test_file%s is under"
                " split-brain" % str(fcount))
        g.log.info("none of the files are under split-brain")
Example #20
    def test_remove_brick_scenarios(self):
        # pylint: disable=too-many-statements
        """
        Test case:
        1. Create a cluster by peer probing and create a volume.
        2. Mount it and write some IO like 100000 files.
        3. Initiate the remove-brick operation on pair of bricks.
        4. Stop the remove-brick operation using other pairs of bricks.
        5. Get the remove-brick status using other pair of bricks in
           the volume.
        6. Stop the remove-brick process using non-existent bricks.
        7. Check for the remove-brick status using non-existent bricks.
        8. Stop the remove-brick operation that was started in step 3.
        9. Perform fix-layout on the volume.
        10. Get the rebalance fix-layout status.
        11. Create a directory from mountpoint.
        12. Check for the 'trusted.glusterfs.dht' extended attribute on the
            newly created directory on the bricks whose removal was stopped
            in step 8.
        13. Umount, stop and delete the volume.
        """

        # Getting a list of all the bricks.
        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        # Running IO.
        pool = ThreadPool(5)
        # Build a command per each thread
        # e.g. "seq 1 20000 ... touch" , "seq 20001 40000 ... touch" etc
        cmds = ["seq {} {} | sed 's|^|{}/test_file|' | xargs touch".
                format(i, i + 19999, self.mounts[0].mountpoint)
                for i in range(1, 100000, 20000)]
        # Run all commands in parallel (each thread returns a tuple from g.run)
        ret = pool.map(
            lambda command: g.run(self.mounts[0].client_system, command), cmds)
        # ret -> list of tuples [(return_code, stdout, stderr),...]
        pool.close()
        pool.join()
        # Verify all commands' exit code is 0 (first element of each tuple)
        for thread_return in ret:
            self.assertEqual(thread_return[0], 0, "File creation failed.")
        g.log.info("Files create on mount point.")

        # Removing bricks from volume.
        remove_brick_list_original = bricks_list[3:6]
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_original, 'start')
        self.assertEqual(ret, 0, "Failed to start remove brick operation.")
        g.log.info("Remove bricks operation started successfully.")

        # Stopping brick remove operation for other pair of bricks.
        remove_brick_list_other_pair = bricks_list[0:3]
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_other_pair, 'stop')
        self.assertEqual(ret, 1, "Successfully stopped remove brick operation "
                                 "on other pair of bricks.")
        g.log.info("Failed to stop remove brick operation on"
                   " other pair of bricks.")

        # Checking status of brick remove operation for other pair of bricks.
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_other_pair, 'status')
        self.assertEqual(ret, 1, "Error: Got status on other pair of bricks.")
        g.log.info("EXPECTED: Failed to get status on other pair of bricks.")

        # Stopping remove operation for non-existent bricks.
        remove_brick_list_non_existent = [bricks_list[0] + 'non-existent',
                                          bricks_list[1] + 'non-existent',
                                          bricks_list[2] + 'non-existent']
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_non_existent, 'stop')
        self.assertEqual(ret, 1, "Error: Successfully stopped remove brick"
                                 " operation on non-existent bricks.")
        g.log.info("EXPECTED: Failed to stop remove brick operation"
                   " on non existent bricks.")

        # Checking status of brick remove operation for non-existent bricks.
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_non_existent, 'status')
        self.assertEqual(ret, 1,
                         "Error: Status on non-existent bricks successful.")
        g.log.info("EXPECTED: Failed to get status on non existent bricks.")

        # Stopping the initial brick remove operation.
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_original, 'stop')
        self.assertEqual(ret, 0, "Failed to stop remove brick operation")
        g.log.info("Remove bricks operation stop successfully")

        # Start rebalance fix layout for volume.
        g.log.info("Starting Fix-layout on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname, fix_layout=True)
        self.assertEqual(ret, 0, ("Failed to start rebalance for fix-layout"
                                  "on the volume %s", self.volname))
        g.log.info("Successfully started fix-layout on the volume %s",
                   self.volname)

        # Checking status of rebalance fix layout for the volume.
        ret, _, _ = rebalance_status(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to check status of rebalance"
                                  "on the volume %s", self.volname))
        g.log.info("Successfully checked status on the volume %s",
                   self.volname)
        ret = wait_for_fix_layout_to_complete(self.mnode,
                                              self.volname, timeout=30000)
        self.assertTrue(ret, ("Failed to check for rebalance."))
        g.log.info("Rebalance completed.")

        # Creating directory.
        dir_name = ''
        for counter in range(0, 10):
            ret = mkdir(self.mounts[0].client_system,
                        self.mounts[0].mountpoint + "/dir1" + str(counter),
                        parents=True)
            if ret:
                dir_name = "/dir1" + str(counter)
                break
        self.assertTrue(ret, ("Failed to create directory dir1."))
        g.log.info("Directory dir1 created successfully.")

        # Checking value of attribute for dht.
        brick_server, brick_dir = bricks_list[0].split(':')
        dir_name = brick_dir + dir_name
        g.log.info("Check trusted.glusterfs.dht on host  %s for directory %s",
                   brick_server, dir_name)
        ret = get_fattr(brick_server, dir_name, 'trusted.glusterfs.dht')
        self.assertTrue(ret, ("Failed to get trusted.glusterfs.dht for %s"
                              % dir_name))
        g.log.info("Get trusted.glusterfs.dht xattr for %s successfully",
                   dir_name)
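
    # The dht layout check above inspects only bricks_list[0]; a minimal
    # helper sketch that extends it to every brick hosting the directory.
    # The helper name is illustrative (not part of the original suite); it
    # reuses get_all_bricks, file_exists and get_fattr exactly as the
    # surrounding tests already call them.
    def _verify_dht_layout_on_bricks(self, dir_name):
        """Assert trusted.glusterfs.dht is set for dir_name on each brick."""
        bricks = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks, "Failed to get the brick list")
        for brick in bricks:
            host, path = brick.split(':')
            full_path = path + dir_name
            # A brick may legitimately not hold the directory yet
            if not file_exists(host, full_path):
                continue
            layout = get_fattr(host, full_path, 'trusted.glusterfs.dht')
            self.assertIsNotNone(layout,
                                 "trusted.glusterfs.dht missing on %s:%s"
                                 % (host, full_path))
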
    def test_detach_node_used_to_mount(self):
        # pylint: disable=too-many-statements
        """
        Test case:
        1. Create a 1x3 volume with only 3 nodes from the cluster.
        2. Mount the volume on the client node using the IP of the fourth
           node.
        3. Write IOs to the volume.
        4. Detach node N4 from the cluster.
        5. Create a new directory on the mount point.
        6. Create a few files using the same command used in step 3.
        7. Add three more bricks to make the volume 2x3 using the
           add-brick command.
        8. Do a gluster volume rebalance on the volume.
        9. Create more files from the client on the mount point.
        10. Check for files on bricks from both replica sets.
        11. Create a new directory from the client on the mount point.
        12. Check for the directory in both replica sets.
        """

        # Create and start a volume
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, "Failed to create and start volume")
        g.log.info("Volume %s created successfully", self.volname)

        # Mounting the volume.
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.servers[4],
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
        g.log.info("Volume mounted successfully using %s", self.servers[4])

        # Creating 100 files.
        command = ('for number in `seq 1 100`;do touch ' +
                   self.mounts[0].mountpoint + '/file$number; done')
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation failed.")
        g.log.info("Files create on mount point.")

        # Detach the fourth node (N4) from the cluster.
        ret, _, _ = peer_detach(self.mnode, self.servers[4])
        self.assertEqual(ret, 0, "Failed to detach %s" % self.servers[4])
        g.log.info("Peer detach successful %s", self.servers[4])

        # Creating a dir.
        ret = mkdir(self.mounts[0].client_system,
                    self.mounts[0].mountpoint + "/dir1",
                    parents=True)
        self.assertTrue(ret, ("Failed to create directory dir1."))
        g.log.info("Directory dir1 created successfully.")

        # Creating 100 files.
        command = ('for number in `seq 101 200`;do touch ' +
                   self.mounts[0].mountpoint + '/file$number; done')
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation failed.")
        g.log.info("Files create on mount point.")

        # Forming brick list
        brick_list = form_bricks_list_to_add_brick(self.mnode, self.volname,
                                                   self.servers,
                                                   self.all_servers_info)

        # Adding bricks
        ret, _, _ = add_brick(self.mnode, self.volname, brick_list)
        self.assertEqual(ret, 0,
                         "Failed to add brick to the volume %s" % self.volname)
        g.log.info("Brick added successfully to the volume %s", self.volname)

        # Start rebalance for volume.
        g.log.info("Starting rebalance on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to start rebalance "
                                  "on the volume %s", self.volname))
        g.log.info("Successfully started rebalance on the volume %s",
                   self.volname)

        # Creating 100 files.
        command = ('for number in `seq 201 300`;do touch ' +
                   self.mounts[0].mountpoint + '/file$number; done')
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation failed.")
        g.log.info("Files create on mount point.")

        # Check for files on bricks: sample random file numbers and fetch
        # the xattr list from whichever brick holds the file.
        attempts = 10
        while attempts:
            number = str(randint(1, 300))
            for brick in brick_list:
                brick_server, brick_dir = brick.split(':')
                file_name = brick_dir + "/file" + number
                if file_exists(brick_server, file_name):
                    g.log.info("Check xattr"
                               " on host %s for file %s", brick_server,
                               file_name)
                    ret = get_fattr_list(brick_server, file_name)
                    self.assertTrue(ret,
                                    ("Failed to get xattr for %s" % file_name))
                    g.log.info("Got xattr for %s successfully", file_name)
            attempts -= 1

        # Creating a dir, retrying up to 5 times on transient failure.
        ret = mkdir(self.mounts[0].client_system,
                    self.mounts[0].mountpoint + "/dir2")
        if not ret:
            attempts = 5
            while attempts:
                ret = mkdir(self.mounts[0].client_system,
                            self.mounts[0].mountpoint + "/dir2")
                if ret:
                    break
                attempts -= 1
        self.assertTrue(ret, ("Failed to create directory dir2."))
        g.log.info("Directory dir2 created successfully.")

        # Check for directory in both replica sets.
        for brick in brick_list:
            brick_server, brick_dir = brick.split(':')
            folder_name = brick_dir + "/dir2"
            if file_exists(brick_server, folder_name):
                g.log.info(
                    "Check trusted.glusterfs.dht"
                    " on host %s for directory %s", brick_server, folder_name)
                ret = get_fattr(brick_server, folder_name,
                                'trusted.glusterfs.dht')
                self.assertTrue(ret, ("Failed to get trusted.glusterfs.dht"
                                      " xattr for %s" % folder_name))
                g.log.info(
                    "Get trusted.glusterfs.dht xattr"
                    " for %s successfully", folder_name)
    def test_remove_brick_scenarios(self):
        # pylint: disable=too-many-statements
        """
        Test case:
        1. Create a cluster by peer probing and create a volume.
        2. Mount it and write some IO like 100000 files.
        3. Initiate the remove-brick operation on pair of bricks.
        4. Stop the remove-brick operation using other pairs of bricks.
        5. Get the remove-brick status using other pair of bricks in
           the volume.
        6. Stop the remove-brick operation using non-existent bricks.
        7. Check for the remove-brick status using non-existent bricks.
        8. Stop the remove-brick operation where remove-brick start was
            initiated.
        9. Perform fix-layout on the volume.
        10. Get the rebalance fix-layout status.
        11. Create a directory from mountpoint.
        12. Check for the 'trusted.glusterfs.dht' extended attribute in the
            newly created directory on the bricks for which remove-brick
            was stopped in step 8.
        13. Umount, stop and delete the volume.
        """

        # Getting a list of all the bricks.
        g.log.info("Get all the bricks of the volume")
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Failed to get the brick list")
        g.log.info("Successfully got the list of bricks of volume")

        # Running IO.
        command = ('for number in `seq 1 100000`;do touch ' +
                   self.mounts[0].mountpoint + '/test_file$number; done')
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation: failed.")
        g.log.info("Files create on mount point.")

        # Removing bricks from volume.
        remove_brick_list_original = bricks_list[3:6]
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_original, 'start')
        self.assertEqual(ret, 0, "Failed to start remove brick operation.")
        g.log.info("Remove bricks operation started successfully.")

        # Stopping brick remove operation for other pair of bricks.
        remove_brick_list_other_pair = bricks_list[0:3]
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_other_pair, 'stop')
        self.assertEqual(
            ret, 1, "Unexpected: Successfully stopped remove brick "
            "operation on other pair of bricks.")
        g.log.info("EXPECTED: Failed to stop remove brick operation on"
                   " other pair of bricks.")

        # Checking status of brick remove operation for other pair of bricks.
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_other_pair, 'status')
        self.assertEqual(ret, 1, "Error: Got status on other pair of bricks.")
        g.log.info("EXPECTED: Failed to get status on other pair of bricks.")

        # Stopping remove operation for non-existent bricks.
        remove_brick_list_non_existent = [
            bricks_list[0] + 'non-existent', bricks_list[1] + 'non-existent',
            bricks_list[2] + 'non-existent'
        ]
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_non_existent, 'stop')
        self.assertEqual(
            ret, 1, "Error: Successfully stopped remove brick"
            " operation on non-existent bricks.")
        g.log.info("EXPECTED: Failed to stop remove brick operation"
                   " on non existent bricks.")

        # Checking status of brick remove operation for non-existent bricks.
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_non_existent, 'status')
        self.assertEqual(ret, 1,
                         "Error: Status on non-existent bricks successful.")
        g.log.info("EXPECTED: Failed to get status on non existent bricks.")

        # Stopping the initial brick remove operation.
        ret, _, _ = remove_brick(self.mnode, self.volname,
                                 remove_brick_list_original, 'stop')
        self.assertEqual(ret, 0, "Failed to stop remove brick operation")
        g.log.info("Remove bricks operation stop successfully")

        # Start rebalance fix layout for volume.
        g.log.info("Starting Fix-layout on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname, fix_layout=True)
        self.assertEqual(ret, 0, ("Failed to start rebalance for fix-layout"
                                  "on the volume %s", self.volname))
        g.log.info("Successfully started fix-layout on the volume %s",
                   self.volname)

        # Checking status of rebalance fix layout for the volume.
        ret, _, _ = rebalance_status(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to check status of rebalance"
                                  "on the volume %s", self.volname))
        g.log.info("Successfully checked status on the volume %s",
                   self.volname)
        ret = wait_for_fix_layout_to_complete(self.mnode,
                                              self.volname,
                                              timeout=30000)
        self.assertTrue(ret, ("Failed to check for rebalance."))
        g.log.info("Rebalance completed.")

        # Creating directory.
        dir_name = ''
        for counter in range(0, 10):
            ret = mkdir(self.mounts[0].client_system,
                        self.mounts[0].mountpoint + "/dir1" + str(counter),
                        parents=True)
            if ret:
                dir_name = "/dir1" + str(counter)
                break
        self.assertTrue(ret, ("Failed to create directory dir1."))
        g.log.info("Directory dir1 created successfully.")

        # Checking value of attribute for dht.
        brick_server, brick_dir = bricks_list[0].split(':')
        folder_name = brick_dir + dir_name
        g.log.info("Check trusted.glusterfs.dht on host  %s for directory %s",
                   brick_server, folder_name)
        ret = get_fattr(brick_server, folder_name, 'trusted.glusterfs.dht')
        self.assertTrue(
            ret, ("Failed to get trusted.glusterfs.dht for %s" % folder_name))
        g.log.info("Get trusted.glusterfs.dht xattr for %s successfully",
                   folder_name)
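
    # Step 13 of the docstring (umount, stop and delete the volume) is not
    # shown in this excerpt and is usually handled in tearDown; a minimal
    # sketch under the assumption that glustolibs' umount_volume and
    # cleanup_volume helpers are available here:
    def _umount_and_cleanup_volume(self):
        """Unmount the volume from the client, then stop and delete it."""
        ret, _, _ = umount_volume(self.mounts[0].client_system,
                                  self.mounts[0].mountpoint)
        self.assertEqual(ret, 0, "Failed to unmount the volume")
        ret = cleanup_volume(self.mnode, self.volname)
        self.assertTrue(ret, "Failed to stop and delete the volume")
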
Beispiel #23
0
    def test_create_directory(self):

        g.log.info("creating multiple,multilevel directories")
        m_point = self.mounts[0].mountpoint
        command = 'mkdir -p ' + m_point + '/root_dir/test_dir{1..3}'
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(
            ret, 0,
            "directory creation failed on %s" % self.mounts[0].mountpoint)
        command = 'ls ' + m_point + '/root_dir'
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "ls failed on parent directory:root_dir")
        g.log.info("ls on parent directory: successful")

        g.log.info("creating files at different directory levels inside %s",
                   self.mounts[0].mountpoint)
        command = 'touch ' + m_point + \
            '/root_dir/test_file{1..5} ' + m_point + \
            '/root_dir/test_dir{1..3}/test_file{1..5}'
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation: failed")
        command = 'ls ' + m_point + '/root_dir'
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "can't list the created directories")
        list_of_files_and_dirs = out.split('\n')
        flag = True
        for x_count in range(3):
            dir_name = 'test_dir%d' % (x_count + 1)
            if dir_name not in list_of_files_and_dirs:
                flag = False
        for x_count in range(5):
            file_name = 'test_file%d' % (x_count + 1)
            if file_name not in list_of_files_and_dirs:
                flag = False
        self.assertTrue(
            flag, "ls command didn't list all the "
            "directories and files")
        g.log.info("creation of files at multiple levels successful")

        g.log.info("creating a list of all directories")
        command = 'cd ' + m_point + ';find root_dir -type d -print'
        ret, out, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "creation of directory list failed")
        list_of_all_dirs = out.split('\n')
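        # Drop the trailing empty string left behind by split('\n')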
        del list_of_all_dirs[-1]

        g.log.info("verifying that all the directories are present on "
                   "every brick and the layout ranges are correct")
        flag = validate_files_in_dir(self.clients[0],
                                     m_point + '/root_dir',
                                     test_type=k.TEST_LAYOUT_IS_COMPLETE)
        self.assertTrue(flag, "Layout has some holes or overlaps")
        g.log.info("Layout is completely set")

        g.log.info("Checking if gfid xattr of directories is displayed and"
                   "is same on all the bricks on the server node")
        brick_list = get_all_bricks(self.mnode, self.volname)
        for direc in list_of_all_dirs:
            list_of_gfid = []
            for brick in brick_list:
                # str.partition(':') returns a 3-tuple: the host address,
                # the separator ':' itself, and the brick path
                brick_tuple = brick.partition(':')
                brick_path = brick_tuple[2]
                gfid = get_fattr(brick_tuple[0], brick_path + '/' + direc,
                                 'trusted.gfid')
                list_of_gfid.append(gfid)
            flag = True
            for x_count in range(len(list_of_gfid) - 1):
                if list_of_gfid[x_count] != list_of_gfid[x_count + 1]:
                    flag = False
            self.assertTrue(flag, ("the gfid for the directory %s is not "
                                   "same on all the bricks", direc))
        g.log.info("the gfid for each directory is the same on all the "
                   "bricks")

        g.log.info("Verify that for all directories mount point "
                   "should not display xattr")
        for direc in list_of_all_dirs:
            list_of_xattrs = get_fattr_list(
                self.mounts[0].client_system,
                self.mounts[0].mountpoint + '/' + direc)
            if 'security.selinux' in list_of_xattrs:
                del list_of_xattrs['security.selinux']
            self.assertFalse(
                list_of_xattrs, "one or more xattr being "
                "displayed on mount point")
        g.log.info("Verified : mount point not displaying important " "xattrs")

        g.log.info("Verifying that for all directories only mount point "
                   "shows pathinfo xattr")
        for direc in list_of_all_dirs:
            fattr = get_fattr(self.mounts[0].client_system,
                              self.mounts[0].mountpoint + '/' + direc,
                              'trusted.glusterfs.pathinfo')
            self.assertTrue(fattr, ("pathinfo not displayed for the "
                                    "directory %s on mount point", direc))
        brick_list = get_all_bricks(self.mnode, self.volname)
        for direc in list_of_all_dirs:
            for brick in brick_list:
                host = brick.partition(':')[0]
                brick_path = brick.partition(':')[2]
                fattr = get_fattr(host, brick_path + '/' + direc,
                                  'trusted.glusterfs.pathinfo')
                self.assertIsNone(fattr, "subvolume displaying pathinfo")
        g.log.info("Verified: only mount point showing pathinfo "
                   "for all the directories")
    def test_accessing_file_when_dht_layout_is_stale(self):
        '''
        Description: Checks if a file can be opened and accessed if the dht
                     layout has become stale.

        Steps:
        1. Create, start and mount a volume consisting of 2 subvols on 2
           clients
        2. Create a dir `dir` and file `dir/file` from client0
        3. Take note of layouts of `brick1`/dir and `brick2`/dir of the volume
        4. Validate that lookup succeeds from only one brick path
        5. Re-assign layouts, i.e., brick1/dir to brick2/dir and vice-versa
        6. Remove `dir/file` from client0 and recreate the same file from
           client0 and client1
        7. Validate that lookup succeeds from only the other brick path (as
           the layout is changed, the file creation path changes too)
        8. Validate that checksums match on both the clients
        '''

        # Will be used in _get_brick_node_and_path
        self.dir_path = '/dir'

        # Will be used in argument to _assert_file_lookup
        file_name = '/file'

        dir_path = self.mounts[0].mountpoint + self.dir_path
        file_path = dir_path + file_name

        client0, client1 = self.clients[0], self.clients[1]
        fattr = 'trusted.glusterfs.dht'
        io_cmd = ('cat /dev/urandom | tr -dc [:space:][:print:] | '
                  'head -c 1K > {}'.format(file_path))

        # Create a dir from client0
        ret = mkdir(self.clients[0], dir_path)
        self.assertTrue(ret, 'Unable to create a directory from mount point')

        # Create a 1K file of random printable data from client0
        ret, _, _ = g.run(client0, io_cmd)
        self.assertEqual(ret, 0, 'Failed to create a file on mount')

        # Yields `node` and `brick-path` from first brick of each subvol
        gen = self._get_brick_node_and_path()

        # Take note of newly created directory's layout from org_subvol1
        node1, fqpath1 = next(gen)
        layout1 = get_fattr(node1, fqpath1, fattr)
        self.assertIsNotNone(layout1,
                             '{} is not present on {}'.format(fattr, fqpath1))

        # Lookup on file from node1 should fail as `dir/file` will always get
        # hashed to node2 in a 2-brick distribute volume by default
        self._assert_file_lookup(node1,
                                 fqpath1 + file_name,
                                 when='before',
                                 result=False)

        # Take note of newly created directory's layout from org_subvol2
        node2, fqpath2 = next(gen)
        layout2 = get_fattr(node2, fqpath2, fattr)
        self.assertIsNotNone(layout2,
                             '{} is not present on {}'.format(fattr, fqpath2))

        # Lookup on file from node2 should pass
        self._assert_file_lookup(node2,
                                 fqpath2 + file_name,
                                 when='before',
                                 result=True)

        # Set org_subvol2 directory layout to org_subvol1 and vice-versa
        for node, fqpath, layout, vol in ((node1, fqpath1, layout2, (2, 1)),
                                          (node2, fqpath2, layout1, (1, 2))):
            ret = set_fattr(node, fqpath, fattr, layout)
            self.assertTrue(
                ret, 'Failed to set layout of org_subvol{} on '
                'brick {} of org_subvol{}'.format(vol[0], fqpath, vol[1]))

        # Remove file after layout change from client0
        cmd = 'rm -f {}'.format(file_path)
        ret, _, _ = g.run(client0, cmd)
        self.assertEqual(ret, 0, 'Failed to delete file after layout change')

        # Create file with same name as above after layout change from client0
        # and client1
        for client in (client0, client1):
            ret, _, _ = g.run(client, io_cmd)
            self.assertEqual(
                ret, 0, 'Failed to create file from '
                '{} after layout change'.format(client))

        # After layout change lookup on file from node1 should pass
        self._assert_file_lookup(node1,
                                 fqpath1 + file_name,
                                 when='after',
                                 result=True)

        # After layout change lookup on file from node2 should fail
        self._assert_file_lookup(node2,
                                 fqpath2 + file_name,
                                 when='after',
                                 result=False)

        # Take note of checksum from client0 and client1
        checksums = [None] * 2
        for index, mount in enumerate(self.mounts):
            ret, checksums[index] = collect_mounts_arequal(mount, dir_path)
            self.assertTrue(
                ret, 'Failed to get arequal on client {}'.format(
                    mount.client_system))

        # Validate no checksum mismatch
        self.assertEqual(checksums[0], checksums[1],
                         'Checksum mismatch between client0 and client1')

        g.log.info('Pass: Test accessing file on stale layout is complete.')
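
    # _assert_file_lookup is called above but is not part of this excerpt;
    # what follows is a hypothetical reconstruction based solely on how it
    # is invoked (node, full brick path, a 'before'/'after' marker and the
    # expected outcome). Treat every detail here as an assumption:
    def _assert_file_lookup(self, node, fqpath, when, result):
        """Stat fqpath directly on node and assert the expected outcome."""
        ret, _, _ = g.run(node, 'stat {}'.format(fqpath))
        if result:
            self.assertEqual(ret, 0,
                             'Lookup on {} failed {} layout change'
                             .format(fqpath, when))
        else:
            self.assertNotEqual(ret, 0,
                                'Unexpected: lookup on {} passed {} '
                                'layout change'.format(fqpath, when))
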
    def test_custom_xattr_with_subvol_down_dir_exists(self):
        """
        Description: Verify custom xattr behavior on directories when a dht
        sub-volume is down and the directory already exists.
        Steps:
        1) Create directories from mount point.
        2) Bring one or more (not all) dht sub-volume(s) down by killing
           processes on that server
        3) Create a custom xattr for dir hashed to down sub-volume and also for
           another dir not hashing to down sub-volumes
           # setfattr -n user.foo -v bar2 <dir>
        4) Verify that custom xattr for directory is displayed on mount point
           and bricks for both directories
           # getfattr -n user.foo <dir>
           # getfattr -n user.foo <brick_path>/<dir>
        5) Modify custom xattr value and verify that custom xattr for directory
           is displayed on mount point and all up bricks
           # setfattr -n user.foo -v ABC <dir>
        6) Verify that custom xattr is not displayed once you remove it on
           mount point and all up bricks
        7) Verify that mount point shows pathinfo xattr for dir hashed to down
           sub-volume and also for dir not hashed to down sub-volumes
           # getfattr -n trusted.glusterfs.pathinfo <dir>
        8) Again create a custom xattr for dir not hashing to down sub-volumes
           # setfattr -n user.foo -v star1 <dir>
        9) Bring up the sub-volumes
        10) Execute lookup on parent directory of both <dir> from mount point
        11) Verify Custom extended attributes for dir1 on all bricks
        """
        # pylint: disable=protected-access
        # Create dir1 on client0
        self._create_dir(dir_name="dir1")

        # Get subvol list
        subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
        self.assertIsNotNone(subvols, "Failed to get subvols")

        # Finding a dir name such that it hashes to a different subvol
        newhash = find_new_hashed(subvols, "/", "dir1")
        new_name = str(newhash.newname)
        new_subvol_count = newhash.subvol_count

        # Create a dir with the new name
        self._create_dir(dir_name=new_name)

        # Kill the brick/subvol to which the new dir hashes
        ret = bring_bricks_offline(
            self.volname, subvols[new_subvol_count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s',
                              subvols[new_subvol_count]))
        g.log.info('DHT subvol %s is offline', subvols[new_subvol_count])

        # Set the xattr on dir hashing to down subvol
        ret = set_fattr(self.client, '{}/{}'.format(self.m_point, new_name),
                        'user.foo', 'bar2')
        self.assertFalse(ret, "Unexpected: custom xattr set successfully"
                              " for dir hashing to down subvol")
        g.log.info("Expected: Failed to set xattr on dir:%s"
                   " which hashes to down subvol due to error: Transport"
                   " endpoint not connected", new_name)

        # Check if the trusted.glusterfs.pathinfo is displayed
        # for dir hashing to down subvol on mountpoint
        ret = get_fattr(self.client, '{}/{}'.format(
            self.m_point, new_name), 'trusted.glusterfs.pathinfo')
        self.assertIsNotNone(ret, "Failed to get the xattr"
                             " on:{}".format(self.client))
        g.log.info("The xattr trusted.glusterfs.pathinfo"
                   " is displayed on mointpoint for %s", new_name)

        # Set the xattr on dir hashing to down subvol
        ret = set_fattr(self.client, '{}/{}'.format(self.m_point, new_name),
                        'user.foo', 'star1')
        self.assertFalse(ret, "Unexpected: custom xattr set successfully"
                              " for dir hashing to down subvol")
        g.log.info("Expected: Tansport endpoint not connected")

        # Call the local helper for the remaining steps (bring subvols up,
        # lookup and verify xattrs on all bricks)
        self._create_xattr_check_self_heal()
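
    # _create_xattr_check_self_heal is not part of this excerpt; per steps
    # 9-11 of the docstring it brings the subvolume back online, looks up
    # the parent directory and re-verifies the xattrs on all bricks. A
    # hypothetical sketch of the bring-up portion, assuming glustolibs'
    # bring_bricks_online is importable:
    def _bring_subvol_online_and_lookup(self, subvol):
        """Bring subvol's bricks back online and trigger a client lookup."""
        ret = bring_bricks_online(self.mnode, self.volname, subvol)
        self.assertTrue(ret, 'Failed to bring subvol %s online' % subvol)
        ret, _, _ = g.run(self.client, 'ls {}'.format(self.m_point))
        self.assertEqual(ret, 0, 'Lookup on the mount point failed')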