def _check_if_files_are_stored_only_on_expected_bricks(self):
        """Check if files are stored only on expected bricks"""
        for fname in self.list_of_device_files:
            # Fetch trusted.glusterfs.pathinfo and check if file is present on
            # brick or not
            ret = get_pathinfo(self.clients[0], fname)
            self.assertIsNotNone(
                ret, "Unable to get "
                "trusted.glusterfs.pathinfo  of file %s" % fname)
            present_brick_list = []
            for brick_path in ret['brickdir_paths']:
                node, path = brick_path.split(":")
                ret = file_exists(node, path)
                self.assertTrue(
                    ret,
                    "Unable to find file {} on brick {}".format(fname, path))
                brick_text = brick_path.split('/')[:-1]
                if brick_text[0][0:2].isdigit():
                    brick_text[0] = gethostbyname(brick_text[0][:-1]) + ":"
                present_brick_list.append('/'.join(brick_text))

            # Check on other bricks where file doesn't exist
            brick_list = get_all_bricks(self.mnode, self.volname)
            other_bricks = [
                brk for brk in brick_list if brk not in present_brick_list
            ]
            for brick in other_bricks:
                node, path = brick.split(':')
                ret = file_exists(node, "{}/{}".format(path,
                                                       fname.split('/')[-1]))
                self.assertFalse(
                    ret, "Unexpected: Able to find file {} on "
                    "brick {}".format(fname, path))

    def test_selinux_label(self):
        """
        TestCase:
        1. Check the existence of '/usr/lib/firewalld/services/glusterfs.xml'
        2. Validate the owner of this file as 'glusterfs-server'
        3. Validate SELinux label context as 'system_u:object_r:lib_t:s0'
        """

        fqpath = '/usr/lib/firewalld/services/glusterfs.xml'

        for server in self.all_servers_info:
            # Check existence of xml file
            self.assertTrue(file_exists(server, fqpath), "Failed to verify "
                            "existence of '{}' in {} ".format(fqpath, server))
            g.log.info("Validated the existence of required xml file")

            # Check owner of xml file
            status, result = self.run_cmd(server, 'rpm', 'qf', fqpath)
            self.assertTrue(status, "Fail: Not able to find owner for {} on "
                            "{}".format(fqpath, server))
            exp_str = 'glusterfs-server'
            self.assertIn(exp_str, result, "Fail: Owner of {} should be "
                          "{} on {}".format(fqpath, exp_str, server))

            # Validate SELinux label
            status, result = self.run_cmd(server, 'ls', 'lZ', fqpath)
            self.assertTrue(status, "Fail: Not able to find SELinux label "
                            "for {} on {}".format(fqpath, server))
            exp_str = 'system_u:object_r:lib_t:s0'
            self.assertIn(exp_str, result, "Fail: SELinux label on {}"
                          "should be {} on {}".format(fqpath, exp_str, server))

    def _perform_glusterfind_pre_and_validate_outfile(self):
        """
        Function to perform glusterfind pre and validate outfile
        """
        # Perform glusterfind pre for the session
        ret, _, _ = gfind_pre(self.mnode, self.volname, self.session,
                              self.outfiles[0], full=True, noencode=True,
                              debug=True)
        self.assertEqual(ret, 0, ("Failed to perform glusterfind pre"))
        g.log.info("Successfully performed glusterfind pre")

        # Check if the outfile exists
        ret = file_exists(self.mnode, self.outfiles[0])
        self.assertTrue(ret, ("Unexpected: File '%s' does not exist"
                              % self.outfiles[0]))
        g.log.info("Successfully validated existence of '%s'",
                   self.outfiles[0])

        # Check if all the files are listed in the outfile
        for i in range(1, self.file_limit+1):
            ret = check_if_pattern_in_file(self.mnode, "file%s" % i,
                                           self.outfiles[0])
            self.assertEqual(ret, 0, ("File 'file%s' not listed in %s"
                                      % (i, self.outfiles[0])))
            g.log.info("File 'file%s' listed in %s", i, self.outfiles[0])

    def _perform_io_and_validate_presence_of_files(self):
        """
        Function to perform the IO and validate the presence of files.
        """
        self.file_limit += 10
        # Starting IO on the mounts
        cmd = ("cd %s ; touch file{%d..%d}" % (self.mounts[0].mountpoint,
                                               self.file_limit-10,
                                               self.file_limit))

        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to create files on mountpoint")
        g.log.info("Files created successfully on mountpoint")

        # Gather the list of files from the mount point
        files = list_files(self.mounts[0].client_system,
                           self.mounts[0].mountpoint)
        self.assertIsNotNone(files, "Failed to get the list of files")
        g.log.info("Successfully gathered the list of files from mount point")

        # Check if the files exist
        for filename in files:
            ret = file_exists(self.mounts[0].client_system, filename)
            self.assertTrue(ret, ("Unexpected: File '%s' does not exist"
                                  % filename))
            g.log.info("Successfully validated existence of '%s'", filename)

    def test_character_and_block_device_file_removal_using_rm(self):
        """
        Test case:
        1. Create distributed volume with 5 sub-volumes, start and mount it.
        2. Create character and block device files.
        3. Check filetype of files from mount point.
        4. Verify that the files are stored only on the bricks mentioned in
           the trusted.glusterfs.pathinfo xattr.
        5. Delete the files.
        6. Verify that the files are deleted from all the bricks.
        """
        # Create Character and block device files
        self._create_character_and_block_device_files()

        # Check filetype of files from mount point
        self._check_filetype_of_files_from_mountpoint()

        # Verify that the files are stored only on the bricks mentioned in
        # the trusted.glusterfs.pathinfo xattr
        self._check_if_files_are_stored_only_on_expected_bricks()

        # Delete both the character and block device files
        for fname in self.list_of_device_files:
            ret, _, _ = g.run(self.clients[0], 'rm -rf {}'.format(fname))
            self.assertEqual(ret, 0, 'Failed to remove {} file'.format(fname))

        # Verify if the files are deleted from all bricks or not
        for brick in get_all_bricks(self.mnode, self.volname):
            node, path = brick.split(':')
            for fname in self.file_names:
                ret = file_exists(node, "{}/{}".format(path, fname))
                self.assertFalse(
                    ret, "Unexpected: Able to find file {} on "
                    "brick {} even after deleting".format(fname, path))
Example #6
    def _check_file_exists(self, subvol, directory, exists=True):
        """Validate that the given directory exists on the brick path of
        each subvol"""
        for each_brick in subvol:
            node, brick_path = each_brick.split(":")
            path = brick_path + directory
            ret = file_exists(node, path)
            self.assertEqual(
                exists, ret, "Unexpected behaviour: existence check of "
                "directory {} on brick {} returned {}".format(
                    directory, each_brick, ret))
Example #7
    def _is_file_present_on_brick(self, file_name):
        """Check if file is created on the backend-bricks as per
        the value of trusted.glusterfs.pathinfo xattr"""
        brick_list = get_pathinfo(self.client,
                                  "{}/{}".format(self.m_point, file_name))
        self.assertIsNotNone(brick_list, "Failed to get bricklist "
                             "for {}".format(file_name))

        for brick in brick_list['brickdir_paths']:
            host, path = brick.split(':')
            ret = file_exists(host, path)
            self.assertTrue(
                ret, "File {} is not present on {}".format(file_name, brick))
            g.log.info("File %s is present on %s", file_name, brick)

    def setUpClass(cls):
        cls.get_super_method(cls, 'setUpClass')()

        cls.first_client = cls.mounts[0].client_system
        cls.mountpoint = cls.mounts[0].mountpoint
        cls.is_io_running = False

        # Upload IO scripts for running IO on mounts
        cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
                                  "file_dir_ops.py")
        if not file_exists(cls.first_client, cls.script_upload_path):
            if not upload_scripts(cls.first_client, cls.script_upload_path):
                raise ExecutionError(
                    "Failed to upload IO scripts to client %s" %
                    cls.first_client)
Example #9
def run_linux_untar(clients, mountpoint, dirs=('.',)):
    """Run linux kernel untar on a given mount point

    Args:
      clients(str|list): Client nodes on which I/O
                         has to be started.
      mountpoint(str): Mount point where the volume is
                       mounted.
    Kwargs:
       dirs(tuple): A tuple of dirs where untar has to
                    be started. (Default: ('.',))
    Returns:
       list: Returns a list of process objects, else None
    """
    # Check for and convert clients to a list.
    if not isinstance(clients, list):
        clients = [clients]

    list_of_procs = []
    for client in clients:
        # Download linux untar to root, so that it can be
        # utilized in subsequent run_linux_untar() calls.
        cmd = ("wget https://cdn.kernel.org/pub/linux/kernel/"
               "v5.x/linux-5.4.54.tar.xz")
        if not file_exists(client, '/root/linux-5.4.54.tar.xz'):
            ret, _, _ = g.run(client, cmd)
            if ret:
                return None

        for directory in dirs:
            # copy linux tar to dir
            cmd = ("cp /root/linux-5.4.54.tar.xz {}/{}".format(
                mountpoint, directory))
            ret, _, _ = g.run(client, cmd)
            if ret:
                return None
            # Start linux untar
            cmd = ("cd {}/{};tar -xvf linux-5.4.54.tar.xz".format(
                mountpoint, directory))
            proc = g.run_async(client, cmd)
            list_of_procs.append(proc)

    return list_of_procs
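
# Hedged usage sketch for run_linux_untar(): start the kernel untar on one
# client and block until it completes. The hostname and mount path are
# illustrative; async_communicate() is the glusto call used elsewhere in
# these tests to collect an async process.
procs = run_linux_untar('client1.example.com', '/mnt/glusterfs')
if procs is None:
    raise RuntimeError("Failed to start linux untar")
for proc in procs:
    ret, _, _ = proc.async_communicate()
    if ret:
        raise RuntimeError("linux untar failed")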

    def _check_contents_of_outfile(self, gftype):
        """Check contents of outfile created by query and pre"""
        if gftype == 'f':
            content = self.list_of_files
        elif gftype == 'd':
            content = self.list_of_dirs
        else:
            content = self.list_of_files + self.list_of_dirs

        # Check if outfile is created or not
        ret = file_exists(self.mnode, self.outfile)
        self.assertTrue(ret,
                        "Unexpected: File '%s' does not exist" % self.outfile)

        for value in content:
            ret = check_if_pattern_in_file(self.mnode, value, self.outfile)
            self.assertEqual(
                ret, 0,
                "Entry for '%s' not listed in %s" % (value, self.outfile))

def check_upload_memory_and_cpu_logger_script(servers):
    """Check and upload memory_and_cpu_logger.py to servers if not present

    Args:
     servers(list): List of all servers where script has to be uploaded

    Returns:
     bool: True if script is uploaded successfully, else False
    """
    script = "/usr/share/glustolibs/io/scripts/memory_and_cpu_logger.py"
    is_present = []
    for server in servers:
        if not file_exists(server, script):
            if not upload_scripts(server, script):
                g.log.error("Unable to upload memory_and_cpu_logger.py on %s",
                            server)
                is_present.append(False)
            else:
                is_present.append(True)
    return all(is_present)
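
# Hedged usage sketch: ensure the logger script is on every server before
# starting resource monitoring (the server list is illustrative).
servers = ['server1.example.com', 'server2.example.com']
if not check_upload_memory_and_cpu_logger_script(servers):
    raise RuntimeError("memory_and_cpu_logger.py missing on some servers")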

    def test_gfind_modify(self):
        """
        Verifying the glusterfind functionality with modification of files.

        * Create a volume
        * Create a session on the volume
        * Create various files from mount point
        * Perform glusterfind pre
        * Perform glusterfind post
        * Check the contents of outfile
        * Modify the contents of the files from mount point
        * Perform glusterfind pre
        * Perform glusterfind post
        * Check the contents of outfile
          Files modified must be listed
        """

        # pylint: disable=too-many-statements
        # Create a session for the volume
        ret, _, _ = gfind_create(self.mnode, self.volname, self.session)
        self.assertEqual(ret, 0, ("Unexpected: Creation of a session for the "
                                  "volume %s failed" % self.volname))
        g.log.info("Successfully created a session for the volume %s",
                   self.volname)

        # Perform glusterfind list to check if session exists
        _, out, _ = gfind_list(self.mnode, volname=self.volname,
                               sessname=self.session)
        self.assertNotEqual(out, "No sessions found.",
                            "Failed to list the glusterfind session")
        g.log.info("Successfully listed the glusterfind session")

        # Starting IO on the mounts
        cmd = "cd %s ; touch file{1..10}" % self.mounts[0].mountpoint
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to create files on mountpoint")
        g.log.info("Files created successfully on mountpoint")

        # Gather the list of files from the mount point
        files = list_files(self.mounts[0].client_system,
                           self.mounts[0].mountpoint)
        self.assertIsNotNone(files, "Failed to get the list of files")
        g.log.info("Successfully gathered the list of files from mount point")

        # Check if the files exist
        for filename in files:
            ret = file_exists(self.mounts[0].client_system, filename)
            self.assertTrue(ret, ("Unexpected: File '%s' does not exist"
                                  % filename))
            g.log.info("Successfully validated existence of '%s'", filename)

        # Wait for changelog to get updated
        sleep(2)

        # Perform glusterfind pre for the session
        ret, _, _ = gfind_pre(self.mnode, self.volname, self.session,
                              self.outfiles[0], full=True, noencode=True,
                              debug=True)
        self.assertEqual(ret, 0, ("Failed to perform glusterfind pre"))
        g.log.info("Successfully performed glusterfind pre")

        # Check if the outfile exists
        ret = file_exists(self.mnode, self.outfiles[0])
        self.assertTrue(ret, ("Unexpected: File '%s' does not exist"
                              % self.outfiles[0]))
        g.log.info("Successfully validated existence of '%s'",
                   self.outfiles[0])

        # Check if all the files are listed in the outfile
        for i in range(1, 11):
            ret = check_if_pattern_in_file(self.mnode, "file%s" % i,
                                           self.outfiles[0])
            self.assertEqual(ret, 0, ("File 'file%s' not listed in %s"
                                      % (i, self.outfiles[0])))
            g.log.info("File 'file%s' listed in %s", i, self.outfiles[0])

        # Perform glusterfind post for the session
        ret, _, _ = gfind_post(self.mnode, self.volname, self.session)
        self.assertEqual(ret, 0, ("Failed to perform glusterfind post"))
        g.log.info("Successfully performed glusterfind post")

        # Modify the files created from mount point
        mod_string = "this is a test string\n"
        for filenum in files:
            ret = append_string_to_file(self.mounts[0].client_system, filenum,
                                        mod_string)
            self.assertTrue(ret, "Failed to append to file '%s'" % filenum)
        g.log.info("Successfully modified all the files")

        # Check if the files modified exist from mount point
        for filenum in files:
            ret = check_if_pattern_in_file(self.mounts[0].client_system,
                                           mod_string, filenum)
            self.assertEqual(ret, 0, ("Unexpected: File '%s' does not contain"
                                      " the string '%s' after being modified"
                                      % (filenum, mod_string)))
            g.log.info("Successfully validated '%s' is modified", filenum)

        # Wait for changelog to get updated
        sleep(2)

        # Perform glusterfind pre for the session
        ret, _, _ = gfind_pre(self.mnode, self.volname, self.session,
                              self.outfiles[1], debug=True)
        self.assertEqual(ret, 0, ("Failed to perform glusterfind pre"))
        g.log.info("Successfully performed glusterfind pre")

        # Check if the outfile exists
        ret = file_exists(self.mnode, self.outfiles[1])
        self.assertTrue(ret, ("Unexpected: File '%s' does not exist"
                              % self.outfiles[1]))
        g.log.info("Successfully validated existence of outfile '%s'",
                   self.outfiles[1])

        # Check if all the files are listed in the outfile
        for num in range(1, 11):
            pattern = "MODIFY file%s" % num
            ret = check_if_pattern_in_file(self.mnode, pattern,
                                           self.outfiles[1])
            self.assertEqual(ret, 0, ("File 'file%s' not listed in '%s'"
                                      % (num, self.outfiles[1])))
            g.log.info("File 'file%s' listed in '%s'", num, self.outfiles[1])
Example #13
def validate_files_in_dir(mnode,
                          rootdir,
                          file_type=k.FILETYPE_ALL,
                          test_type=k.TEST_ALL):
    """walk a directory tree and check if layout is_complete.

    Args:
        mnode (str): The host of the directory being traversed.
        rootdir (str): The fully qualified path of the dir being traversed.
        file_type (int): An or'd set of constants defining the file types
                        to test.
                            FILETYPE_DIR
                            FILETYPE_DIRS
                            FILETYPE_FILE
                            FILETYPE_FILES
                            FILETYPE_ALL

        test_type (int): An or'd set of constants defining the test types
                        to run.
                            TEST_LAYOUT_IS_COMPLETE
                            TEST_LAYOUT_IS_BALANCED
                            TEST_FILE_EXISTS_ON_HASHED_BRICKS
                            TEST_ALL

    Examples:
        # TEST LAYOUTS FOR FILES IN A DIRECTORY

        validate_files_in_dir(clients[0], '/mnt/glusterfs')
        validate_files_in_dir(clients[0], '/mnt/glusterfs',
                              file_type=k.FILETYPE_DIRS)
        validate_files_in_dir(clients[0], '/mnt/glusterfs',
                              file_type=k.FILETYPE_FILES)
        validate_files_in_dir(clients[0], '/mnt/glusterfs',
                              test_type=k.TEST_LAYOUT_IS_COMPLETE,
                              file_type=(k.FILETYPE_DIRS | k.FILETYPE_FILES))
        validate_files_in_dir(clients[0], '/mnt/glusterfs',
                              test_type=k.TEST_LAYOUT_IS_BALANCED)
        validate_files_in_dir(clients[0], '/mnt/glusterfs',
                              test_type=k.TEST_LAYOUT_IS_BALANCED,
                              file_type=k.FILETYPE_FILES)

        # TEST FILES IN DIRECTORY EXIST ON HASHED BRICKS
        validate_files_in_dir(clients[0], '/mnt/glusterfs',
                              test_type=k.TEST_FILE_EXISTS_ON_HASHED_BRICKS)
    """
    layout_cache = {}

    script_path = ("/usr/share/glustolibs/scripts/walk_dir.py")
    if not file_exists(mnode, script_path):
        if upload_scripts(mnode, script_path,
                          "/usr/share/glustolibs/scripts/"):
            g.log.info("Successfully uploaded script " "walk_dir.py!")
        else:
            g.log.error("Faild to upload walk_dir.py!")
            return False
    else:
        g.log.info("compute_hash.py already present!")

    cmd = ("/usr/bin/env python {0} {1}".format(script_path, rootdir))
    ret, out, _ = g.run(mnode, cmd)
    if ret:
        g.log.error('Unable to run the script on node {0}'.format(mnode))
        return False
    # walk_dir.py emits a Python literal of os.walk()-style tuples
    # (dirpath, dirnames, filenames), parsed back here with eval().
    for walkies in eval(out):
        g.log.info("TESTING DIRECTORY %s...", walkies[0])

        # check directories
        if file_type & k.FILETYPE_DIR:
            for testdir in walkies[1]:
                fqpath = os.path.join(walkies[0], testdir)
                gdir = GlusterDir(mnode, fqpath)

                if gdir.parent_dir in layout_cache:
                    layout = layout_cache[gdir.parent_dir]
                else:
                    layout = Layout(gdir.parent_dir_pathinfo)
                    layout_cache[gdir.parent_dir] = layout

                    run_layout_tests(mnode, gdir.parent_dir, layout, test_type)

                if test_type & k.TEST_FILE_EXISTS_ON_HASHED_BRICKS:
                    run_hashed_bricks_test(gdir)

        # check files
        if file_type & k.FILETYPE_FILE:
            for file in walkies[2]:
                fqpath = os.path.join(walkies[0], file)
                gfile = GlusterFile(mnode, fqpath)

                if gfile.parent_dir in layout_cache:
                    layout = layout_cache[gfile.parent_dir]
                else:
                    layout = Layout(gfile.parent_dir_pathinfo)
                    layout_cache[gfile.parent_dir] = layout

                    run_layout_tests(mnode, gfile.parent_dir, layout,
                                     test_type)

                if test_type & k.TEST_FILE_EXISTS_ON_HASHED_BRICKS:
                    run_hashed_bricks_test(gfile)
    return True
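
# The layout_cache above memoizes per-parent-directory layouts so pathinfo is
# fetched only once per directory; a minimal standalone illustration of the
# same pattern (compute() stands in for building a Layout):
def cached_layout(parent_dir, cache, compute):
    if parent_dir not in cache:
        cache[parent_dir] = compute(parent_dir)
    return cache[parent_dir]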
Example #14
    def test_distribution_hash_value(self):
        """Test case tests DHT of files and directories based on hash value
        """
        # pylint: disable=too-many-locals
        for client_index, mount_obj in enumerate(self.mounts):
            client_host = mount_obj.client_system
            mountpoint = mount_obj.mountpoint

            # Create directory for initial data
            g.log.debug("Creating temporary folder on client's machine %s:%s",
                        client_host, self.temp_folder)
            if not mkdir(client_host, self.temp_folder):
                g.log.error("Failed create temporary directory "
                            "on client machine %s:%s",
                            client_host, self.temp_folder)
                raise ExecutionError("Failed create temporary directory "
                                     "on client machine %s:%s" %
                                     (client_host, self.temp_folder))
            g.log.info('Created temporary directory on client machine %s:%s',
                       client_host, self.temp_folder)
            # Prepare a set of data
            files = ["{prefix}{file_name}_{client_index}".
                     format(file_name=file_name,
                            client_index=client_index,
                            prefix='' if randint(1, 6) % 2
                            else choice('ABCD') + '/')
                     for file_name in map(chr, range(97, 123))]
            ret = self.create_files(client_host, self.temp_folder,
                                    files,
                                    "Lorem Ipsum is simply dummy text of the "
                                    "printing and typesetting industry.")
            self.assertTrue(ret, "Failed creating a set of files and dirs "
                                 "on %s:%s" % (client_host, self.temp_folder))
            g.log.info('Created data set on client machine on folder %s:%s',
                       client_host, self.temp_folder)

            # Copy prepared data to mount point
            cmd = ('cp -vr {source}/* {destination}'.format(
                source=self.temp_folder,
                destination=mountpoint))
            ret, _, _ = g.run(client_host, cmd)
            self.assertEqual(ret, 0, "Copy data to mount point %s:%s Failed")
            g.log.info('Copied prepared data to mount point %s:%s',
                       client_host, mountpoint)

            # Verify that hash layout values are set on each
            # bricks for the dir
            g.log.debug("Verifying DHT layout")
            ret = validate_files_in_dir(client_host, mountpoint,
                                        test_type=TEST_LAYOUT_IS_COMPLETE)
            self.assertTrue(ret, "TEST_LAYOUT_IS_COMPLETE: FAILED")
            g.log.info("TEST_LAYOUT_IS_COMPLETE: PASS on %s:%s ",
                       client_host, mountpoint)

            g.log.debug("Verifying files and directories")
            ret = validate_files_in_dir(client_host, mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_DIRS)
            self.assertTrue(ret, "TEST_FILE_EXISTS_ON_HASHED_BRICKS: FAILED")
            g.log.info("TEST_FILE_EXISTS_ON_HASHED_BRICKS: PASS")

            # Verify "trusted.gfid" extended attribute of the
            # directory/file on all the bricks
            gfids = dict()
            g.log.debug("Check if trusted.gfid is presented on the bricks")
            for brick_item in get_all_bricks(self.mnode, self.volname):
                brick_host, brick_dir = brick_item.split(':')

                for target_destination in files:
                    if not file_exists(brick_host, '{brick_dir}/{dest}'.
                                       format(brick_dir=brick_dir,
                                              dest=target_destination)):
                        continue
                    ret = get_fattr(brick_host, '%s/%s' %
                                    (brick_dir, target_destination),
                                    'trusted.gfid')
                    self.assertIsNotNone(ret,
                                         "trusted.gfid is not presented "
                                         "on %s/%s" % (brick_dir,
                                                       target_destination))
                    g.log.info("Verified trusted.gfid on brick %s:%s",
                               brick_item, target_destination)
                    gfids.setdefault(target_destination, []).append(ret)

            g.log.debug('Check if trusted.gfid is same on all the bricks')
            self.assertTrue(all(len(set(gfids[k])) == 1 for k in gfids),
                            "trusted.gfid should be same on all the bricks")
            g.log.info('trusted.gfid is same on all the bricks')
            # Verify that mount point shows pathinfo xattr.
            g.log.debug("Check if pathinfo is presented on mount point "
                        "%s:%s", client_host, mountpoint)
            ret = get_fattr(client_host, mountpoint,
                            'trusted.glusterfs.pathinfo')
            self.assertIsNotNone(ret, "pathinfo is not presented on mount "
                                      "point %s:%s" % (client_host,
                                                       mountpoint))

            g.log.info('trusted.glusterfs.pathinfo is presented on mount'
                       ' point %s:%s', client_host, mountpoint)

            # Mount point should not display xattr:
            # trusted.gfid and trusted.glusterfs.dht
            g.log.debug("Check if trusted.gfid and trusted.glusterfs.dht are "
                        "not presented on mount point %s:%s", client_host,
                        mountpoint)
            attributes = get_fattr_list(client_host, mountpoint)
            self.assertFalse('trusted.gfid' in attributes,
                             "Expected: Mount point shouldn't display xattr:"
                             "{xattr}. Actual: xattrs {xattr} is "
                             "presented on mount point".
                             format(xattr='trusted.gfid'))
            self.assertFalse('trusted.glusterfs.dht' in attributes,
                             "Expected: Mount point shouldn't display xattr:"
                             "{xattr}. Actual: xattrs {xattr} is "
                             "presented on mount point".
                             format(xattr='trusted.glusterfs.dht'))

            g.log.info("trusted.gfid and trusted.glusterfs.dht are not "
                       "presented on mount point %s:%s", client_host,
                       mountpoint)
        g.log.info('Files and dirs are stored on bricks based on hash value')
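
# Hedged sketch: the raw getfattr call that get_fattr() wraps for the
# trusted.gfid checks in the test above (host and brick path illustrative).
from glusto.core import Glusto as g

ret, out, _ = g.run('server1.example.com',
                    'getfattr -n trusted.gfid -e hex '
                    '/bricks/brick0/testvol_brick0/file_a')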
Example #15
    def test_fops_ec_brickdown(self):
        # pylint: disable=too-many-branches,too-many-statements,too-many-locals
        """
        - 1. Start resource consumption tool
        - 2. Create directory dir1
        - 3. Create 5 dirs and 5 files in each dir in directory 1
        - 4. Rename all files inside dir1
        - 5. Truncate at any dir in mountpoint inside dir1
        - 6. Create softlink and hardlink of files in mountpoint
        - 7. chmod, chown, chgrp inside dir1
        - 8. Create tiny, small, medium and large files
        - 9. Create files on client side for dir1
        - 10. Bring redundant bricks down
        - 11. Validate IO and wait for it to complete
        - 12. Create dir2
        - 13. Create files on client side for dir2
        - 14. Bring bricks online
        - 15. Wait for bricks to come online
        - 16. Check if bricks are online
        - 17. Monitor heal completion
        - 18. Validate IO and wait for it to complete
        """

        # Starting resource consumption using top
        log_file_mem_monitor = '/var/log/glusterfs/mem_usage.log'
        cmd = ("for i in {1..100};do top -n 1 -b|egrep "
               "'RES|gluster' & free -h 2>&1 >> %s ; "
               "sleep 10;done" % (log_file_mem_monitor))
        g.log.info(cmd)
        for server in self.servers:
            g.run_async(server, cmd)

        # get the bricks from the volume
        g.log.info("Fetching bricks for the volume : %s", self.volname)
        bricks_list = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, "Brick list is empty")
        g.log.info("Brick List : %s", bricks_list)

        # Creating dir1
        cmd = ('mkdir  %s/dir1' % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to create dir1")
        g.log.info("dir1 created successfully for %s", self.mounts[0])

        # Create 5 dir and 5 files in each dir at mountpoint on dir1
        start, end = 1, 5
        for mount_obj in self.mounts:
            # Number of dir and files to be created.
            dir_range = ("%s..%s" % (str(start), str(end)))
            file_range = ("%s..%s" % (str(start), str(end)))
            # Create dir 1-5 at mountpoint.
            cmd = ('mkdir %s/dir1/dir{%s};' %
                   (mount_obj.mountpoint, dir_range))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Directory creation failed")
            g.log.info("Directory created successfull")

            # Create files inside each dir.
            cmd = ('touch %s/dir1/dir{%s}/file{%s};' %
                   (mount_obj.mountpoint, dir_range, file_range))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "File creation failed")
            g.log.info("File created successfull")

            # Increment counter so that at next client dir and files are made
            # with diff offset. Like at next client dir will be named
            # dir6, dir7...dir10. Same with files.
            start += 5
            end += 5

        # Rename all files inside dir1 at mountpoint on dir1
        cmd = ('cd %s/dir1/dir1/; '
               'for FILENAME in *;'
               'do mv $FILENAME Unix_$FILENAME; '
               'done;' % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to rename file on" "client")
        g.log.info("Successfully renamed file on client")

        # Truncate at any dir in mountpoint inside dir1
        # start is an offset to be added to dirname to act on
        # diff files at diff clients.
        start = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s/dir1/dir%s/; '
                   'for FILENAME in *;'
                   'do echo > $FILENAME; '
                   'done;' % (mount_obj.mountpoint, str(start)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Truncate failed")
            g.log.info("Truncate of files successfull")

        # Create softlink and hardlink of files in mountpoint. Start is an
        # offset to be added to dirname to act on diff files at diff clients.
        start = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s/dir1/dir%s; '
                   'for FILENAME in *; '
                   'do ln -s $FILENAME softlink_$FILENAME; '
                   'done;' % (mount_obj.mountpoint, str(start)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Creating Softlinks have failed")
            g.log.info("Softlink of files have been changed successfully")

            cmd = ('cd %s/dir1/dir%s; '
                   'for FILENAME in *; '
                   'do ln $FILENAME hardlink_$FILENAME; '
                   'done;' % (mount_obj.mountpoint, str(start + 1)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Creating Hardlinks have failed")
            g.log.info("Hardlink of files have been changed successfully")
            start += 5

        # chmod, chown, chgrp inside dir1
        # start and end used as offset to access diff files
        # at diff clients.
        start, end = 2, 5
        for mount_obj in self.mounts:
            dir_file_range = '%s..%s' % (str(start), str(end))
            cmd = ('chmod 777 %s/dir1/dir{%s}/file{%s}' %
                   (mount_obj.mountpoint, dir_file_range, dir_file_range))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Changing mode of files has failed")
            g.log.info("Mode of files have been changed successfully")

            cmd = ('chown root %s/dir1/dir{%s}/file{%s}' %
                   (mount_obj.mountpoint, dir_file_range, dir_file_range))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Changing owner of files has failed")
            g.log.info("Owner of files have been changed successfully")

            cmd = ('chgrp root %s/dir1/dir{%s}/file{%s}' %
                   (mount_obj.mountpoint, dir_file_range, dir_file_range))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Changing group of files has failed")
            g.log.info("Group of files have been changed successfully")
            start += 5
            end += 5

        # Create tiny, small, medium and large files
        # at mountpoint. Offset to differ filenames
        # at diff clients.
        offset = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s; fallocate -l 100 tiny_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for tiny files failed")
            g.log.info("Fallocate for tiny files successful")

            cmd = ('cd %s; fallocate -l 20M small_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for small files failed")
            g.log.info("Fallocate for small files successful")

            cmd = ('cd %s; fallocate -l 200M medium_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for medium files failed")
            g.log.info("Fallocate for medium files successful")

            cmd = ('cd %s; fallocate -l 1G large_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for large files failed")
            g.log.info("Fallocate for large files successful")
            offset += 1

        # Creating files on client side for dir1
        # Write IO
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s/dir1" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Bring down other bricks to max redundancy
        # Bringing bricks offline
        ret = bring_bricks_offline(self.volname, bricks_list[2:4])
        self.assertTrue(ret, 'Bricks not offline')
        g.log.info('Bricks are offline successfully')

        # Validating IO's and waiting to complete
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Creating dir2
        cmd = ('mkdir  %s/dir2' % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to create dir2 ")
        g.log.info("dir2 created successfully for %s", self.mounts[0])

        # Creating files on client side for dir2
        # Write IO
        all_mounts_procs = []
        count = 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s/dir2" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count = count + 10

        # Bring bricks online
        list_of_bricks_to_bring_online = bricks_list[2:4]
        ret = bring_bricks_online(self.mnode, self.volname,
                                  list_of_bricks_to_bring_online)
        self.assertTrue(ret, 'Bricks not brought online')
        g.log.info('Bricks are online successfully')

        # Wait for brick to come online
        g.log.info("Waiting for brick to come online")
        ret = wait_for_bricks_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, "Bricks are not online")
        g.log.info("EXPECTED : Bricks are online")

        # Check if bricks are online
        ret = get_offline_bricks_list(self.mnode, self.volname)
        self.assertListEqual(ret, [], 'Some bricks are still offline')
        g.log.info('All bricks are online')

        # Monitor heal completion
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal has not yet completed')
        g.log.info('Heal has completed successfully')

        # Validating IO's and waiting to complete
        g.log.info("Validating IO's")
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Check file exist for memory log
        g.log.info("Validating log exists")
        ret = file_exists(self.mnode, '/var/log/glusterfs/mem_usage.log')
        self.assertTrue(ret, "Memory log file does not exist")
        g.log.info("Memory log file exists")
Example #16
    def test_gfind_pre_cli(self):
        """
        Verifying the glusterfind pre command functionality with valid
        and invalid values for the required and optional parameters.

        * Create a volume
        * Create a session on the volume
        * Perform some I/O from the mount point
        * Perform glusterfind pre with the following combinations:
            - Valid values for required parameters
            - Invalid values for required parameters
            - Valid values for optional parameters
            - Invalid values for optional parameters
        * Perform glusterfind post

            Where
            Required parameters: volname, sessname and outfile
            Optional parameters: full, debug, gftype, tagforfullfind,
                                 namespace, noencode, disablepartial,
                                 regenoutfile, outprefix, fieldsep
        """

        # pylint: disable=too-many-statements
        # Creating a session for the volume
        g.log.info("Creating a session for the volume %s", self.volname)
        ret, _, _ = gfind_create(self.mnode, self.volname, self.session)
        self.assertEqual(ret, 0, ("Unexpected: Creation of a session for the "
                                  "volume %s failed" % self.volname))
        g.log.info("Successfully created a session for the volume %s",
                   self.volname)

        # Perform glusterfind list to check if session exists
        g.log.info("Performing glusterfind list to check if the session is "
                   "created")
        ret, _, _ = gfind_list(self.mnode,
                               volname=self.volname,
                               sessname=self.session)
        self.assertEqual(ret, 0, "Failed to list the glusterfind session")
        g.log.info("Successfully listed the glusterfind session")

        # Starting IO on the mounts
        mount_obj = self.mounts[0]
        mount_dir = mount_obj.mountpoint
        client = mount_obj.client_system

        g.log.info("Creating Files on %s:%s", client, mount_dir)
        cmd = ("cd %s ; for i in `seq 1 10` ; "
               "do dd if=/dev/urandom of=file$i bs=1M count=1 ; "
               "done" % mount_dir)
        ret, _, _ = g.run(client, cmd)
        self.assertEqual(ret, 0, "Failed to create files on mountpoint")
        g.log.info("Files created successfully on mountpoint")

        # Check if the files exist
        g.log.info("Checking the existence of files created during IO")
        for i in range(1, 11):
            ret = file_exists(client, '%s/file%s' % (mount_dir, i))
            self.assertTrue(ret,
                            "Unexpected: File 'file%s' does not exist" % i)
            g.log.info("Successfully validated existence of 'file%s'", i)

        # Perform glusterfind pre for the session
        g.log.info("Performing glusterfind pre for the session %s",
                   self.session)
        ret, _, _ = gfind_pre(self.mnode,
                              self.volname,
                              self.session,
                              self.outfile,
                              full=True,
                              noencode=True,
                              debug=True)
        self.assertEqual(ret, 0, ("Failed to perform glusterfind pre"))
        g.log.info("Successfully performed glusterfind pre")

        # Check if the outfile exists
        g.log.info("Checking if outfile created during glusterfind pre command"
                   " exists")
        ret = file_exists(self.mnode, self.outfile)
        self.assertTrue(ret,
                        "Unexpected: File '%s' does not exist" % self.outfile)
        g.log.info("Successfully validated existence of '%s'", self.outfile)

        # Check if all the files are listed in the outfile
        for i in range(1, 11):
            ret = check_if_pattern_in_file(self.mnode, 'file%s' % i,
                                           self.outfile)
            self.assertEqual(ret, 0, ("File 'file%s' not listed in %s" %
                                      (i, self.outfile)))
            g.log.info("File 'file%s' listed in %s", i, self.outfile)

        # Perform glusterfind post for the session
        g.log.info("Performing glusterfind post for the session %s",
                   self.session)
        ret, _, _ = gfind_post(self.mnode, self.volname, self.session)
        self.assertEqual(ret, 0, ("Failed to perform glusterfind post"))
        g.log.info("Successfully performed glusterfind post")

        # Perform glusterfind pre using the invalid values for required
        # parameters
        not_volume = 'invalid-volume-name'
        not_session = 'invalid-session-name'
        not_outfile = '/tmp/not'
        g.log.info("Performing glusterfind pre with invalid values for the "
                   "required parameters")
        ret, _, _ = gfind_pre(self.mnode, not_volume, not_session, not_outfile)
        self.assertNotEqual(
            ret, 0, "Unexpected: glusterfind pre Successful "
            "even with invalid values for required parameters")
        g.log.info("Successful: glusterfind pre failed with invalid values "
                   "for required parameters")

        # Perform glusterfind pre using the invalid values for optional
        # parameters
        g.log.info("Deleting the session with invalid values for the optional "
                   "parameters")
        invalid_options = [
            ' --dbug', ' --noencod', ' --regenout', ' --type n',
            ' --tagforfullfind', ' --disablepartial', ' --fll',
            ' --outprefix none', ' --namespc'
        ]
        for opt in invalid_options:
            ret, _, _ = g.run(
                self.mnode, ("glusterfind pre %s %s %s %s" %
                             (self.volname, self.session, self.outfile, opt)))
            self.assertNotEqual(
                ret, 0, "Unexpected: glusterfind pre "
                " successful for option %s which is invalid" % opt)
        g.log.info("Successful: glusterfind pre failed with invalid value "
                   "for optional parameters")
Example #17
    def test_snap_uss(self):
        # pylint: disable=too-many-statements
        """
        Steps:
        1. Create a volume and mount it.
        2. Perform I/O on mounts
        3. create a .snaps directory and create some files
        4. Create Multiple snapshots of volume
        5. Check info of volume
        6. Enable USS for volume
        7. Validate files created under .snaps
        8. Disable USS
        9. Again Validate the files created under .snaps directory
        """
        # write files on all mounts
        g.log.info("Starting IO on all mounts...")
        g.log.info("mounts: %s", self.mounts)
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name file %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # Validate IO
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")

        # starting I/O
        g.log.info("Starting IO on all mounts...")
        for mount_obj in self.mounts:
            self.mpoint = "%s/.snaps" % mount_obj.mountpoint
            ret = file_exists(mount_obj.client_system, self.mpoint)
            if not ret:
                ret = mkdir(mount_obj.client_system, self.mpoint)
                self.assertTrue(ret, "Failed to create .snaps directory")
                g.log.info("Successfully created .snaps directory")
                break
            else:
                # Validate USS running
                g.log.info("Validating USS enabled or disabled")
                ret = is_uss_enabled(self.mnode, self.volname)
                if not ret:
                    break
                else:
                    g.log.info("USS is enabled in volume %s", self.volname)
                    ret, _, _ = disable_uss(self.mnode, self.volname)
                    self.assertEqual(
                        ret, 0, "Failed to disable USS on "
                        " volume %s" % self.volname)
                    g.log.info("USS disabled in Volume %s", self.volname)
                    ret = mkdir(mount_obj.client_system, self.mpoint)
                    self.assertTrue(ret, "Failed to create .snaps directory")
                    g.log.info("Successfully created .snaps directory")
            cmd = ("/usr/bin/env python %s create_files "
                   "-f 10 --base-file-name foo %s" %
                   (self.script_upload_path, self.mpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)

        # List files under created .snaps directory
        g.log.info("Starting to list files under .snaps directory")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(ret, 0, "Failed to list files under .snaps")
            g.log.info("Successfully Created files under .snaps directory")
            before_uss_enable = out.strip().split('\n')
            # deleting the mount path from list
            del before_uss_enable[0]

        # Create Multiple snapshots for volume
        g.log.info("Creating snapshots")
        self.snaps_list = []
        for snap_count in range(1, 5):
            self.snap = "snap%s" % snap_count
            ret, _, _ = snap_create(self.mnode, self.volname, self.snap)
            self.assertEqual(
                ret, 0, "Failed to create snapshot "
                "%s for volume %s" % (self.snap, self.volname))
            self.snaps_list.append(self.snap)
            g.log.info("Snapshot %s created successfully for volume %s",
                       self.snap, self.volname)
        g.log.info("Snapshot Creation Successful")

        # Activate the snapshots
        g.log.info("Activating snapshots")
        for snap_count in range(1, 5):
            self.snap = "snap%s" % snap_count
            ret, _, _ = snap_activate(self.mnode, self.snap)
            self.assertEqual(ret, 0,
                             ("Failed to activate snapshot %s" % self.snap))
            g.log.info("Snapshot snap%s activated successfully", self.snap)

        # snapshot list
        g.log.info("Starting to list snapshots")
        ret, out, _ = snap_list(self.mnode)
        self.assertEqual(ret, 0, "Failed to list snapshot")
        snap_count = out.strip().split("\n")
        self.assertEqual(len(snap_count), 4, "Failed to list all snaps")
        g.log.info("Snapshot list Validated successfully")

        # Enable USS
        g.log.info("Enable USS on volume")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on cloned volume")
        g.log.info("Successfully enabled USS on Cloned volume")

        # Validate USS running
        g.log.info("Validating USS enabled or disabled")
        ret = is_uss_enabled(self.mnode, self.volname)
        self.assertTrue(ret, ("USS is disabled in volume %s" % self.volname))
        g.log.info("USS enabled in Volume %s", self.volname)

        # Validate snapshots under .snaps folder
        self.validate_snaps()

        # check snapshots are listed
        g.log.info(".snaps Containing:")
        for mount_obj in self.mounts:
            ret, _, _ = uss_list_snaps(mount_obj.client_system,
                                       mount_obj.mountpoint)
            self.assertEqual(ret, 0, "Failed to list snapshot information")
            g.log.info("Successfully Listed snapshots Created")

        # Disable USS running
        g.log.info("Disable USS on volume")
        ret, _, _ = disable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to disable USS on volume")
        g.log.info("Successfully disabled USS on volume")

        # check snapshots are listed
        g.log.info(".snaps Containing:")
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(ret, 0, "Failed to list snapshot information")
            g.log.info("Successfully listed snapshots Created")

        # Validate after disabling USS, all files should be same
        g.log.info("Validate files after disabling uss")
        after_uss_disable = out.strip().split('\n')
        # deleting the mount path from list
        del after_uss_disable[0]
        for files in before_uss_enable:
            self.assertIn(files, after_uss_disable,
                          "File %s not found under .snaps after "
                          "disabling USS" % files)
        g.log.info("Validated files under .snaps directory")
Example #18
    def test_ec_replace_brick(self):
        """
        - Start resource consumption tool
        - Create directory dir1
        - Create 5 directory and 5 files in dir of mountpoint
        - Rename all files inside dir1 at mountpoint
        - Create softlink and hardlink of files in dir1 of mountpoint
        - Delete op for deleting all file in one of the dirs inside dir1
        - Change chmod, chown, chgrp
        - Create tiny, small, medium and large file
        - Get arequal before replacing brick
        - Replace brick
        - Get arequal after replacing brick
        - Compare Arequal's
        - Create IO's
        - Replace brick while IO's are going on
        - Validating IO's and waiting for it to complete
        """
        # pylint: disable=too-many-branches,too-many-statements,too-many-locals
        # Starting resource consumption using top
        log_file_mem_monitor = '/var/log/glusterfs/mem_usage.log'
        cmd = ("for i in {1..20};do top -n 1 -b|egrep "
               "'RES|gluster' & free -h 2>&1 >> %s ;"
               "sleep 10;done" % (log_file_mem_monitor))
        g.log.info(cmd)
        cmd_list_procs = []
        for server in self.servers:
            proc = g.run_async(server, cmd)
            cmd_list_procs.append(proc)

        # Creating dir1
        ret = mkdir(self.mounts[0].client_system,
                    "%s/dir1" % self.mounts[0].mountpoint)
        self.assertTrue(ret, "Failed to create dir1")
        g.log.info("Directory dir1 on %s created successfully", self.mounts[0])

        # Create 5 dir and 5 files in each dir at mountpoint on dir1
        start, end = 1, 5
        for mount_obj in self.mounts:
            # Number of dir and files to be created.
            dir_range = ("%s..%s" % (str(start), str(end)))
            file_range = ("%s..%s" % (str(start), str(end)))
            # Create dir 1-5 at mountpoint.
            ret = mkdir(mount_obj.client_system,
                        "%s/dir1/dir{%s}" % (mount_obj.mountpoint, dir_range))
            self.assertTrue(ret, "Failed to create directory")
            g.log.info("Directory created successfully")

            # Create files inside each dir.
            cmd = ('touch %s/dir1/dir{%s}/file{%s};' %
                   (mount_obj.mountpoint, dir_range, file_range))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "File creation failed")
            g.log.info("File created successfull")

            # Increment counter so that at next client dir and files are made
            # with diff offset. Like at next client dir will be named
            # dir6, dir7...dir10. Same with files.
            start += 5
            end += 5

        # Rename all files inside dir1 at mountpoint on dir1
        cmd = ('cd %s/dir1/dir1/; '
               'for FILENAME in *;'
               'do mv $FILENAME Unix_$FILENAME;'
               'done;' % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to rename files on client")
        g.log.info("Successfully renamed files on client")

        # Truncate at any dir in mountpoint inside dir1
        # start is an offset to be added to dirname to act on
        # diff files at diff clients.
        start = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s/dir1/dir%s/; '
                   'for FILENAME in *;'
                   'do echo > $FILENAME;'
                   'done;' % (mount_obj.mountpoint, str(start)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Truncate failed")
            g.log.info("Truncate of files successful")

        # Create softlink and hardlink of files in mountpoint. Start is an
        # offset to be added to dirname to act on diff files at diff clients.
        start = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s/dir1/dir%s; '
                   'for FILENAME in *; '
                   'do ln -s $FILENAME softlink_$FILENAME;'
                   'done;' % (mount_obj.mountpoint, str(start)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Creating softlinks failed")
            g.log.info("Softlinks of files created successfully")

            cmd = ('cd %s/dir1/dir%s; '
                   'for FILENAME in *; '
                   'do ln $FILENAME hardlink_$FILENAME;'
                   'done;' % (mount_obj.mountpoint, str(start + 1)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Creating hardlinks failed")
            g.log.info("Hardlinks of files created successfully")
            start += 5

        # chmod, chown, chgrp inside dir1
        # start and end used as offset to access diff files
        # at diff clients.
        start, end = 2, 5
        for mount_obj in self.mounts:
            dir_file_range = '%s..%s' % (str(start), str(end))
            cmd = ('chmod 777 %s/dir1/dir{%s}/file{%s}' %
                   (mount_obj.mountpoint, dir_file_range, dir_file_range))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Changing mode of files has failed")
            g.log.info("Mode of files have been changed successfully")

            cmd = ('chown root %s/dir1/dir{%s}/file{%s}' %
                   (mount_obj.mountpoint, dir_file_range, dir_file_range))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Changing owner of files has failed")
            g.log.info("Owner of files have been changed successfully")

            cmd = ('chgrp root %s/dir1/dir{%s}/file{%s}' %
                   (mount_obj.mountpoint, dir_file_range, dir_file_range))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Changing group of files has failed")
            g.log.info("Group of files have been changed successfully")
            start += 5
            end += 5

        # Create tiny, small, medium and large file
        # at mountpoint. Offset to differ filenames
        # at diff clients.
        offset = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s; fallocate -l 100 tiny_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for tiny files failed")
            g.log.info("Fallocate for tiny files successful")

            cmd = ('cd %s; fallocate -l 20M small_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for small files failed")
            g.log.info("Fallocate for small files successful")

            cmd = ('cd %s; fallocate -l 200M medium_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for medium files failed")
            g.log.info("Fallocate for medium files successful")

            cmd = ('cd %s; fallocate -l 1G large_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for large files failed")
            g.log.info("Fallocate for large files successful")
            offset += 1

        # Get arequal before replacing brick
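        # arequal walks the mount and computes a checksum over file data and
        # metadata; identical values before and after the brick replacement
        # indicate that no data was lost or changed by the operation.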
        ret, result_before_replacing_brick = (collect_mounts_arequal(
            self.mounts[0]))
        self.assertTrue(ret, 'Failed to get arequal')
        g.log.info('Getting arequal before replacing brick is successful')

        # Replacing a brick of random choice
        ret = replace_brick_from_volume(self.mnode, self.volname, self.servers,
                                        self.all_servers_info)
        self.assertTrue(ret, "Unexpected:Replace brick is not successful")
        g.log.info("Expected : Replace brick is successful")

        # Wait for brick to come online
        ret = wait_for_bricks_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, "Unexpected:Bricks are not online")
        g.log.info("Expected : Bricks are online")

        # Monitor heal completion
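        # monitor_heal_completion polls the volume's heal info until no
        # entries remain pending (or an internal timeout is hit); replacing
        # a brick triggers a self-heal onto the newly added brick.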
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, 'Unexpected: Heal has not yet completed')
        g.log.info('Heal has completed successfully')

        # Check if bricks are online
        all_bricks = get_all_bricks(self.mnode, self.volname)
        ret = are_bricks_online(self.mnode, self.volname, all_bricks)
        self.assertTrue(ret, 'Unexpected: All bricks are not online')
        g.log.info('All bricks are online')

        # Get arequal after replacing brick
        ret, result_after_replacing_brick = (collect_mounts_arequal(
            self.mounts[0]))
        self.assertTrue(ret, 'Failed to get arequal')
        g.log.info('Getting arequal after replacing brick is successful')

        # Comparing arequals
        self.assertEqual(
            result_before_replacing_brick, result_after_replacing_brick,
            'Arequals before and after replacing brick are not equal')
        g.log.info('Arequals before and after replacing brick are equal')

        # Creating files on client side for dir1
        # Write IO
        all_mounts_procs, count = [], 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s/dir1" %
                   (self.script_upload_path1, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count += 10

        # Replacing a brick while IO's are going on
        ret = replace_brick_from_volume(self.mnode, self.volname, self.servers,
                                        self.all_servers_info)
        self.assertTrue(ret, "Unexpected:Replace brick is not successful")
        g.log.info("Expected : Replace brick is successful")

        # Wait for brick to come online
        ret = wait_for_bricks_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, "Unexpected:Bricks are not online")
        g.log.info("Expected : Bricks are online")

        # Validating IO's and waiting to complete
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Create 2 directories and start IO's which opens FD
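        # Keeping file descriptors open during the replace verifies that
        # open FDs survive the brick replacement and the resulting client
        # graph change.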
        ret = mkdir(self.mounts[0].client_system,
                    "%s/count{1..2}" % self.mounts[0].mountpoint)
        self.assertTrue(ret, "Failed to create directories")
        g.log.info("Directories created on %s successfully", self.mounts[0])

        all_fd_procs, count = [], 1
        for mount_obj in self.mounts:
            cmd = ("cd %s ;/usr/bin/env python %s -n 10 -t 120 "
                   "-d 5 -c 16 --dir count%s" %
                   (mount_obj.mountpoint, self.script_upload_path2, count))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_fd_procs.append(proc)
            count += 1

        # Replacing a brick while open FD IO's are going on
        ret = replace_brick_from_volume(self.mnode, self.volname, self.servers,
                                        self.all_servers_info)
        self.assertTrue(ret, "Unexpected:Replace brick is not successful")
        g.log.info("Expected : Replace brick is successful")

        # Wait for brick to come online
        ret = wait_for_bricks_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, "Unexpected:Bricks are not online")
        g.log.info("Expected : Bricks are online")

        # Validating IO's and waiting to complete
        ret = validate_io_procs(all_fd_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all io's")

        # Close connection and check file exist for memory log
        ret = file_exists(self.mnode, '/var/log/glusterfs/mem_usage.log')
        self.assertTrue(ret, "Unexpected:Memory log file does " "not exist")
        g.log.info("Memory log file exists")
        for proc in cmd_list_procs:
            ret, _, _ = proc.async_communicate()
            self.assertEqual(ret, 0, "Memory logging failed")
            g.log.info("Memory logging is successful")
    def test_snap_info_from_detached_node(self):
        # pylint: disable=too-many-statements
        """
        Create a volume with single brick
        Create a snapshot
        Activate the snapshot created
        Enable uss on the volume
        Validate snap info on all the nodes
        Peer detach one node
        Validate /var/lib/glusterd/snaps on the detached node
        Probe the detached node
        """

        # Creating volume with single brick on one node
        servers_info_single_node = {
            self.servers[0]: self.all_servers_info[self.servers[0]]
        }
        bricks_list = form_bricks_list(self.mnode, self.volname, 1,
                                       self.servers[0],
                                       servers_info_single_node)
        ret, _, _ = volume_create(self.servers[0], self.volname, bricks_list)
        self.assertEqual(ret, 0, "Volume creation failed")
        g.log.info("Volume %s created successfully", self.volname)

        # Creating a snapshot without starting the volume should fail
        self.snapname = "snap1"
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                self.snapname,
                                timestamp=False)
        self.assertNotEqual(ret, 0,
                            "Snapshot created without starting the volume")
        g.log.info("Snapshot creation failed as expected")

        # Start the volume
        ret, _, _ = volume_start(self.mnode, self.volname)
        self.assertEqual(ret, 0,
                         "Failed to start the volume %s" % self.volname)
        g.log.info("Volume start succeeded")

        # Create a snapshot of the volume after volume start
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                self.snapname,
                                timestamp=False)
        self.assertEqual(
            ret, 0, "Snapshot creation failed on the volume %s" % self.volname)
        g.log.info("Snapshot create succeeded")

        # Activate snapshot created
        ret, _, err = snap_activate(self.mnode, self.snapname)
        self.assertEqual(
            ret, 0, "Snapshot activate failed with error %s" % err)
        g.log.info("Snapshot activated successfully")

        # Enable uss
        self.vol_options['features.uss'] = 'enable'
        ret = set_volume_options(self.mnode, self.volname, self.vol_options)
        self.assertTrue(
            ret, "gluster volume set %s features.uss "
            "enable failed" % self.volname)
        g.log.info("gluster volume set %s features.uss "
                   "enable successfully", self.volname)

        # Validate that /var/lib/glusterd/snaps/<snapname> exists on all
        # the servers
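        # glusterd keeps snapshot metadata under /var/lib/glusterd/snaps/
        # and replicates it to every peer, so the directory is expected on
        # all nodes while they are part of the cluster.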
        self.pathname = "/var/lib/glusterd/snaps/%s" % self.snapname
        for server in self.servers:
            ret = file_exists(server, self.pathname)
            self.assertTrue(
                ret, "%s directory doesn't exist on node %s" %
                (self.pathname, server))
            g.log.info("%s path exists on node %s", self.pathname, server)

        # Peer detach one node
        self.random_node_peer_detach = random.choice(self.servers[1:])
        ret = peer_detach_servers(self.mnode,
                                  self.random_node_peer_detach,
                                  validate=True)
        self.assertTrue(
            ret,
            "Peer detach of node: %s failed" % self.random_node_peer_detach)
        g.log.info("Peer detach succeeded")

        # /var/lib/glusterd/snaps/<snapname> should not be present on the
        # detached node
        ret = file_exists(self.random_node_peer_detach, self.pathname)
        self.assertFalse(
            ret, "%s directory should not exist on the peer "
            "which is detached from the cluster %s" %
            (self.pathname, self.random_node_peer_detach))
        g.log.info("Expected: %s path doesn't exist on peer detached node %s",
                   self.pathname, self.random_node_peer_detach)
    def test_rm_file_when_nonhash_vol_down(self):
        """
        Case 3:
        - create parent
        - mkdir parent/child
        - touch parent/child/file
        - bringdown a subvol where file is not present
        - rm -rf parent
            - Only file should be deleted
            - rm -rf of parent should fail with ENOTCONN
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-statements
        # pylint: disable=unsubscriptable-object

        # Create parent dir
        parent_dir = self.mountpoint + '/parent'
        child_dir = parent_dir + '/child'
        ret = mkdir(self.clients[0], parent_dir)
        self.assertTrue(ret, ('mkdir failed for %s ' % parent_dir))
        g.log.info("mkdir of parent directory %s successful", parent_dir)

        # Create child dir
        ret = mkdir(self.clients[0], child_dir)
        self.assertTrue(ret, ('mkdir failed for %s ' % child_dir))
        g.log.info("mkdir of child directory %s successful", child_dir)

        # Create a file under child_dir
        file_one = child_dir + '/file_one'
        ret, _, err = g.run(self.clients[0], ("touch %s" % file_one))
        self.assertFalse(ret, ('touch failed for %s err: %s' %
                               (file_one, err)))

        # Find a non hashed subvolume(or brick)
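        # DHT hashes a file name to exactly one subvolume (the hashed
        # subvol); a non-hashed subvol is one that does not hold the file,
        # so bringing it down must not prevent deletion of the file itself.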
        nonhashed_subvol, count = find_nonhashed_subvol(self.subvols,
                                                        "parent/child",
                                                        "file_one")
        self.assertIsNotNone(nonhashed_subvol,
                             "Error in finding nonhashed value")
        g.log.info("nonhashed_subvol %s", nonhashed_subvol._host)

        # Bring nonhashed_subvol offline
        ret = bring_bricks_offline(self.volname, self.subvols[count])
        self.assertTrue(ret, ('Error in bringing down subvolume %s'
                              % self.subvols[count]))
        g.log.info('target subvol %s is offline', self.subvols[count])

        # 'rm -rf' on parent should fail with ENOTCONN
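        # Removing a directory requires the rmdir to succeed on every
        # subvolume; with one subvol down DHT returns ENOTCONN for the
        # directory, while the file (on an online subvol) is still unlinked.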
        ret = rmdir(self.clients[0], parent_dir)
        self.assertFalse(ret, ('Expected rmdir to fail for %s' % parent_dir))
        g.log.info("rmdir of parent directory %s failed as expected",
                   parent_dir)

        brickobject = create_brickobjectlist(self.subvols, "parent/child")
        self.assertIsNotNone(brickobject,
                             "could not create brickobject list")
        # Make sure file_one is deleted
        for brickdir in brickobject:
            dir_path = "%s/parent/child/file_one" % brickdir.path
            brick_path = dir_path.split(":")
            self.assertFalse(
                file_exists(brickdir._host, brick_path[1]),
                'Expected file %s not to exist on any brick' % file_one)
        g.log.info("file is deleted as expected")

        # Cleanup
        # Bring up the subvol - restart volume
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Error in force starting the volume")
        g.log.info('Volume restart success.')
        sleep(10)

        # Delete parent_dir
        ret = rmdir(self.clients[0], parent_dir, force=True)
        self.assertTrue(ret, ('rmdir failed for %s ' % parent_dir))
        g.log.info("rmdir of directory %s successful", parent_dir)
    def test_mkdir_with_subvol_down(self):
        '''
        Test mkdir hashed to a down subvol
        '''
        # pylint: disable=too-many-locals
        # pylint: disable=too-many-branches
        # pylint: disable=too-many-statements
        # pylint: disable=W0212
        mount_obj = self.mounts[0]
        mountpoint = mount_obj.mountpoint

        # directory that needs to be created
        parent_dir = mountpoint + '/parent'
        child_dir = mountpoint + '/parent/child'

        # get hashed subvol for name "parent"
        subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
        hashed, count = find_hashed_subvol(subvols, "/", "parent")
        self.assertIsNotNone(hashed, "Could not find hashed subvol")

        # bring target_brick offline
        bring_bricks_offline(self.volname, subvols[count])
        ret = are_bricks_offline(self.mnode, self.volname, subvols[count])
        self.assertTrue(
            ret, 'Error in bringing down subvolume %s' % subvols[count])
        g.log.info('target subvol is offline')

        # create parent dir
        ret, _, err = g.run(self.clients[0], ("mkdir %s" % parent_dir))
        self.assertNotEqual(
            ret, 0, 'Expected mkdir of %s to fail with %s' % (parent_dir, err))
        g.log.info('mkdir of dir %s failed as expected', parent_dir)

        # check that parent_dir does not exist on any bricks and client
        brickobject = create_brickobjectlist(subvols, "/")
        for brickdir in brickobject:
            adp = "%s/parent" % brickdir.path
            bpath = adp.split(":")
            self.assertFalse(
                file_exists(brickdir._host, bpath[1]),
                'Expected dir %s not to exist on servers' % parent_dir)

        for client in self.clients:
            self.assertFalse(
                file_exists(client, parent_dir),
                'Expected dir %s not to exist on clients' % parent_dir)

        g.log.info('dir %s does not exist on mount as expected', parent_dir)

        # Bring up the subvols and create parent directory
        bring_bricks_online(self.mnode,
                            self.volname,
                            subvols[count],
                            bring_bricks_online_methods=None)
        ret = are_bricks_online(self.mnode, self.volname, subvols[count])
        self.assertTrue(
            ret, "Error in bringing back subvol %s online" % subvols[count])
        g.log.info('Subvol is back online')

        ret, _, _ = g.run(self.clients[0], ("mkdir %s" % parent_dir))
        self.assertEqual(ret, 0,
                         'Expected mkdir of %s to succeed' % parent_dir)
        g.log.info('mkdir of dir %s successful', parent_dir)

        # get hash subvol for name "child"
        hashed, count = find_hashed_subvol(subvols, "parent", "child")
        self.assertIsNotNone(hashed, "Could not find hashed subvol")

        # bring target_brick offline
        bring_bricks_offline(self.volname, subvols[count])
        ret = are_bricks_offline(self.mnode, self.volname, subvols[count])
        self.assertTrue(
            ret, 'Error in bringing down subvolume %s' % subvols[count])
        g.log.info('target subvol is offline')

        # create child dir
        ret, _, err = g.run(self.clients[0], ("mkdir %s" % child_dir))
        self.assertNotEqual(
            ret, 0, 'Expected mkdir of %s to fail with %s' % (child_dir, err))
        g.log.info('mkdir of dir %s failed as expected', child_dir)

        # check if child_dir exists on any bricks
        for brickdir in brickobject:
            adp = "%s/parent/child" % brickdir.path
            bpath = adp.split(":")
            self.assertFalse(
                file_exists(brickdir._host, bpath[1]),
                'Expected dir %s not to exist on servers' % child_dir)
        for client in self.clients:
            self.assertFalse(
                file_exists(client, child_dir),
                'Expected dir %s not to exist on clients' % child_dir)

        g.log.info('dir %s does not exist on mount as expected', child_dir)
    def test_gfind_deletes(self):
        """
        Verifying the glusterfind functionality with deletion of files.

        * Create a volume
        * Create a session on the volume
        * Create various files from mount point
        * Perform glusterfind pre
        * Perform glusterfind post
        * Check the contents of outfile
        * Delete the files created from mount point
        * Perform glusterfind pre
        * Perform glusterfind post
        * Check the contents of outfile
          Files deleted must be listed
        """

        # pylint: disable=too-many-statements
        # Creating a session for the volume
        g.log.info("Creating a session for the volume %s", self.volname)
        ret, _, _ = gfind_create(self.mnode, self.volname, self.session)
        self.assertEqual(ret, 0, ("Unexpected: Creation of a session for the "
                                  "volume %s failed" % self.volname))
        g.log.info("Successfully created a session for the volume %s",
                   self.volname)

        # Perform glusterfind list to check if session exists
        g.log.info("Performing glusterfind list to check if the session is "
                   "created")
        ret, _, _ = gfind_list(self.mnode,
                               volname=self.volname,
                               sessname=self.session)
        self.assertEqual(ret, 0, "Failed to list the glusterfind session")
        g.log.info("Successfully listed the glusterfind session")

        # Starting IO on the mounts
        g.log.info("Creating Files on %s:%s", self.mounts[0].client_system,
                   self.mounts[0].mountpoint)
        cmd = ("cd %s ; for i in `seq 1 10` ; "
               "do dd if=/dev/urandom of=file$i bs=1M count=1 ; "
               "done" % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to create files on mountpoint")
        g.log.info("Files created successfully on mountpoint")

        # Check if the files exist
        g.log.info("Checking the existence of files created during IO")
        for i in range(1, 11):
            ret = file_exists(self.mounts[0].client_system,
                              '%s/file%s' % (self.mounts[0].mountpoint, i))
            self.assertTrue(ret,
                            "Unexpected: File 'file%s' does not exist" % i)
            g.log.info("Successfully validated existence of 'file%s'", i)

        sleep(5)

        # Perform glusterfind pre for the session
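        # glusterfind pre collects the changes recorded since the session's
        # last checkpoint into the outfile; the matching glusterfind post
        # (performed below) advances the checkpoint so that the next pre
        # reports only newer changes.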
        g.log.info("Performing glusterfind pre for the session %s",
                   self.session)
        ret, _, _ = gfind_pre(self.mnode,
                              self.volname,
                              self.session,
                              self.outfiles[0],
                              full=True,
                              noencode=True,
                              debug=True)
        self.assertEqual(ret, 0, ("Failed to perform glusterfind pre"))
        g.log.info("Successfully performed glusterfind pre")

        # Check if the outfile exists
        g.log.info("Checking if outfile created during glusterfind pre command"
                   " exists")
        ret = file_exists(self.mnode, self.outfiles[0])
        self.assertTrue(
            ret, "Unexpected: File '%s' does not exist" % self.outfiles[0])
        g.log.info("Successfully validated existence of '%s'",
                   self.outfiles[0])

        # Check if all the files are listed in the outfile
        for i in range(1, 11):
            ret = check_if_pattern_in_file(self.mnode, 'file%s' % i,
                                           self.outfiles[0])
            self.assertEqual(ret, 0, ("File 'file%s' not listed in %s" %
                                      (i, self.outfiles[0])))
            g.log.info("File 'file%s' listed in %s", i, self.outfiles[0])

        # Perform glusterfind post for the session
        g.log.info("Performing glusterfind post for the session %s",
                   self.session)
        ret, _, _ = gfind_post(self.mnode, self.volname, self.session)
        self.assertEqual(ret, 0, ("Failed to perform glusterfind post"))
        g.log.info("Successfully performed glusterfind post")

        # Delete the files created from mount point
        g.log.info("Deleting the Files on %s:%s", self.mounts[0].client_system,
                   self.mounts[0].mountpoint)
        for i in range(1, 11):
            ret = remove_file(self.mounts[0].client_system,
                              "%s/file%s" % (self.mounts[0].mountpoint, i),
                              force=True)
            self.assertTrue(ret, "Failed to delete file%s" % i)
        g.log.info("Successfully deleted all the files")

        # Check if the files deleted exist from mount point
        g.log.info("Checking the existence of files that were deleted "
                   "(must not be present)")
        for i in range(1, 11):
            ret = file_exists(self.mounts[0].client_system,
                              '%s/file%s' % (self.mounts[0].mountpoint, i))
            self.assertFalse(
                ret, "Unexpected: File 'file%s' exists even after"
                " being deleted" % i)
            g.log.info("Successfully validated 'file%s' does not exist", i)

        sleep(5)

        # Perform glusterfind pre for the session
        g.log.info("Performing glusterfind pre for the session %s",
                   self.session)
        ret, _, _ = gfind_pre(self.mnode,
                              self.volname,
                              self.session,
                              self.outfiles[1],
                              debug=True)
        self.assertEqual(ret, 0, ("Failed to perform glusterfind pre"))
        g.log.info("Successfully performed glusterfind pre")

        # Check if the outfile exists
        g.log.info("Checking if outfile created during glusterfind pre command"
                   " exists")
        ret = file_exists(self.mnode, self.outfiles[1])
        self.assertTrue(
            ret, "Unexpected: File '%s' does not exist" % self.outfiles[1])
        g.log.info("Successfully validated existence of '%s'",
                   self.outfiles[1])

        # Check if all the files are listed in the outfile
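        # Each line of the outfile records a change as '<TYPE> <path>'
        # (for example NEW, MODIFY or DELETE), which is why deletions are
        # matched below with the pattern 'DELETE file<N>'.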
        for i in range(1, 11):
            pattern = "DELETE file%s" % i
            ret = check_if_pattern_in_file(self.mnode, pattern,
                                           self.outfiles[1])
            self.assertEqual(ret, 0, ("File 'file%s' not listed in %s" %
                                      (i, self.outfiles[1])))
            g.log.info("File 'file%s' listed in %s", i, self.outfiles[1])
    def test_quota_file_larger_than_limit(self):
        # pylint: disable=too-many-statements
        """
        Verifying directory Quota functionality with respect to the
        limit-usage option.

        If a limit is set and a file of size larger than the limit is
        created, file creation will stop once it reaches the limit.

        Quota list will show the limit-set and size as the same.

        * Enable Quota
        * Create a directory from mount point
        * Set a limit of 10 MB on the directory
        * Set Quota soft-timeout and hard-timeout to 0 seconds
        * Create a file of size larger than the Quota limit
          eg. 20 MB file
        * Perform Quota list operation to check if all the fields are
          appropriate such as hard_limit, available_space, sl_exceeded,
          hl_exceeded, etc.
        """
        # Enable Quota
        g.log.info("Enabling Quota on the volume %s", self.volname)
        ret, _, _ = quota_enable(self.mnode, self.volname)
        self.assertEqual(
            ret, 0, "Failed to enable Quota on the volume %s" % self.volname)
        g.log.info("Successfully enabled Quota on the volume %s", self.volname)

        # Path to set the Quota limit
        path = '/foo'

        # Create a directory 'foo' from the mount point
        mount_obj = self.mounts[0]
        mount_dir = mount_obj.mountpoint
        client = mount_obj.client_system

        g.log.info("Creating dir named 'foo' from client %s", client)
        ret = mkdir(client, "%s/foo" % mount_dir)
        self.assertTrue(
            ret, "Failed to create dir under %s-%s" % (client, mount_dir))
        g.log.info("Directory 'foo' created successfully")

        # Set Quota limit of 10 MB on the directory 'foo' of the volume
        g.log.info("Set Quota Limit on the path %s of the volume %s", path,
                   self.volname)
        ret, _, _ = quota_limit_usage(self.mnode,
                                      self.volname,
                                      path=path,
                                      limit="10MB")
        self.assertEqual(ret, 0, ("Failed to set Quota limit on path %s of "
                                  "the volume %s", path, self.volname))
        g.log.info("Successfully set the Quota limit on %s of the volume %s",
                   path, self.volname)

        # Set Quota soft-timeout to 0 seconds
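        # A timeout of 0 makes quota re-check usage on every operation
        # instead of trusting cached accounting for the timeout interval,
        # so the limit takes effect immediately in this test.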
        g.log.info("Set Quota soft timeout:")
        ret, _, _ = quota_set_soft_timeout(self.mnode, self.volname, '0sec')
        self.assertEqual(ret, 0, ("Failed to set soft timeout"))
        g.log.info("Quota soft timeout set successful")

        # Set Quota hard-timeout to 0 second
        g.log.info("Set Quota hard timeout:")
        ret, _, _ = quota_set_hard_timeout(self.mnode, self.volname, '0sec')
        self.assertEqual(ret, 0, ("Failed to set hard timeout"))
        g.log.info("Quota hard timeout set successful")

        # Validate if the Quota limit set is appropriate
        g.log.info(
            "Validate if the Quota limit set is correct for the "
            "directory %s of the volume %s", path, self.volname)
        ret = quota_validate(self.mnode,
                             self.volname,
                             path=path,
                             hard_limit=10485760)
        self.assertTrue(
            ret, "Quota limit of 10 MB was not set properly on "
            "the directory %s of the volume %s" % (path, self.volname))
        g.log.info(
            "Successfully Validated Quota Limit of 10 MB is set on the"
            " directory %s of the volume %s", path, self.volname)

        # Create a single file of size 20 MB
        g.log.info("Creating Files on %s:%s", client, mount_dir)
        cmd = ("cd %s/foo ; "
               "dd if=/dev/zero of=20MBfile "
               "bs=1M "
               "count=20" % mount_dir)
        ret, _, _ = g.run(client, cmd)
        self.assertEqual(
            ret, 1, "Unexpected: File creation succeeded even "
            "after exceeding the hard-limit")
        g.log.info("Expected: File creation failed after exceeding "
                   "hard-limit")

        # List all files and dirs created
        g.log.info("List all files and directories:")
        ret = list_all_files_and_dirs_mounts(self.mounts)
        self.assertTrue(ret, "Failed to list all files and dirs")
        g.log.info("Listing all files and directories is successful")

        # Check if the file created above exists
        g.log.info("Checking if the file created exists in the volume %s",
                   self.volname)
        ret = file_exists(client, "%s/foo/20MBfile" % mount_dir)
        self.assertTrue(ret,
                        "File does not exist in the volume %s" % self.volname)
        g.log.info(
            "Successfully validated the presence of file in the "
            "volume %s", self.volname)

        # Validate if the Quota limit set is appropriate
        g.log.info(
            "Validate if the Quota list fields are appropriate for the "
            "directory %s of the volume %s", path, self.volname)
        ret = quota_validate(self.mnode,
                             self.volname,
                             path=path,
                             hard_limit=10485760,
                             avail_space=0,
                             sl_exceeded=True,
                             hl_exceeded=True)
        self.assertTrue(ret, ("Failed to validate the Quota limits on "
                              "the volume %s", self.volname))
        g.log.info(
            "Successfully validated Quota limit of 10 MB is set on the"
            " directory %s of the volume %s", path, self.volname)
    def test_directory_custom_extended_attr(self):
        """Test - set custom xattr to directory and link to directory
        """
        # pylint: disable = too-many-statements
        dir_prefix = '{root}/folder_{client_index}'

        for mount_index, mount_point in enumerate(self.mounts):
            folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                            client_index=mount_index)

            # Create a directory from mount point
            g.log.info('Creating directory : %s:%s', mount_point.mountpoint,
                       folder_name)
            ret = mkdir(mount_point.client_system, folder_name)
            self.assertTrue(
                ret, 'Failed to create directory %s on mount point %s' %
                (folder_name, mount_point.mountpoint))

            ret = file_exists(mount_point.client_system, folder_name)
            self.assertTrue(
                ret, 'Created directory %s does not exist on mount '
                'point %s' % (folder_name, mount_point.mountpoint))
            g.log.info('Created directory %s:%s', mount_point.mountpoint,
                       folder_name)

            # Verify that hash layout values are set on each
            # bricks for the dir
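            # Each brick stores the directory's portion of the hash range in
            # the trusted.glusterfs.dht xattr; the validation below checks
            # that a layout is present for the new directory on the bricks.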
            g.log.debug("Verifying hash layout values")
            ret = validate_files_in_dir(mount_point.client_system,
                                        mount_point.mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_DIR)
            self.assertTrue(
                ret, "Expected - Directory is stored "
                "on hashed bricks")
            g.log.info("Hash layout values are set on each bricks")

            # Verify that mount point should not display
            # xattr : trusted.gfid and dht
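            # Internal xattrs such as trusted.gfid and trusted.glusterfs.dht
            # are filtered out by the client, so they must not appear in a
            # listing taken on the mount point.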
            g.log.debug("Loading extra attributes")
            ret = get_fattr_list(mount_point.client_system, folder_name)

            self.assertTrue(
                'trusted.gfid' not in ret,
                "Extended attribute trusted.gfid is present on "
                "mount point %s and folder %s" %
                (mount_point.mountpoint, folder_name))
            self.assertTrue(
                'trusted.glusterfs.dht' not in ret,
                "Extended attribute trusted.glusterfs.dht is "
                "present on mount point %s and folder %s" %
                (mount_point.mountpoint, folder_name))

            g.log.info(
                'Extended attributes trusted.gfid and '
                'trusted.glusterfs.dht do not exist on '
                'mount point %s:%s ', mount_point.mountpoint, folder_name)

            # Verify that mount point shows pathinfo xattr
            g.log.debug("Check for xattr trusted.glusterfs.pathinfo on %s:%s",
                        mount_point, folder_name)
            ret = get_fattr(mount_point.client_system,
                            mount_point.mountpoint,
                            'trusted.glusterfs.pathinfo',
                            encode="text")
            self.assertIsNotNone(
                ret, "trusted.glusterfs.pathinfo is not "
                "present on %s:%s" % (mount_point.mountpoint, folder_name))
            g.log.info(
                'pathinfo xattr is displayed on mount point %s and '
                'dir %s', mount_point.mountpoint, folder_name)

            # Create a custom xattr for dir
            g.log.info("Set attribute user.foo to %s", folder_name)
            ret = set_fattr(mount_point.client_system, folder_name, 'user.foo',
                            'bar2')
            self.assertTrue(
                ret, "Setup custom attribute on %s:%s failed" %
                (mount_point.client_system, folder_name))

            g.log.info('Custom attribute is set on %s:%s',
                       mount_point.client_system, folder_name)
            # Verify that custom xattr for directory is displayed
            # on mount point and bricks
            g.log.debug('Check xattr user.foo on %s:%s',
                        mount_point.client_system, folder_name)
            ret = get_fattr(mount_point.client_system,
                            folder_name,
                            'user.foo',
                            encode="text")
            self.assertEqual(
                ret, 'bar2', "Xattr attribute user.foo is not present on "
                "mount point %s and directory %s" %
                (mount_point.client_system, folder_name))

            g.log.info(
                'Custom xattr user.foo is present on mount point'
                ' %s:%s ', mount_point.client_system, folder_name)

            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix.format(root=brick_dir,
                                               client_index=mount_index)

                ret = get_fattr(brick_server,
                                brick_path,
                                'user.foo',
                                encode="text")

                g.log.debug('Check custom xattr for directory on brick %s:%s',
                            brick_server, brick_path)
                self.assertEqual(
                    'bar2', ret, "Expected: user.foo should be on brick %s\n"
                    "Actual: value of user.foo is %s" %
                    (brick_path, ret))
                g.log.info('Custom xattr is present on brick %s', brick_path)

            # Delete custom attribute
            ret = delete_fattr(mount_point.client_system, folder_name,
                               'user.foo')
            self.assertTrue(ret, "Failed to delete custom attribute")

            g.log.info('Removed custom attribute from directory %s:%s',
                       mount_point.client_system, folder_name)
            # Verify that custom xattr is not displayed after delete
            # on mount point and on the bricks

            g.log.debug('Checking if custom xattr user.foo is still present '
                        'on the mount or on the bricks after deletion')
            self.assertIsNone(
                get_fattr(mount_point.client_system,
                          folder_name,
                          'user.foo',
                          encode="text"),
                "Xattr user.foo is still present on mount point"
                " %s:%s after deletion" %
                (mount_point.mountpoint, folder_name))

            g.log.info(
                "Xattr user.foo is not present after deletion"
                " on mount point %s:%s", mount_point.mountpoint, folder_name)

            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix.format(root=brick_dir,
                                               client_index=mount_index)
                self.assertIsNone(
                    get_fattr(brick_server, brick_path, 'user.foo'),
                    "Deleted xattr user.foo is still present on "
                    "brick %s:%s" % (brick, brick_path))
                g.log.info(
                    'Custom attribute is not present after deletion '
                    'from directory on brick %s:%s', brick, brick_path)

        # Repeat all of the steps for link of created directory
        for mount_index, mount_point in enumerate(self.mounts):
            linked_folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                                   client_index="%s_linked" %
                                                   mount_index)
            folder_name = dir_prefix.format(root=mount_point.mountpoint,
                                            client_index=mount_index)
            # Create link to created dir
            command = 'ln -s {src} {dst}'.format(dst=linked_folder_name,
                                                 src=folder_name)
            ret, _, _ = g.run(mount_point.client_system, command)
            self.assertEqual(
                0, ret, 'Failed to create link %s to directory %s' %
                (linked_folder_name, folder_name))
            self.assertTrue(
                file_exists(mount_point.client_system, linked_folder_name),
                'Link does not exist on %s:%s' %
                (mount_point.client_system, linked_folder_name))
            g.log.info('Created link %s to directory %s', linked_folder_name,
                       folder_name)

            # Verify that hash layout values are set on each
            # bricks for the link to dir
            g.log.debug("Verifying hash layout values")
            ret = validate_files_in_dir(mount_point.client_system,
                                        mount_point.mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS,
                                        file_type=FILETYPE_LINK)
            self.assertTrue(
                ret, "Expected - Link to directory is stored "
                "on hashed bricks")
            g.log.info("Hash layout values are set on each bricks")

            # Verify that mount point should not display xattr :
            # trusted.gfid and dht
            g.log.debug("Loading extra attributes")
            ret = get_fattr_list(mount_point.client_system, linked_folder_name)

            self.assertTrue(
                'trusted.gfid' not in ret,
                "Extended attribute trusted.gfid is present on "
                "mount point %s and folder %s" %
                (mount_point.mountpoint, linked_folder_name))

            self.assertTrue(
                'trusted.glusterfs.dht' not in ret,
                "Extended attribute trusted.glusterfs.dht is "
                "present on mount point %s and folder %s" %
                (mount_point.mountpoint, linked_folder_name))

            g.log.info(
                'Extended attributes trusted.gfid and '
                'trusted.glusterfs.dht do not exist on '
                'mount point %s:%s ', mount_point.mountpoint,
                linked_folder_name)

            # Verify that mount point shows pathinfo xattr
            g.log.debug("Check if pathinfo is presented on %s:%s",
                        mount_point.client_system, linked_folder_name)
            self.assertIsNotNone(
                get_fattr(mount_point.client_system, mount_point.mountpoint,
                          'trusted.glusterfs.pathinfo'),
                "pathinfo is not displayed on mountpoint "
                "%s:%s" % (mount_point.client_system, linked_folder_name))
            g.log.info('pathinfo value is displayed on mount point %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Set custom Attribute to link
            g.log.debug("Set custom xattribute user.foo to %s:%s",
                        mount_point.client_system, linked_folder_name)
            self.assertTrue(
                set_fattr(mount_point.client_system, linked_folder_name,
                          'user.foo', 'bar2'))
            g.log.info('Successful in set custom attribute to %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Verify that custom xattr for directory is displayed
            # on mount point and bricks
            g.log.debug('Check mountpoint and bricks for custom xattr')
            self.assertEqual(
                'bar2',
                get_fattr(mount_point.client_system,
                          linked_folder_name,
                          'user.foo',
                          encode="text"),
                'Custom xattr is not present on '
                'mount point %s:%s' %
                (mount_point.client_system, linked_folder_name))
            g.log.info("Custom xattr is present on mount point %s:%s",
                       mount_point.client_system, linked_folder_name)
            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix. \
                    format(root=brick_dir,
                           client_index="%s_linked" % mount_index)
                cmd = '[ -f %s ] && echo "yes" || echo "no"' % brick_path
                # Check if link exists
                _, ret, _ = g.run(brick_server, cmd)
                if 'no' in ret:
                    g.log.info("Link %s:%s does not exists", brick_server,
                               brick_path)
                    continue

                self.assertEqual(
                    get_fattr(brick_server,
                              brick_path,
                              'user.foo',
                              encode="text"), 'bar2',
                    "Actual: custom attribute not "
                    "found on brick %s:%s" % (brick_server, brick_path))
                g.log.info('Custom xattr for link found on brick %s:%s',
                           brick_server, brick_path)

            # Delete custom attribute
            g.log.debug('Removing custom attribute on mount point %s:%s',
                        mount_point.client_system, linked_folder_name)
            self.assertTrue(
                delete_fattr(mount_point.client_system, linked_folder_name,
                             'user.foo'), 'Failed to delete xattr user.foo')
            g.log.info('Deleted custom xattr from link %s:%s',
                       mount_point.client_system, linked_folder_name)

            # Verify that custom xattr is not displayed after delete
            # on mount point and on the bricks
            g.log.debug(
                "Check if custom xattr is still present on %s:%s "
                "after deletion", mount_point.client_system,
                linked_folder_name)
            self.assertIsNone(
                get_fattr(mount_point.client_system,
                          linked_folder_name,
                          'user.foo',
                          encode="text"),
                "Expected: xattr user.foo not to be present on"
                " %s:%s" % (mount_point.client_system, linked_folder_name))
            g.log.info("Custom xattr user.foo is not present on %s:%s",
                       mount_point.client_system, linked_folder_name)
            for brick in get_all_bricks(self.mnode, self.volname):
                brick_server, brick_dir = brick.split(':')
                brick_path = dir_prefix. \
                    format(root=brick_dir,
                           client_index="%s_linked" % mount_index)
                cmd = '[ -f %s ] && echo "yes" || echo "no"' % brick_path
                # Check if link exists
                _, ret, _ = g.run(brick_server, cmd)
                if 'no' in ret:
                    g.log.info("Link %s:%s does not exists", brick_server,
                               brick_path)
                    continue

                self.assertIsNone(
                    get_fattr(brick_server,
                              brick_path,
                              'user.foo',
                              encode="text"),
                    "Extended custom attribute is still present on "
                    "%s:%s after deletion" % (brick_server, brick_path))
                g.log.info(
                    'Custom attribute is not present after deletion '
                    'from link on brick %s:%s', brick_server, brick_path)

        g.log.info('Directory - custom extended attribute validation getfattr,'
                   ' setfattr is successful')
    def test_ec_uss_snapshot(self):
        """
        - Start resource consumption tool
        - Create directory dir1
        - Create 5 directory and 5 files in dir of mountpoint
        - Rename all files inside dir1 at mountpoint
        - Create softlink and hardlink of files in dir1 of mountpoint
        - Delete op for deleting all file in one of the dirs inside dir1
        - Create tiny, small, medium and large file
        - Create IO's
        - Enable USS
        - Create a Snapshot
        - Activate Snapshot
        - List snapshot and the contents inside snapshot
        - Delete Snapshot
        - Create Snapshot with same name
        - Activate Snapshot
        - List snapshot and the contents inside snapshot
        - Validating IO's and waiting for it to complete
        - Close connection and check file exist for memory log
        """
        # pylint: disable=too-many-branches,too-many-statements,too-many-locals
        # Starting resource consumption using top
        log_file_mem_monitor = '/var/log/glusterfs/mem_usage.log'
        cmd = ("for i in {1..20};do top -n 1 -b|egrep "
               "'RES|gluster' & free -h 2>&1 >> %s ;"
               "sleep 10;done" % (log_file_mem_monitor))
        g.log.info(cmd)
        cmd_list_procs = []
        for server in self.servers:
            proc = g.run_async(server, cmd)
            cmd_list_procs.append(proc)

        # Creating dir1
        ret = mkdir(self.mounts[0].client_system,
                    "%s/dir1" % self.mounts[0].mountpoint)
        self.assertTrue(ret, "Failed to create dir1")
        g.log.info("Directory dir1 on %s created successfully", self.mounts[0])

        # Create 5 dir and 5 files in each dir at mountpoint on dir1
        start, end = 1, 5
        for mount_obj in self.mounts:
            # Number of dir and files to be created.
            dir_range = ("%s..%s" % (str(start), str(end)))
            file_range = ("%s..%s" % (str(start), str(end)))
            # Create dir 1-5 at mountpoint.
            ret = mkdir(mount_obj.client_system,
                        "%s/dir1/dir{%s}" % (mount_obj.mountpoint, dir_range))
            self.assertTrue(ret, "Failed to create directory")
            g.log.info("Directory created successfully")

            # Create files inside each dir.
            cmd = ('touch %s/dir1/dir{%s}/file{%s};' %
                   (mount_obj.mountpoint, dir_range, file_range))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "File creation failed")
            g.log.info("File created successfull")

            # Increment counter so that at next client dir and files are made
            # with diff offset. Like at next client dir will be named
            # dir6, dir7...dir10. Same with files.
            start += 5
            end += 5

        # Rename all files inside dir1 at mountpoint on dir1
        cmd = ('cd %s/dir1/dir1/; '
               'for FILENAME in *;'
               'do mv $FILENAME Unix_$FILENAME;'
               'done;' % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to rename file on " "client")
        g.log.info("Successfully renamed file on client")

        # Truncate at any dir in mountpoint inside dir1
        # start is an offset to be added to dirname to act on
        # diff files at diff clients.
        start = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s/dir1/dir%s/; '
                   'for FILENAME in *;'
                   'do echo > $FILENAME;'
                   'done;' % (mount_obj.mountpoint, str(start)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Truncate failed")
            g.log.info("Truncate of files successfull")

        # Create softlink and hardlink of files in mountpoint. Start is an
        # offset to be added to dirname to act on diff files at diff clients.
        start = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s/dir1/dir%s; '
                   'for FILENAME in *; '
                   'do ln -s $FILENAME softlink_$FILENAME;'
                   'done;' % (mount_obj.mountpoint, str(start)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Creating Softlinks have failed")
            g.log.info("Softlink of files have been changed successfully")

            cmd = ('cd %s/dir1/dir%s; '
                   'for FILENAME in *; '
                   'do ln $FILENAME hardlink_$FILENAME;'
                   'done;' % (mount_obj.mountpoint, str(start + 1)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Creating Hardlinks have failed")
            g.log.info("Hardlink of files have been changed successfully")
            start += 5

        # Create tiny, small, medium and large file
        # at mountpoint. Offset to differ filenames
        # at diff clients.
        offset = 1
        for mount_obj in self.mounts:
            cmd = ('cd %s; fallocate -l 100 tiny_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for tiny files failed")
            g.log.info("Fallocate for tiny files successful")

            cmd = ('cd %s; fallocate -l 20M small_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for small files failed")
            g.log.info("Fallocate for small files successful")

            cmd = ('cd %s; fallocate -l 200M medium_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for medium files failed")
            g.log.info("Fallocate for medium files successful")

            cmd = ('cd %s; fallocate -l 1G large_file%s.txt' %
                   (mount_obj.mountpoint, str(offset)))
            ret, _, _ = g.run(mount_obj.client_system, cmd)
            self.assertFalse(ret, "Fallocate for large files failed")
            g.log.info("Fallocate for large files successful")
            offset += 1

        # Creating files on client side for dir1
        # Write IO
        all_mounts_procs, count = [], 1
        for mount_obj in self.mounts:
            g.log.info("Starting IO on %s:%s", mount_obj.client_system,
                       mount_obj.mountpoint)
            cmd = ("/usr/bin/env python %s create_deep_dirs_with_files "
                   "--dirname-start-num %d "
                   "--dir-depth 2 "
                   "--dir-length 10 "
                   "--max-num-of-dirs 5 "
                   "--num-of-files 5 %s/dir1" %
                   (self.script_upload_path, count, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
            count += 10
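        # 'count' advances by 10 per client so --dirname-start-num never
        # overlaps between mounts, keeping the generated dir names unique.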

        # Enable USS
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable USS on volume")
        g.log.info("Successfully enabled USS on volume")

        # Create Snapshot
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                "ec_snap",
                                timestamp=False)
        self.assertEqual(ret, 0, "Failed to create snapshot ec_snap")
        g.log.info("Snapshot ec_snap of volume %s created"
                   "successfully.", self.volname)

        # Activate snapshot
        ret, _, _ = snap_activate(self.mnode, "ec_snap")
        self.assertEqual(ret, 0, "Failed to activate snapshot ec_snap")
        g.log.info("Snapshot activated successfully")

        # Wait briefly, then list contents inside the snapshot
        sleep(5)
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            self.assertIn(
                "ec_snap", out.split("\n"), "Failed to "
                "validate ec_snap under .snaps directory")
            g.log.info("Activated Snapshot listed Successfully")

        # Delete Snapshot ec_snap
        ret, _, _ = snap_delete(self.mnode, "ec_snap")
        self.assertEqual(ret, 0, "Failed to delete snapshot")
        g.log.info("Snapshot deleted Successfully")

        # Creating snapshot with the same name
        ret, _, _ = snap_create(self.mnode,
                                self.volname,
                                "ec_snap",
                                timestamp=False)
        self.assertEqual(ret, 0, "Failed to create snapshot ec_snap")
        g.log.info("Snapshot ec_snap of volume %s created"
                   "successfully.", self.volname)

        # Activate snapshot ec_snap
        ret, _, _ = snap_activate(self.mnode, "ec_snap")
        self.assertEqual(ret, 0, "Failed to activate snapshot ec_snap")
        g.log.info("Snapshot activated successfully")

        # Wait briefly, then list contents inside ec_snap
        sleep(5)
        for mount_obj in self.mounts:
            ret, out, _ = uss_list_snaps(mount_obj.client_system,
                                         mount_obj.mountpoint)
            self.assertEqual(
                ret, 0, "Directory Listing Failed for"
                " Activated Snapshot")
            self.assertIn(
                "ec_snap", out.split('\n'), "Failed to "
                "validate ec_snap under .snaps directory")
            g.log.info("Activated Snapshot listed Successfully")

        # Validate IO and wait for it to complete
        ret = validate_io_procs(all_mounts_procs, self.mounts)
        self.assertTrue(ret, "IO failed on some of the clients")
        g.log.info("Successfully validated all IO")

        # Check that the memory log file exists and collect the memory
        # logging processes
        ret = file_exists(self.mnode, '/var/log/glusterfs/mem_usage.log')
        self.assertTrue(ret, "Unexpected: Memory log file does not exist")
        g.log.info("Memory log file exists")
        for proc in cmd_list_procs:
            ret, _, _ = proc.async_communicate()
            self.assertEqual(ret, 0, "Memory logging failed")
            g.log.info("Memory logging is successful")
    def test_op_version(self):
        '''
        -> Create Volume
        -> Get the current op-version
        -> Get the max supported op-version
        -> Verify that the vol info file exists on all servers
        -> Get the version number from the vol info file
        -> If the current op-version is less than the max op-version,
        set the current op-version to the max op-version
        -> After the vol set operation, verify that the version number
        in the vol info file increased by one
        -> Verify that the current op-version and max op-version are the same
        '''

        # Getting current op-version
        vol_dict = get_volume_options(self.mnode, 'all',
                                      'cluster.op-version')
        current_op_version = int(vol_dict['cluster.op-version'])

        # Getting max op-version
        all_dict = get_volume_options(self.mnode, 'all')
        max_op_version = int(all_dict['cluster.max-op-version'])
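        # For reference, the same values can also be fetched from the CLI,
        # e.g.:
        #   gluster volume get all cluster.op-version
        #   gluster volume get all cluster.max-op-version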

        # file_path: path of the vol info file
        # Check that the vol info file exists on all servers
        file_path = '/var/lib/glusterd/vols/' + self.volname + '/info'
        for server in self.servers:
            ret = file_exists(server, file_path)
            self.assertTrue(ret, "Vol file not found in server %s" % server)
            g.log.info("vol file found in server %s", server)

        # Get the version number by grepping '^version' in the vol info file
        ret, out, _ = g.run(self.mnode,
                            ' '.join(['grep', "'^version'", file_path]))
        version_list = out.split('=')
        version_no = int(version_list[1]) + 1
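        # Every successful volume set operation bumps 'version=' in the info
        # file by one, so the expected post-set value is current value + 1.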

        # Comparing current op-version and max op-version
        if current_op_version < max_op_version:

            # Set max-op-version
            ret = set_volume_options(self.mnode, 'all',
                                     {'cluster.op-version': max_op_version})
            self.assertTrue(ret, "Failed to set max op-version for cluster")
            g.log.info("Setting up max-op-version is successful for cluster")

            # Grepping version number from vol info file after
            # vol set operation
            ret, out, _ = g.run(self.mnode,
                                ' '.join(['grep', "'^version'", file_path]))
            version_list = out.split('=')
            after_version_no = int(version_list[1])

            # Comparing version number before and after vol set operations
            self.assertEqual(version_no, after_version_no,
                             "After volume set operation version "
                             "number not increased by one")
            g.log.info("After volume set operation version number "
                       "increased by one")

            # Getting current op-version
            vol_dict = get_volume_options(self.mnode, 'all',
                                          'cluster.op-version')
            current_op_version = int(vol_dict['cluster.op-version'])

        # Check that the current op-version and max op-version are equal
        self.assertEqual(current_op_version, max_op_version,
                         "Current op-version and max op-version "
                         "are not the same")
        g.log.info("current-op-version and max-op-version of cluster "
                   "are the same")
    def test_gfind_query_cli(self):
        """
        Verifying the glusterfind query command functionality with valid
        and invalid values for the required and optional parameters.

        * Create a volume
        * Perform some I/O from the mount point
        * Perform glusterfind query with the following combinations:
            - Valid values for required parameters
            - Invalid values for required parameters
            - Valid values for optional parameters
            - Invalid values for optional parameters

            Where
            Required parameters: volname and sessname
            Optional parameters: debug
        """

        # pylint: disable=too-many-statements
        # Starting IO on the mounts
        mount_obj = self.mounts[0]
        mount_dir = mount_obj.mountpoint
        client = mount_obj.client_system

        g.log.info("Creating Files on %s:%s", client, mount_dir)
        cmd = ("cd %s ; for i in `seq 1 10` ; "
               "do dd if=/dev/urandom of=file$i bs=1M count=1 ; "
               "done" % mount_dir)
        ret, _, _ = g.run(client, cmd)
        self.assertEqual(ret, 0, "Failed to create files on mountpoint")
        g.log.info("Files created successfully on mountpoint")

        # Check if the files exist
        g.log.info("Checking the existence of files created during IO")
        for i in range(1, 11):
            ret = file_exists(client, '%s/file%s' % (mount_dir, i))
            self.assertTrue(ret, "Unexpected: File 'file%s' does not exist"
                            % i)
            g.log.info("Successfully validated existence of 'file%s'", i)

        # Perform glusterfind query for the volume
        g.log.info("Performing glusterfind query using valid values for the "
                   "required parameters")
        ret, _, _ = gfind_query(self.mnode, self.volname, self.outfile,
                                full=True)
        self.assertEqual(ret, 0, "Failed to perform glusterfind query")
        g.log.info("Successfully performed glusterfind query")

        # Check if the outfile exists
        g.log.info("Checking if outfile created during glusterfind query "
                   "command exists")
        ret = file_exists(self.mnode, self.outfile)
        self.assertTrue(ret, "Unexpected: File '%s' does not exist"
                        % self.outfile)
        g.log.info("Successfully validated existence of '%s'", self.outfile)

        # Check if all the files are listed in the outfile
        for i in range(1, 11):
            ret = check_if_pattern_in_file(self.mnode,
                                           'file%s' % i, self.outfile)
            self.assertEqual(ret, 0,
                             ("File 'file%s' not listed in %s"
                              % (i, self.outfile)))
            g.log.info("File 'file%s' listed in %s", i, self.outfile)

        # Perform glusterfind query using the invalid values for required
        # parameters
        not_volume = 'invalid-volume-name-for-glusterfind-query'
        g.log.info("Performing glusterfind query using invalid values for "
                   "required parameters")
        ret, _, _ = gfind_query(self.mnode, not_volume, self.outfile,
                                since='none')
        self.assertNotEqual(ret, 0, "Unexpected: glusterfind query Successful "
                            "even with invalid values for required parameters")
        g.log.info("Successful: glusterfind query failed with invalid values "
                   "for required parameters")

        # Perform glusterfind query using the invalid values for optional
        # parameters
        g.log.info("Performing glusterfind query using invalid values for the "
                   "optional parameters")
        invalid_options = [' --dbug', ' --noencod', ' --type n', ' --fll',
                           ' --tagforfullfind', ' --disablepartial',
                           ' --outprefix none', ' --namespc']
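        # Each entry is a deliberate misspelling or malformed form of a real
        # glusterfind option (such as '--debug' or '--full'), so every
        # invocation below is expected to fail.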
        for opt in invalid_options:
            ret, _, _ = g.run(self.mnode, ("glusterfind query %s %s %s"
                                           % (self.volname, self.outfile,
                                              opt)))
            self.assertNotEqual(ret, 0, "Unexpected: glusterfind query "
                                " Successful for option %s which is invalid"
                                % opt)
        g.log.info("Successful: glusterfind query failed with invalid value "
                   "for optional parameters")
    def test_rename_directory_no_destination_folder(self):
        """Test rename directory with no destination folder"""
        dirs = {
            'initial': '{root}/folder_{client_index}',
            'new_folder': '{root}/folder_renamed{client_index}'
        }
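        # The templates above expand per mount, e.g. for mount_index 0:
        #   initial    -> <mountpoint>/folder_0
        #   new_folder -> <mountpoint>/folder_renamed0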

        for mount_index, mount_obj in enumerate(self.mounts):
            client_host = mount_obj.client_system
            mountpoint = mount_obj.mountpoint
            initial_folder = dirs['initial'].format(
                root=mount_obj.mountpoint,
                client_index=mount_index
            )

            ret = validate_files_in_dir(client_host, mountpoint,
                                        test_type=LAYOUT_IS_COMPLETE,
                                        file_type=FILETYPE_DIRS)
            self.assertTrue(ret, "Expected - Layout is complete")
            g.log.info('Layout is complete')

            # Create source folder on mount point
            self.assertTrue(mkdir(client_host, initial_folder),
                            'Failed creating source directory')
            self.assertTrue(file_exists(client_host, initial_folder),
                            'Source directory %s was not created'
                            % initial_folder)
            g.log.info('Created source directory %s on mount point %s',
                       initial_folder, mountpoint)

            # Create files and directories
            ret = self.create_files(client_host, initial_folder, self.files,
                                    content='Textual content')

            self.assertTrue(ret, 'Unable to create files on mount point')
            g.log.info('Files and directories are created')

            ret = validate_files_in_dir(client_host, mountpoint,
                                        test_type=FILE_ON_HASHED_BRICKS)
            self.assertTrue(ret, "Expected - Files and dirs are stored "
                            "on hashed bricks")
            g.log.info('Files and dirs are stored on hashed bricks')

            new_folder_name = dirs['new_folder'].format(
                root=mountpoint,
                client_index=mount_index
            )
            # Check that the destination dir does not exist yet
            self.assertFalse(file_exists(client_host, new_folder_name),
                             'Expected: new folder %s should not exist yet'
                             % new_folder_name)
            # Rename source folder
            ret = move_file(client_host, initial_folder,
                            new_folder_name)
            self.assertTrue(ret, "Rename direcoty failed")
            g.log.info('Renamed directory %s to %s', initial_folder,
                       new_folder_name)

            # Old dir should not exist and the destination should be present
            self.assertFalse(file_exists(client_host, initial_folder),
                             '%s should not be listed' % initial_folder)
            g.log.info('The old directory %s does not exist on the mount '
                       'point', initial_folder)
            self.assertTrue(file_exists(client_host, new_folder_name),
                            'Destination dir %s does not exist' %
                            new_folder_name)
            g.log.info('The new folder %s is present', new_folder_name)

            # Check bricks for source and destination directories
            for brick_item in get_all_bricks(self.mnode, self.volname):
                brick_host, brick_dir = brick_item.split(':')

                initial_folder = dirs['initial'].format(
                    root=brick_dir,
                    client_index=mount_index
                )
                new_folder_name = dirs['new_folder'].format(
                    root=brick_dir,
                    client_index=mount_index
                )

                self.assertFalse(file_exists(brick_host, initial_folder),
                                 "Expected folder %s to be absent" %
                                 initial_folder)
                self.assertTrue(file_exists(brick_host, new_folder_name),
                                'Expected folder %s to be present' %
                                new_folder_name)

                g.log.info('The old directory %s does not exist and directory'
                           ' %s is present', initial_folder, new_folder_name)
        g.log.info('Rename of a directory when the destination directory '
                   'does not exist is successful')
    def test_detach_node_used_to_mount(self):
        # pylint: disable=too-many-statements
        """
        Test case:
        1. Create a 1x3 volume with only 3 nodes from the cluster.
        2. Mount the volume on the client node using the IP of the fourth
           node.
        3. Write IO to the volume.
        4. Detach node N4 from the cluster.
        5. Create a new directory on the mount point.
        6. Create a few files using the same command used in step 3.
        7. Add three more bricks to make the volume 2x3 using the add-brick
           command.
        8. Do a gluster volume rebalance on the volume.
        9. Create more files from the client on the mount point.
        10. Check for files on bricks from both replica sets.
        11. Create a new directory from the client on the mount point.
        12. Check for the directory in both replica sets.
        """

        # Create and start a volume
        ret = setup_volume(self.mnode, self.all_servers_info, self.volume)
        self.assertTrue(ret, "Failed to create and start volume")
        g.log.info("Volume %s created successfully", self.volname)

        # Mounting the volume.
        ret, _, _ = mount_volume(self.volname,
                                 mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.servers[4],
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume %s is not mounted") % self.volname)
        g.log.info("Volume mounted successfully using %s", self.servers[4])

        # Creating 100 files.
        command = ('for number in `seq 1 100`;do touch ' +
                   self.mounts[0].mountpoint + '/file$number; done')
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation failed.")
        g.log.info("Files create on mount point.")

        # Detach N4 from the list.
        ret, _, _ = peer_detach(self.mnode, self.servers[4])
        self.assertEqual(ret, 0, "Failed to detach %s" % self.servers[4])
        g.log.info("Peer detach successful %s", self.servers[4])

        # Creating a dir.
        ret = mkdir(self.mounts[0].client_system,
                    self.mounts[0].mountpoint + "/dir1",
                    parents=True)
        self.assertTrue(ret, ("Failed to create directory dir1."))
        g.log.info("Directory dir1 created successfully.")

        # Creating 100 files.
        command = ('for number in `seq 101 200`;do touch ' +
                   self.mounts[0].mountpoint + '/file$number; done')
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation failed.")
        g.log.info("Files create on mount point.")

        # Forming brick list
        brick_list = form_bricks_list_to_add_brick(self.mnode, self.volname,
                                                   self.servers,
                                                   self.all_servers_info)

        # Adding bricks
        ret, _, _ = add_brick(self.mnode, self.volname, brick_list)
        self.assertEqual(ret, 0,
                         "Failed to add brick to the volume %s" % self.volname)
        g.log.info("Brick added successfully to the volume %s", self.volname)

        # Start rebalance for volume.
        g.log.info("Starting rebalance on the volume")
        ret, _, _ = rebalance_start(self.mnode, self.volname)
        self.assertEqual(ret, 0, ("Failed to start rebalance "
                                  "on the volume %s", self.volname))
        g.log.info("Successfully started rebalance on the volume %s",
                   self.volname)

        # Creating 100 files.
        command = ('for number in `seq 201 300`;do touch ' +
                   self.mounts[0].mountpoint + '/file$number; done')
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "File creation failed.")
        g.log.info("Files create on mount point.")

        # Check for files on bricks by sampling random file numbers.
        attempts = 10
        while attempts:
            number = str(randint(1, 300))
            for brick in brick_list:
                brick_server, brick_dir = brick.split(':')
                file_name = brick_dir + "/file" + number
                if file_exists(brick_server, file_name):
                    g.log.info("Check xattr"
                               " on host %s for file %s", brick_server,
                               file_name)
                    ret = get_fattr_list(brick_server, file_name)
                    self.assertTrue(ret,
                                    ("Failed to get xattr for %s" % file_name))
                    g.log.info("Got xattr for %s successfully", file_name)
            attempts -= 1
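        # The sampling above checks xattrs for up to 10 randomly chosen file
        # numbers, and only on the newly added bricks in 'brick_list';
        # iterating over get_all_bricks(self.mnode, self.volname) instead
        # would cover both replica sets.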

        # Creating a dir.
        ret = mkdir(self.mounts[0].client_system,
                    self.mounts[0].mountpoint + "/dir2")
        if not ret:
            attempts = 5
            while attempts:
                ret = mkdir(self.mounts[0].client_system,
                            self.mounts[0].mountpoint + "/dir2")
                if ret:
                    break
                attempts -= 1
        self.assertTrue(ret, ("Failed to create directory dir2."))
        g.log.info("Directory dir2 created successfully.")

        # Check for directory in both replica sets.
        for brick in brick_list:
            brick_server, brick_dir = brick.split(':')
            folder_name = brick_dir + "/dir2"
            if file_exists(brick_server, folder_name):
                g.log.info(
                    "Check trusted.glusterfs.dht"
                    " on host %s for directory %s", brick_server, folder_name)
                ret = get_fattr(brick_server, folder_name,
                                'trusted.glusterfs.dht')
                self.assertTrue(ret, ("Failed to get trusted.glusterfs.dht"
                                      " xattr for %s" % folder_name))
                g.log.info(
                    "Get trusted.glusterfs.dht xattr"
                    " for %s successfully", folder_name)