Example #1
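Note: throughout these examples, get_file_stat() is assumed to return a dictionary of stat fields (keys such as 'filetype', 'access', 'uid', 'gid', 'username', 'groupname', 'size', 'links' and the epoch_* timestamps), or None on failure. Before the helper below, here is a minimal, framework-free sketch of the key-by-key comparison pattern it uses; the dictionaries are hypothetical stand-ins for get_file_stat() output, not glusto code.

# Hedged sketch of the comparison pattern used in this example.
def compare_stat_keys(stat_a, stat_b, keys, expect_equal=True):
    """Return the keys whose equality does not match the expectation."""
    mismatches = []
    for key in keys:
        same = stat_a.get(key) == stat_b.get(key)
        if same != expect_equal:
            mismatches.append(key)
    return mismatches

# Hypothetical stat output for a file and a hard link to it.
stat_file = {'access': '644', 'uid': '0', 'gid': '0', 'links': '2'}
stat_link = {'access': '644', 'uid': '0', 'gid': '0', 'links': '2'}
assert not compare_stat_keys(stat_file, stat_link,
                             ('access', 'uid', 'gid', 'links'))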
    def _collect_and_compare_file_info_on_mnt(self,
                                              link_file_name,
                                              values,
                                              expected=True):
        """Collect the file's permissions on the mountpoint and compare"""
        stat_test_file = get_file_stat(self.client,
                                       "{}/test_file".format(self.m_point))
        self.assertIsNotNone(stat_test_file, "Failed to get stat of test_file")
        stat_link_file = get_file_stat(
            self.client, "{}/{}".format(self.m_point, link_file_name))
        self.assertIsNotNone(stat_link_file,
                             "Failed to get stat of {}".format(link_file_name))

        for key in values:
            if expected is True:
                self.assertEqual(
                    stat_test_file[key], stat_link_file[key],
                    "The {} is not the same for test_file"
                    " and {}".format(key, link_file_name))
                g.log.info("The %s for test_file and %s is the same on "
                           "mountpoint", key, link_file_name)
            else:
                self.assertNotEqual(
                    stat_test_file[key], stat_link_file[key],
                    "Unexpected: The {} is the same for test_file"
                    " and {}".format(key, link_file_name))
                g.log.info(
                    "The %s for test_file and %s is different"
                    " on mountpoint", key, link_file_name)
Example #2
    def _compare_file_permissions(self,
                                  file_name,
                                  file_info_mnt=None,
                                  file_info_brick=None):
        """Check if the file's permissions are the same on the mountpoint
        and backend-bricks"""
        if (file_info_mnt is None and file_info_brick is None):
            file_info_mnt = (get_file_stat(
                self.client, "{}/{}".format(self.m_point,
                                            file_name)))['access']
            self.assertIsNotNone(
                file_info_mnt,
                "Failed to get access permissions for {}".format(file_name))
            brick_list = get_pathinfo(self.client,
                                      "{}/{}".format(self.m_point, file_name))
            self.assertNotEqual(
                brick_list, 0,
                "Failed to get bricklist for {}".format(file_name))
            file_info_brick = []
            for brick in brick_list['brickdir_paths']:
                host, path = brick.split(':')
                info_brick = (get_file_stat(host, path))['access']
                file_info_brick.append(info_brick)

        for info in file_info_brick:
            self.assertEqual(
                info, file_info_mnt, "File details for {} are different on"
                " backend-brick".format(file_name))
            g.log.info("Details for file %s are correct"
                       " on backend-bricks", file_name)
Example #3
    def _verify_link_file_exists(brickdir, file_name):
        """ Verifies whether a file link is present in given subvol
        Args:
               brickdir(Class Object): BrickDir object containing data about
                                       bricks under a specific subvol
               file_name(str): Name of the file whose link file is expected
        Returns:
                True/False(bool): Based on existence of file link
        """
        # pylint: disable=protected-access
        # pylint: disable=unsubscriptable-object
        file_path = brickdir._fqpath + file_name
        file_stat = get_file_stat(brickdir._host, file_path)
        if file_stat is None:
            g.log.error("Failed to get File stat for %s", file_path)
            return False
        if file_stat['access'] != "1000":
            g.log.error("Access value not 1000 for %s", file_path)
            return False

        # Check for the file type to be 'sticky empty', have a size of 0 and
        # have the glusterfs.dht.linkto xattr set.
        ret = is_linkto_file(brickdir._host, file_path)
        if not ret:
            g.log.error("%s is not a linkto file", file_path)
            return False
        return True
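For reference, the 'sticky empty' check described above can be sketched with the standard library alone; this is an assumption-based local illustration using os.stat, not the glusto is_linkto_file() helper, and it omits the glusterfs.dht.linkto xattr verification:

import os
import stat


def looks_like_linkto_file(path):
    """True if a local file has mode 1000 (sticky bit only) and size 0."""
    st = os.stat(path)
    return stat.S_IMODE(st.st_mode) == 0o1000 and st.st_size == 0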
Example #4
 def _check_file_stat_on_mountpoint(self, file_name, file_type):
     """Check the file-type on mountpoint"""
     file_stat = (get_file_stat(self.client,
                                "{}/{}".format(self.m_point,
                                               file_name)))['filetype']
     self.assertEqual(file_stat, file_type,
                      "File is not a {}".format(file_type))
     g.log.info("File is %s", file_type)
Example #5
    def test_readlink(self):
        # create file
        g.log.info("Creating %s/file.txt", self.mounts[0].mountpoint)
        cmd = ("echo 'hello_world' > %s/file.txt" % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "File creation failed")
        g.log.info("Created %s/file.txt", self.mounts[0].mountpoint)

        # create symlink
        g.log.info("Creating %s/symlink.txt to %s/file.txt",
                   self.mounts[0].mountpoint, self.mounts[0].mountpoint)
        cmd = ("ln -s file.txt %s/symlink.txt" % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "symlink creation failed")
        g.log.info("Created %s/symlink.txt to %s/file.txt",
                   self.mounts[0].mountpoint, self.mounts[0].mountpoint)

        # stat symlink on mount and verify file type and permission.
        g.log.info("Checking file permissions")
        path = ("%s/symlink.txt" % self.mounts[0].mountpoint)
        stat_dict = get_file_stat(self.clients[0], path)
        self.assertEqual(stat_dict['filetype'], 'symbolic link', "Expected "
                         "symlink but found %s" % stat_dict['filetype'])
        self.assertEqual(stat_dict['access'], '777', "Expected 777 "
                         "but found %s" % stat_dict['access'])
        g.log.info("File permissions for symlink.txt are 777 as expected")

        # readlink to verify contents
        g.log.info("Performing readlink on %s/symlink.txt",
                   self.mounts[0].mountpoint)
        cmd = ("readlink %s/symlink.txt" % self.mounts[0].mountpoint)
        _, val, _ = g.run(self.clients[0], cmd)
        content = val.strip()
        self.assertEqual(content, "file.txt", "Readlink error: got %s"
                         % content)
        g.log.info("readlink returned 'file.txt' as expected")

        # stat symlink on bricks and verify file type and permission.
        g.log.info("Checking file type and permissions on bricks")
        for brick in self.bricks_list:
            node, path = brick.split(':')
            filepath = path + "/symlink.txt"
            stat_dict = get_file_stat(node, filepath)
            self.assertEqual(stat_dict['filetype'], 'symbolic link', "Expected"
                             " symlink but found %s" % stat_dict['filetype'])
            self.assertEqual(stat_dict['access'], '777', "Expected 777 "
                             "but found %s" % stat_dict['access'])
            g.log.info("file type and permission 777 verified for "
                       "symlink.txt on %s", brick)
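The symlink checks above can be reproduced locally with the standard library; a small sketch assuming a Linux scratch directory rather than a gluster mount (on Linux, lstat() reports mode 0777 for symlinks):

import os
import stat
import tempfile

workdir = tempfile.mkdtemp()
target = os.path.join(workdir, "file.txt")
link = os.path.join(workdir, "symlink.txt")
with open(target, "w") as f:
    f.write("hello_world\n")
os.symlink("file.txt", link)

st = os.lstat(link)                       # stat the link itself, not the target
assert stat.S_ISLNK(st.st_mode)           # file type is 'symbolic link'
assert stat.S_IMODE(st.st_mode) == 0o777  # symlink permissions show as 777
assert os.readlink(link) == "file.txt"    # readlink returns the target name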
Example #6
    def _compare_stat_output_from_mount_point_and_bricks(self):
        """Compare stat output from mountpoint and bricks"""
        for fname in self.list_of_device_files:
            # Fetch stat output from mount point
            mountpoint_stat = get_file_stat(self.clients[0], fname)
            bricks = get_pathinfo(self.clients[0], fname)

            # Fetch stat output from bricks
            for brick_path in bricks['brickdir_paths']:
                node, path = brick_path.split(":")
                brick_stat = get_file_stat(node, path)
                for key in ("filetype", "access", "size", "username",
                            "groupname", "uid", "gid", "epoch_atime",
                            "epoch_mtime", "epoch_ctime"):
                    self.assertEqual(
                        mountpoint_stat[key], brick_stat[key],
                        "Difference observed between stat output "
                        "of mountpoint and bricks for file %s" % fname)
Example #7
 def _check_permissions_of_dir(self):
     """Check permissions of dir created."""
     for brick_path in get_all_bricks(self.mnode, self.volname):
         node, path = brick_path.split(":")
         ret = get_file_stat(node, "{}/dir".format(path))
         self.assertEqual(
             int(ret["access"]), 755,
             "Unexpected: Permissions of dir are %s and not %d" %
             (ret["access"], 755))
     g.log.info("Permissions of dir are correct on all bricks")
Example #8
 def _check_filetype_of_files_from_mountpoint(self):
     """Check filetype of files from mountpoint"""
     for filetype in self.filetype_list:
         # Check if filetype is as expected
         ret = get_file_stat(
             self.clients[0],
             self.list_of_device_files[self.filetype_list.index(filetype)])
         self.assertEqual(
             ret['filetype'], filetype,
             "File type is not as expected for %s" % filetype)
Example #9
    def are_stat_timestamps_equal(self):
        """Check if atime/mtime/ctime in stat info are identical"""
        timestamps = []
        for brick_path in self.bricks_list:
            server, brick = brick_path.split(':')
            stat_data = get_file_stat(server, "%s/dir1" % brick)
            ts_string = "{}-{}-{}".format(stat_data['epoch_atime'],
                                          stat_data['epoch_mtime'],
                                          stat_data['epoch_ctime'])
            timestamps.append(ts_string)

        g.log.debug("stat list = %s", ''.join(map(str, timestamps)))
        return timestamps.count(timestamps[0]) == len(timestamps)
Example #10
    def test_ctime_updated_if_mtime_is_updated(self):
        """
        Whenever atime or mtime gets updated, ctime must also get updated
        1. test with features.ctime enabled
        2. touch /mnt/file1
        3. stat /mnt/file1
        4. sleep 1;
        5. touch -m -d "2020-01-01 12:00:00" /mnt/file1
        6. stat /mnt/file1
        """
        # Enable features.ctime
        ret = set_volume_options(self.mnode, self.volname,
                                 {'features.ctime': 'on'})
        self.assertTrue(ret,
                        'failed to enable ctime feature on %s' % self.volume)

        # Create a file on the mountpoint
        objectname = 'file_zyx1'
        objectpath = ('%s/%s' % (self.mounts[0].mountpoint, objectname))
        create_file_cmd = "touch {}".format(objectpath)
        modify_mtime_cmd = (
            'touch -m -d "2020-01-01 12:00:00" {}'.format(objectpath))
        ret, _, _ = g.run(self.mounts[0].client_system, create_file_cmd)
        self.assertFalse(ret, "File creation failed on the mountpoint")

        # Get stat of the file
        stat_data = get_file_stat(self.mounts[0].client_system, objectpath)
        self.assertIsNotNone(
            stat_data,
            "Failed to get stat of the file {}".format(objectname))
        ret, _, _ = g.run(self.mounts[0].client_system, modify_mtime_cmd)
        self.assertFalse(ret, "Failed to run {}".format(modify_mtime_cmd))

        sleep(3)
        stat_data1 = get_file_stat(self.mounts[0].client_system, objectpath)

        # Check if mtime and ctime are changed
        for key in ('mtime', 'ctime'):
            self.assertNotEqual(stat_data[key], stat_data1[key],
                                "The {} value did not change after modifying"
                                " mtime".format(key))
Example #11
    def _check_user_permission(self):
        """
        Verify permissions on MP and file
        """
        stat_mp_dict = get_file_stat(self.client, self.mountpoint)
        self.assertIsNotNone(stat_mp_dict,
                             "stat on %s failed" % self.mountpoint)
        self.assertEqual(
            stat_mp_dict['access'], '777', "Expected 777 "
            "but found %s" % stat_mp_dict['access'])
        g.log.info("File permissions for mountpoint are 777 as expected")

        # check owner and group of random file
        fpath = self.mountpoint + "/d1/f.1"
        stat_dict = get_file_stat(self.client, fpath)
        self.assertIsNotNone(stat_dict, "stat on %s failed" % fpath)
        self.assertEqual(
            stat_dict['username'], self.user,
            "Expected %s but found %s" % (self.user, stat_dict['username']))
        self.assertEqual(
            stat_dict['groupname'], self.user,
            "Expected %s but found %s" % (self.user, stat_dict['groupname']))
        g.log.info("User and Group are %s as expected", self.user)
Example #12
    def _check_change_time_brick(self, file_name):
        """Find out the change time for the file on backend-bricks"""
        brick_list = get_pathinfo(self.client,
                                  "{}/{}".format(self.m_point, file_name))
        self.assertNotEqual(brick_list, 0,
                            "Failed to get bricklist for {}".format(file_name))

        brick_mtime = []
        for brick in brick_list['brickdir_paths']:
            host, path = brick.split(':')
            cmd = "ls -lR {}".format(path)
            ret, _, _ = g.run(host, cmd)
            self.assertEqual(ret, 0, "Lookup failed on"
                             " brick:{}".format(path))
            file_ctime_brick = (get_file_stat(host, path))['epoch_ctime']
            brick_mtime.append(file_ctime_brick)
        return brick_mtime
Example #13
 def _verify_stat_info(self, nodes_to_check, test_file):
     """
     Helper method to verify stat on all bricks and client.
     """
     for node in nodes_to_check:
         filepath = nodes_to_check[node] + "/" + test_file
         stat_dict = get_file_stat(node, filepath)
         self.assertIsNotNone(stat_dict, "stat on {} failed"
                              .format(test_file))
         self.assertEqual(stat_dict['username'], self.user,
                          "Expected qa but found {}"
                          .format(stat_dict['username']))
         self.assertEqual(stat_dict['groupname'], self.user,
                          "Expected gid qa but found {}"
                          .format(stat_dict['groupname']))
         self.assertEqual(stat_dict['access'], '777',
                          "Expected permission 777 but found {}"
                          .format(stat_dict['access']))
Example #14
    def verify_gfid_and_link_count(self, dirname, filename):
        """
        check that the dir and all files under it have the same gfid on all 3
        bricks and that they have the .glusterfs entry as well.
        """
        dir_gfids = dict()
        file_gfids = dict()
        bricks_list = get_all_bricks(self.mnode, self.volname)
        for brick in bricks_list:
            brick_node, brick_path = brick.split(":")

            ret = get_fattr(brick_node, '%s/%s' % (brick_path, dirname),
                            'trusted.gfid')
            self.assertIsNotNone(
                ret, "trusted.gfid is not present "
                "on %s/%s" % (brick_path, dirname))
            dir_gfids.setdefault(dirname, []).append(ret)

            ret = get_fattr(brick_node,
                            '%s/%s/%s' % (brick_path, dirname, filename),
                            'trusted.gfid')
            self.assertIsNotNone(
                ret, "trusted.gfid is not present on "
                "%s/%s/%s" % (brick_path, dirname, filename))
            file_gfids.setdefault(filename, []).append(ret)

            stat_data = get_file_stat(
                brick_node, "%s/%s/%s" % (brick_path, dirname, filename))
            self.assertEqual(stat_data["links"], "2", 'Link count is not 2')

        for key in dir_gfids:
            self.assertTrue(
                all(value == dir_gfids[key][0] for value in dir_gfids[key]),
                'gfids do not '
                'match for %s on all bricks' % dirname)
        for key in file_gfids:
            self.assertTrue(
                all(value == file_gfids[key][0] for value in file_gfids[key]),
                'gfids do not '
                'match for %s/%s on all bricks' % (dirname, filename))
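The 'same gfid on all bricks' assertions above reduce to checking that a list holds a single distinct value; a tiny standalone sketch of that pattern (the gfid strings are made up):

def all_equal(values):
    """True if every element of a non-empty sequence is identical."""
    return len(set(values)) == 1

assert all_equal(['0xabc', '0xabc', '0xabc'])
assert not all_equal(['0xabc', '0xdef', '0xabc'])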
Example #15
    def test_heal_gfid_1x3(self):
        """
        Description: This test case verifies the gfid self-heal on a 1x3
                 replicate volume.
                 1. file created at mount point
                 2. 2 bricks brought down
                 3. file deleted
                 4. created a new file from the mount point
                 5. all bricks brought online
                 6. check if gfid worked correctly
        """

        g.log.info("setting the quorum type to fixed")
        options = {"cluster.quorum-type": "fixed"}
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, "unable to set the quorum type to fixed")
        g.log.info("Successfully set the quorum type to fixed")

        g.log.info("creating a file from mount point")
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("python %s create_files "
                   "-f 1 --base-file-name test_file --fixed-file-size 10k %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
        # Validate I/O
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        g.log.info("Successfully created a file from mount point")

        # getting list of all bricks
        all_bricks = get_all_bricks(self.mnode, self.volname)
        self.assertIsNotNone(all_bricks, "unable to get list of bricks")
        g.log.info("bringing down brick1 and brick2")
        ret = bring_bricks_offline(self.volname, all_bricks[:2])
        self.assertTrue(ret, "unable to bring bricks offline")
        g.log.info("Successfully brought the following bricks offline "
                   ": %s", str(all_bricks[:2]))

        g.log.info("deleting the file from mount point")
        command = "rm -f " + self.mounts[0].mountpoint + "/test_file0.txt"
        ret, _, _ = g.run(self.mounts[0].client_system, command)
        self.assertEqual(ret, 0, "unable to remove file from mount point")
        g.log.info("Successfully deleted file from mountpoint")

        g.log.info("creating a new file of same name and different size "
                   "from mount point")
        all_mounts_procs = []
        for mount_obj in self.mounts:
            cmd = ("python %s create_files "
                   "-f 1 --base-file-name test_file --fixed-file-size 1M %s" %
                   (self.script_upload_path, mount_obj.mountpoint))
            proc = g.run_async(mount_obj.client_system,
                               cmd,
                               user=mount_obj.user)
            all_mounts_procs.append(proc)
        # Validate I/O
        self.assertTrue(validate_io_procs(all_mounts_procs, self.mounts),
                        "IO failed on some of the clients")
        g.log.info("Successfully created a new file of same name "
                   "from mount point")

        g.log.info("bringing bricks 1 and 2 back online")
        ret = bring_bricks_online(self.mnode, self.volname, all_bricks[:2])
        self.assertTrue(ret, "unable to bring bricks online")
        g.log.info("Successfully brought the following bricks online "
                   ": %s", str(all_bricks[:2]))

        g.log.info("checking if stat structure of the file is returned")
        ret = get_file_stat(self.mounts[0].client_system,
                            self.mounts[0].mountpoint + '/test_file0.txt')
        self.assertTrue(ret, "unable to get file stats")
        g.log.info("file stat structure returned successfully")

        g.log.info("checking if the heal has completed")
        ret = is_heal_complete(self.mnode, self.volname)
        self.assertTrue(ret, "heal not completed")
        g.log.info("Self heal was completed successfully")

        g.log.info("checking if the areequal checksum of all the bricks in "
                   "the subvol match")
        checksum_list = []
        for brick in all_bricks:
            node, brick_path = brick.split(':')
            command = "arequal-checksum -p " + brick_path + \
                      " -i .glusterfs -i .landfill"
            ret, out, _ = g.run(node, command)
            self.assertEqual(
                ret, 0, "unable to get the arequal checksum "
                "of the brick")
            checksum_list.append(out)
            # checking file size of healed file on each brick to verify
            # correctness of choice for sink and source
            stat_dict = get_file_stat(node, brick_path + '/test_file0.txt')
            self.assertEqual(
                stat_dict['size'], '1048576',
                "file size of healed file is different "
                "than expected")
        flag = all(val == checksum_list[0] for val in checksum_list)
        self.assertTrue(flag, "the arequal checksum of all bricks is"
                        " not the same")
        g.log.info("the arequal checksum of all the bricks in the subvol "
                   "is same")
Example #16
    def test_file_access(self):
        """
        Test file access.
        """
        # pylint: disable=protected-access
        # pylint: disable=too-many-locals
        # pylint: disable=too-many-statements
        mount_obj = self.mounts[0]
        mountpoint = mount_obj.mountpoint

        # get subvol list
        subvols = (get_subvols(self.mnode, self.volname))['volume_subvols']
        self.assertIsNotNone(subvols, "failed to get subvols")

        # create a file
        srcfile = mountpoint + '/testfile'
        ret, _, err = g.run(self.clients[0], ("touch %s" % srcfile))
        self.assertEqual(ret, 0, ("File creation failed for %s err %s",
                                  srcfile, err))
        g.log.info("testfile creation successful")

        # find hashed subvol
        srchashed, scount = find_hashed_subvol(subvols, "/", "testfile")
        self.assertIsNotNone(srchashed, "could not find srchashed")
        g.log.info("hashed subvol for srcfile %s subvol count %s",
                   srchashed._host, str(scount))

        # rename the file such that the new name hashes to a new subvol
        tmp = find_new_hashed(subvols, "/", "testfile")
        self.assertIsNotNone(tmp, "could not find new hashed for dstfile")
        g.log.info("dst file name : %s dst hashed_subvol : %s "
                   "subvol count : %s", tmp.newname,
                   tmp.hashedbrickobject._host, str(tmp.subvol_count))

        dstname = str(tmp.newname)
        dstfile = mountpoint + "/" + dstname
        dsthashed = tmp.hashedbrickobject
        dcount = tmp.subvol_count
        ret, _, err = g.run(self.clients[0], ("mv %s %s" %
                                              (srcfile, dstfile)))
        self.assertEqual(ret, 0, ("rename failed for %s err %s",
                                  srcfile, err))
        g.log.info("cmd: mv srcfile dstfile successful")

        # check that on dsthash_subvol the file is a linkto file
        filepath = dsthashed._fqpath + "/" + dstname
        file_stat = get_file_stat(dsthashed._host, filepath)
        self.assertEqual(file_stat['access'], "1000", ("Expected file "
                                                       "permission to be 1000"
                                                       " on subvol %s",
                                                       dsthashed._host))
        g.log.info("dsthash_subvol has the expected linkto file")

        # check on srchashed the file is a data file
        filepath = srchashed._fqpath + "/" + dstname
        file_stat = get_file_stat(srchashed._host, filepath)
        self.assertNotEqual(file_stat['access'], "1000", ("Expected file "
                                                          "permission not to "
                                                          "be 1000 on subvol "
                                                          "%s",
                                                          srchashed._host))

        # Bring down the hashed subvol of dstfile(linkto file)
        ret = bring_bricks_offline(self.volname, subvols[dcount])
        self.assertTrue(ret, ('Error in bringing down subvolume %s',
                              subvols[dcount]))
        g.log.info('dst subvol %s is offline', subvols[dcount])

        # Need to access the file through a fresh lookup through a new mount
        # create a new dir(choosing server to do a mount)
        ret, _, _ = g.run(self.mnode, ("mkdir -p /mnt"))
        self.assertEqual(ret, 0, ('mkdir of mount dir failed'))
        g.log.info("mkdir of mount dir succeeded")

        # do a temp mount
        ret = mount_volume(self.volname, self.mount_type, "/mnt",
                           self.mnode, self.mnode)
        self.assertTrue(ret, ('temporary mount failed'))
        g.log.info("temporary mount succeeded")

        # check that file is accessible (stat)
        ret, _, _ = g.run(self.mnode, ("stat /mnt/%s" % dstname))
        self.assertEqual(ret, 0, ('stat failed for dst file %s', dstname))
        g.log.info("stat on /mnt/%s successful", dstname)

        # cleanup temporary mount
        ret = umount_volume(self.mnode, "/mnt")
        self.assertTrue(ret, ('failed to unmount the temporary mount'))
        g.log.info("umount successful")

        # Bring up the hashed subvol
        ret = bring_bricks_online(self.mnode, self.volname, subvols[dcount],
                                  bring_bricks_online_methods=None)
        self.assertTrue(ret, "Error in bringing back subvol online")
        g.log.info('Subvol is back online')

        # now bring down the cached subvol
        ret = bring_bricks_offline(self.volname, subvols[scount])
        self.assertTrue(ret, ('Error in bringing down subvolume %s',
                              subvols[scount]))
        g.log.info('target subvol %s is offline', subvols[scount])

        # file access should fail
        ret, _, _ = g.run(self.clients[0], ("stat %s" % dstfile))
        self.assertEqual(ret, 1, ('stat on %s should have failed', dstfile))
        g.log.info("dstfile access failed as expected")
Example #17
    def test_metadata_self_heal_client_side_heal(self):
        """
        Testcase steps:
        1.Turn off the option self-heal-daemon
        2.Create IO
        3.Calculate arequal of the bricks and mount point
        4.Bring down "brick1" process
        5.Change the permissions of the directories and files
        6.Change the ownership of the directories and files
        7.Change the group of the directories and files
        8.Bring back the brick "brick1" process
        9.Execute "find . | xargs stat" from the mount point to trigger heal
        10.Verify the changes in permissions are not self healed on brick1
        11.Verify the changes in permissions on all bricks but brick1
        12.Verify the changes in ownership are not self healed on brick1
        13.Verify the changes in ownership on all the bricks but brick1
        14.Verify the changes in group are not successfully self-healed
           on brick1
        15.Verify the changes in group on all the bricks but brick1
        16.Turn on the option metadata-self-heal
        17.Execute "find . | xargs md5sum" from the mount point to trigger heal
        18.Wait for heal to complete
        19.Verify the changes in permissions are self-healed on brick1
        20.Verify the changes in ownership are successfully self-healed
           on brick1
        21.Verify the changes in group are successfully self-healed on brick1
        22.Calculate arequal check on all the bricks and mount point
        """
        # Setting options
        ret = set_volume_options(self.mnode, self.volname,
                                 {"self-heal-daemon": "off"})
        self.assertTrue(ret, 'Failed to set option self-heal-daemon '
                        'to OFF')
        g.log.info("Options are set successfully")

        # Creating files on client side
        self.test_meta_data_self_heal_folder = 'test_meta_data_self_heal'
        for mount_object in self.mounts:
            command = ("cd {0}/ ; mkdir {1} ; cd {1}/ ;"
                       "for i in `seq 1 100` ; "
                       "do mkdir dir.$i ; "
                       "for j in `seq 1 5` ; "
                       "do dd if=/dev/urandom of=dir.$i/file.$j "
                       "bs=1K count=$j ; done ; done ;".format
                       (mount_object.mountpoint,
                        self.test_meta_data_self_heal_folder))
            proc = g.run_async(mount_object.client_system, command,
                               user=mount_object.user)
            self.all_mounts_procs.append(proc)

        # Validate IO
        self.validate_io_on_clients()

        # Calculate and check arequal of the bricks and mount point
        self.check_arequal_from_mount_point_and_bricks()

        # Select bricks to bring offline from a replica set
        subvols_dict = get_subvols(self.mnode, self.volname)
        subvols = subvols_dict['volume_subvols']
        bricks_to_bring_offline = []
        bricks_to_be_online = []
        for subvol in subvols:
            bricks_to_bring_offline.append(subvol[0])
            for brick in subvol[1:]:
                bricks_to_be_online.append(brick)

        # Bring bricks offline
        ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
        self.assertTrue(ret, 'Failed to bring bricks %s offline' %
                        bricks_to_bring_offline)

        ret = are_bricks_offline(self.mnode, self.volname,
                                 bricks_to_bring_offline)
        self.assertTrue(ret, 'Bricks %s are not offline'
                        % bricks_to_bring_offline)
        g.log.info('Bringing bricks %s offline is successful',
                   bricks_to_bring_offline)

        # Change the permissions of the directories and files
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            command = ('cd {}/{}; '
                       'for i in `seq 1 100` ; '
                       'do chmod 555 dir.$i ; done ; '
                       'for i in `seq 1 50` ; '
                       'do for j in `seq 1 5` ; '
                       'do chmod 666 dir.$i/file.$j ; done ; done ; '
                       'for i in `seq 51 100` ; '
                       'do for j in `seq 1 5` ; '
                       'do chmod 444 dir.$i/file.$j ; done ; done ;'
                       .format(mount_obj.mountpoint,
                               self.test_meta_data_self_heal_folder))

            proc = g.run_async(mount_obj.client_system, command,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        self.validate_io_on_clients()

        # Change the ownership of the directories and files
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            command = ('cd {}/{} ; '
                       'for i in `seq 1 35` ; '
                       'do chown -R qa_func dir.$i ; done ; '
                       'for i in `seq 36 70` ; '
                       'do chown -R qa_system dir.$i ; done ; '
                       'for i in `seq 71 100` ; '
                       'do chown -R qa_perf dir.$i ; done ;'
                       .format(mount_obj.mountpoint,
                               self.test_meta_data_self_heal_folder))
            proc = g.run_async(mount_obj.client_system, command,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        self.validate_io_on_clients()

        # Change the group of the directories and files
        self.all_mounts_procs = []
        for mount_obj in self.mounts:
            command = ('cd {}/{}; '
                       'for i in `seq 1 100` ; '
                       'do chgrp -R qa_all dir.$i ; done ;'
                       .format(mount_obj.mountpoint,
                               self.test_meta_data_self_heal_folder))

            proc = g.run_async(mount_obj.client_system, command,
                               user=mount_obj.user)
            self.all_mounts_procs.append(proc)
        self.io_validation_complete = False

        # Validate IO
        self.validate_io_on_clients()

        # Bring brick online
        ret = bring_bricks_online(self.mnode, self.volname,
                                  bricks_to_bring_offline)
        self.assertTrue(ret, 'Failed to bring bricks %s online' %
                        bricks_to_bring_offline)
        g.log.info('Bringing bricks %s online is successful',
                   bricks_to_bring_offline)

        # Trigger heal from mount point
        self.trigger_heal_from_mount_point()

        # Verify the changes are not self healed on brick1 for each subvol
        for brick in bricks_to_bring_offline:
            node, brick_path = brick.split(':')

            dir_list = get_dir_contents(node, "{}/{}".format(
                brick_path, self.test_meta_data_self_heal_folder))
            self.assertIsNotNone(dir_list, "Dir list from "
                                 "brick is empty")
            g.log.info("Successfully got dir list from brick")

            # Verify changes for dirs
            for folder in dir_list:

                ret = get_file_stat(node, "{}/{}/{}".format(
                    brick_path, self.test_meta_data_self_heal_folder,
                    folder))

                self.assertEqual('755', ret['access'],
                                 "Permissions mismatch on node {}"
                                 .format(node))

                self.assertEqual('root', ret['username'],
                                 "User id mismatch on node {}"
                                 .format(node))

                self.assertEqual('root', ret['groupname'],
                                 "Group id mismatch on node {}"
                                 .format(node))

                # Get list of files for each dir
                file_list = get_dir_contents(node, "{}/{}/{}".format(
                    brick_path, self.test_meta_data_self_heal_folder,
                    folder))
                self.assertIsNotNone(file_list, "File list from "
                                     "brick is empty.")
                g.log.info("Successfully got file list from brick.")

                if file_list:
                    for file_name in file_list:

                        ret = get_file_stat(node, "{}/{}/{}/{}".format(
                            brick_path, self.test_meta_data_self_heal_folder,
                            folder, file_name))

                        self.assertEqual('644', ret['access'],
                                         "Permissions mismatch on node"
                                         " {} for file {}".format(node,
                                                                  file_name))

                        self.assertEqual('root', ret['username'],
                                         "User id mismatch on node"
                                         " {} for file {}".format(node,
                                                                  file_name))

                        self.assertEqual('root', ret['groupname'],
                                         "Group id mismatch on node"
                                         " {} for file {}".format(node,
                                                                  file_name))

        # Verify the changes are self healed on all bricks except brick1
        # for each subvol
        self.check_permissions_on_bricks(bricks_to_be_online)

        # Setting options
        ret = set_volume_options(self.mnode, self.volname,
                                 {"metadata-self-heal": "on"})
        self.assertTrue(ret, 'Failed to set options to ON.')
        g.log.info("Options are set successfully")

        # Trigger heal from mount point
        self.trigger_heal_from_mount_point()

        # Monitor heal completion
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal has not yet completed')

        # Check if heal is completed
        ret = is_heal_complete(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal is not complete')
        g.log.info('Heal is completed successfully')

        # Check for split-brain
        ret = is_volume_in_split_brain(self.mnode, self.volname)
        self.assertFalse(ret, 'Volume is in split-brain state')
        g.log.info('Volume is not in split-brain state')

        # Verify the changes are self healed on brick1 for each subvol
        self.check_permissions_on_bricks(bricks_to_bring_offline)

        # Calculate and check arequal of the bricks and mount point
        self.check_arequal_from_mount_point_and_bricks()
Example #18
    def check_permissions_on_bricks(self, bricks_list):
        """
        Check permissions on a given set of bricks.
        """
        for brick in bricks_list:
            node, brick_path = brick.split(':')
            dir_list = get_dir_contents(node, "{}/{}".format(
                brick_path, self.test_meta_data_self_heal_folder))
            self.assertIsNotNone(dir_list, "Dir list from "
                                 "brick is empty")
            g.log.info("Successfully got dir list from brick")

            # Verify changes for dirs
            for folder in dir_list:
                ret = get_file_stat(node, "{}/{}/{}".format(
                    brick_path, self.test_meta_data_self_heal_folder, folder))

                self.assertEqual('555', ret['access'],
                                 "Permissions mismatch on node {}"
                                 .format(node))

                self.assertEqual('1003', ret['gid'],
                                 "Group mismatch on node {}"
                                 .format(node))

                # Get list of files for each dir
                file_list = get_dir_contents(node, "{}/{}/{}".format(
                    brick_path, self.test_meta_data_self_heal_folder,
                    folder))
                self.assertIsNotNone(file_list, "File list from "
                                     "brick is empty.")
                g.log.info("Successfully got file list from brick.")

                # Verify for group for each file
                if file_list:
                    for file_name in file_list:
                        ret = get_file_stat(node, "{}/{}/{}/{}".format(
                            brick_path, self.test_meta_data_self_heal_folder,
                            folder, file_name))

                        self.assertEqual('1003', ret['gid'],
                                         "Group mismatch on node {}"
                                         .format(node))

            # Verify permissions for files in dirs 1..50
            for i in range(1, 51):

                file_list = get_dir_contents(node, "{}/{}/dir.{}".format(
                    brick_path, self.test_meta_data_self_heal_folder,
                    str(i)))
                self.assertIsNotNone(file_list, "File list from "
                                     "brick is empty.")
                g.log.info("Successfully got file list from brick.")

                if file_list:
                    for file_name in file_list:

                        ret = get_file_stat(node, "{}/{}/dir.{}/{}".format(
                            brick_path, self.test_meta_data_self_heal_folder,
                            str(i), file_name))
                        self.assertEqual('666', ret['access'],
                                         "Permissions mismatch on node {}"
                                         .format(node))

            # Verify permissions for files in dirs 51..100
            for i in range(51, 101):

                file_list = get_dir_contents(node, "{}/{}/dir.{}".format(
                    brick_path, self.test_meta_data_self_heal_folder,
                    str(i)))
                self.assertIsNotNone(file_list, "File list from "
                                     "brick is empty.")
                g.log.info("Successfully got file list from brick.")

                if file_list:
                    for file_name in file_list:

                        ret = get_file_stat(node, "{}/{}/dir.{}/{}".format(
                            brick_path, self.test_meta_data_self_heal_folder,
                            str(i), file_name))
                        self.assertEqual('444', ret['access'],
                                         "Permissions mismatch on node {}"
                                         .format(node))

            # Verify ownership for dirs 1..35
            for i in range(1, 36):

                ret = get_file_stat(node, "{}/{}/dir.{}".format(
                    brick_path, self.test_meta_data_self_heal_folder,
                    str(i)))
                self.assertEqual('1000', ret['uid'],
                                 "User id mismatch on node {}"
                                 .format(node))

                # Verify ownership for files in dirs
                file_list = get_dir_contents(node, "{}/{}/dir.{}".format(
                    brick_path, self.test_meta_data_self_heal_folder,
                    str(i)))
                self.assertIsNotNone(file_list, "File list from "
                                     "brick is empty.")
                g.log.info("Successfully got file list from brick.")

                if file_list:
                    for file_name in file_list:

                        ret = get_file_stat(node, "{}/{}/dir.{}/{}".format(
                            brick_path, self.test_meta_data_self_heal_folder,
                            str(i), file_name))
                        self.assertEqual('1000', ret['uid'],
                                         "User id mismatch on node {}"
                                         .format(node))

            # Verify ownership for dirs 36..70
            for i in range(36, 71):

                ret = get_file_stat(node, "{}/{}/dir.{}".format(
                    brick_path, self.test_meta_data_self_heal_folder,
                    str(i)))
                self.assertEqual('1001', ret['uid'],
                                 "User id mismatch on node {}"
                                 .format(node))

                # Verify ownership for files in dirs
                file_list = get_dir_contents(node, "{}/{}/dir.{}".format(
                    brick_path, self.test_meta_data_self_heal_folder,
                    str(i)))
                self.assertIsNotNone(file_list, "File list from "
                                     "brick is empty.")
                g.log.info("Successfully got file list from brick.")

                if file_list:
                    for file_name in file_list:

                        ret = get_file_stat(node, "{}/{}/dir.{}/{}".format(
                            brick_path, self.test_meta_data_self_heal_folder,
                            str(i), file_name))
                        self.assertEqual('1001', ret['uid'],
                                         "User id mismatch on node {}"
                                         .format(node))

            # Verify ownership for dirs 71..100
            for i in range(71, 101):

                ret = get_file_stat(node, "{}/{}/dir.{}".format(
                    brick_path, self.test_meta_data_self_heal_folder,
                    str(i)))
                self.assertEqual('1002', ret['uid'],
                                 "User id mismatch on node {}"
                                 .format(node))

                # Verify ownership for files in dirs
                file_list = get_dir_contents(node, "{}/{}/dir.{}".format(
                    brick_path, self.test_meta_data_self_heal_folder,
                    str(i)))
                self.assertIsNotNone(file_list, "File list from "
                                     "brick is empty.")
                g.log.info("Successfully got file list from brick.")

                if file_list:
                    for file_name in file_list:

                        ret = get_file_stat(node, "{}/{}/dir.{}/{}".format(
                            brick_path, self.test_meta_data_self_heal_folder,
                            str(i), file_name))
                        self.assertEqual('1002', ret['uid'],
                                         "User id mismatch on node {}"
                                         .format(node))
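The repeated range loops above could also be driven by a small table of (directory range, expected stat values); a hedged refactoring sketch using the permission and uid values from this example (not the test's actual code):

# Expected stat values per directory index, taken from the checks above.
EXPECTED = (
    (range(1, 51), {'access': '666'}),
    (range(51, 101), {'access': '444'}),
    (range(1, 36), {'uid': '1000'}),
    (range(36, 71), {'uid': '1001'}),
    (range(71, 101), {'uid': '1002'}),
)


def expected_values_for(dir_index):
    """Merge every expectation that applies to dir.<dir_index>."""
    merged = {}
    for dir_range, values in EXPECTED:
        if dir_index in dir_range:
            merged.update(values)
    return merged


assert expected_values_for(40) == {'access': '666', 'uid': '1001'}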
Example #19
 def validate_timestamp(self, objectpath, objectname):
     ret = get_file_stat(self.mounts[0].client_system, objectpath)
     self.assertTrue(
         bool(ret["atime"] == ret["ctime"] == ret["mtime"]),
         "a|m|c timestamps on {} are not equal".format(objectname))
     g.log.info("a|m|c timestamps on %s are the same", objectname)
Example #20
    def test_file_permissions(self):
        """
        Description:
        - create file file.txt on mountpoint
        - change uid, gid and permission from client
        - check uid, gid and permission on client and all servers
        """

        # create file
        cmd = ("dd if=/dev/urandom of=%s/file.txt bs=1M count=1"
               % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "File creation failed")

        # Adding servers and client in single dict to check permissions
        nodes_to_check = {}
        all_bricks = get_all_bricks(self.mnode, self.volname)
        for brick in all_bricks:
            node, brick_path = brick.split(':')
            nodes_to_check[node] = brick_path
        nodes_to_check[self.mounts[0].client_system] = \
            self.mounts[0].mountpoint

        # check file is created on all bricks and client
        for node in nodes_to_check:
            filepath = nodes_to_check[node] + "/file.txt"
            stat_dict = get_file_stat(node, filepath)
            self.assertIsNotNone(stat_dict, "stat on %s failed" % filepath)
            self.assertEqual(stat_dict['filetype'], 'regular file',
                             "Expected regular file but found %s"
                             % stat_dict['filetype'])

        # get file stat info from client
        fpath = self.mounts[0].mountpoint + "/file.txt"
        stat_dict = get_file_stat(self.clients[0], fpath)
        self.assertIsNotNone(stat_dict, "stat on %s failed" % fpath)
        self.assertEqual(stat_dict['uid'], '0', "Expected uid 0 but found %s"
                         % stat_dict['uid'])
        self.assertEqual(stat_dict['gid'], '0', "Expected gid 0 but found %s"
                         % stat_dict['gid'])
        self.assertEqual(stat_dict['access'], '644', "Expected permission 644"
                         " but found %s" % stat_dict['access'])

        # change uid, gid and permission from client
        cmd = ("chown qa %s" % fpath)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "chown failed")

        cmd = ("chgrp qa %s" % fpath)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "chgrp failed")

        cmd = ("chmod 777 %s" % fpath)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "chmod failed")

        # Verify that the changes are successful on bricks and client
        for node in nodes_to_check:
            filepath = nodes_to_check[node] + "/file.txt"
            stat_dict = get_file_stat(node, filepath)
            self.assertIsNotNone(stat_dict, "stat on %s failed" % filepath)
            self.assertEqual(stat_dict['username'], 'qa',
                             "Expected qa but found %s"
                             % stat_dict['username'])
            self.assertEqual(stat_dict['groupname'], 'qa',
                             "Expected gid qa but found %s"
                             % stat_dict['groupname'])
            self.assertEqual(stat_dict['access'], '777',
                             "Expected permission 777  but found %s"
                             % stat_dict['access'])
Example #21
    def test_file_permissions(self):
        # create file
        fpath = self.mounts[0].mountpoint + "/file.txt"
        cmd = ("echo 'hello_world' > %s" % fpath)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "File creation failed")

        # check file is created on all bricks
        for brick in self.bricks_list:
            node, path = brick.split(':')
            filepath = path + "/file.txt"
            stat_dict = get_file_stat(node, filepath)
            self.assertIsNotNone(stat_dict, "stat on %s failed" % filepath)
            self.assertEqual(stat_dict['filetype'], 'regular file',
                             "Expected a regular file but found %s"
                             % stat_dict['filetype'])

        # get file stat info from client
        stat_dict = get_file_stat(self.clients[0], fpath)
        self.assertIsNotNone(stat_dict, "stat on %s failed" % fpath)
        self.assertEqual(stat_dict['uid'], '0', "Expected uid 0 but found %s"
                         % stat_dict['uid'])
        self.assertEqual(stat_dict['gid'], '0', "Expected gid 0 but found %s"
                         % stat_dict['gid'])
        self.assertEqual(stat_dict['access'], '644', "Expected permission 644 "
                         " but found %s" % stat_dict['access'])

        # change uid, gid and permission from client
        cmd = ("chown qa %s" % fpath)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "chown failed")

        cmd = ("chgrp qa %s" % fpath)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "chgrp failed")

        cmd = ("chmod 777 %s" % fpath)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "chmod failed")

        # Verify that the changes are successful on client
        stat_dict = get_file_stat(self.clients[0], fpath)
        self.assertIsNotNone(stat_dict, "stat on %s failed" % fpath)
        self.assertEqual(stat_dict['uid'], '1000', "Expected uid 1000 (qa) but"
                         " found %s" % stat_dict['uid'])
        self.assertEqual(stat_dict['gid'], '1000', "Expected gid 1000 (qa) but"
                         " found %s" % stat_dict['gid'])
        self.assertEqual(stat_dict['access'], '777', "Expected permission 777 "
                         " but found %s" % stat_dict['access'])

        # Verify that the changes are successful on bricks as well
        for brick in self.bricks_list:
            node, path = brick.split(':')
            filepath = path + "/file.txt"
            stat_dict = get_file_stat(node, filepath)
            self.assertIsNotNone(stat_dict, "stat on %s failed" % filepath)
            self.assertEqual(stat_dict['uid'], '1000', "Expected uid 1000 (qa)"
                             " but found %s" % stat_dict['uid'])
            self.assertEqual(stat_dict['gid'], '1000', "Expected gid 1000 (qa)"
                             " but found %s" % stat_dict['gid'])
            self.assertEqual(stat_dict['access'], '777', "Expected permission"
                             " 777  but found %s" % stat_dict['access'])
Example #22
    def test_directory_heal(self):
        '''
        Test directory healing.
        '''
        # pylint: disable=too-many-locals
        # pylint: disable=too-many-statements
        # pylint: disable=protected-access

        mount_obj = self.mounts[0]
        mountpoint = mount_obj.mountpoint

        # directory that needs to be created
        parent_dir = mountpoint + '/parent'
        target_dir = mountpoint + '/parent/child'

        # create parent dir
        ret, _, err = g.run(self.clients[0], ("mkdir %s" % parent_dir))
        self.assertEqual(ret, 0,
                         ('mkdir failed for %s err: %s', parent_dir, err))
        g.log.info("mkdir of parent directory %s successful", parent_dir)

        # find non-hashed subvol for child
        hashed, non_hashed = [], []
        hash_num = calculate_hash(self.mnode, "child")
        bricklist = get_all_bricks(self.mnode, self.volname)
        for brick in bricklist:
            ret = check_hashrange(brick + "/parent")
            hash_range_low = ret[0]
            hash_range_high = ret[1]
            if hash_range_low <= hash_num <= hash_range_high:
                hashed.append(brick)

        non_hashed = [brick for brick in bricklist if brick not in hashed]
        g.log.info("Non-hashed bricks are: %s", non_hashed)

        # bring non_hashed offline
        for brick in non_hashed:
            ret = bring_bricks_offline(self.volname, brick)
            self.assertTrue(ret, ('Error in bringing down brick %s', brick))
            g.log.info('Non-hashed brick %s is offline', brick)

        # create child directory
        runc = ("mkdir %s" % target_dir)
        ret, _, _ = g.run(self.clients[0], runc)
        self.assertEqual(ret, 0, ('failed to create dir %s' % target_dir))
        g.log.info('mkdir successful %s', target_dir)

        # Check that the dir is not created on the down brick
        for brick in non_hashed:
            non_hashed_host, dir_path = brick.split(":")
            brickpath = ("%s/parent/child" % dir_path)
            ret, _, _ = g.run(non_hashed_host, ("stat %s" % brickpath))
            self.assertEqual(ret, 1, ("Expected %s to be not present on %s" %
                                      (brickpath, non_hashed_host)))
            g.log.info("Stat of %s failed as expected", brickpath)

        # bring up the subvol
        ret = bring_bricks_online(
            self.mnode,
            self.volname,
            non_hashed,
            bring_bricks_online_methods='volume_start_force')
        self.assertTrue(ret, "Error in bringing back subvol online")
        g.log.info("Subvol is back online")

        runc = ("ls %s" % target_dir)
        ret, _, _ = g.run(self.clients[0], runc)
        self.assertEqual(ret, 0, ("Lookup on %s failed", target_dir))
        g.log.info("Lookup is successful on %s", target_dir)

        # check if the directory is created on non_hashed
        for brick in non_hashed:
            non_hashed_host, dir_path = brick.split(":")
            absolutedirpath = ("%s/parent/child" % dir_path)
            ret = get_file_stat(non_hashed_host, absolutedirpath)
            self.assertIsNotNone(ret, "Directory is not present on non_hashed")
            g.log.info("Directory is created on non_hashed subvol")

        # check if directory is healed => i.e. layout is zeroed out
        for brick in non_hashed:
            brick_path = ("%s/parent/child" % brick)
            ret = check_hashrange(brick_path)
            hash_range_low = ret[0]
            hash_range_high = ret[1]
            if not hash_range_low and not hash_range_high:
                g.log.info("Directory healing successful")
            else:
                g.log.error("Directory is not healed")
Example #23
    def test_self_heal_daemon(self):
        """
        Test Data-Self-Heal(heal command)
        Description:
        - Create directory test_hardlink_self_heal
        - Create directory test_data_self_heal
        - Creating files for hardlinks and data files
        - Get arequal before getting bricks offline
        - Select bricks to bring offline
        - Bring brick offline
        - Create hardlinks and append data to data files
        - Bring brick online
        - Wait for volume processes to be online
        - Verify volume's all process are online
        - Monitor heal completion
        - Check for split-brain
        - Get arequal after getting bricks online
        - Select bricks to bring offline
        - Bring brick offline
        - Truncate data to data files and verify hardlinks
        - Bring brick online
        - Wait for volume processes to be online
        - Verify volume's all process are online
        - Monitor heal completion
        - Check for split-brain
        - Get arequal again

        """
        # pylint: disable=too-many-branches,too-many-statements,too-many-locals
        # Creating directory test_hardlink_self_heal
        ret = mkdir(
            self.mounts[0].client_system,
            "{}/test_hardlink_self_heal".format(self.mounts[0].mountpoint))
        self.assertTrue(ret, "Failed to create directory")
        g.log.info(
            "Directory 'test_hardlink_self_heal' on %s created "
            "successfully", self.mounts[0])

        # Creating directory test_data_self_heal
        ret = mkdir(self.mounts[0].client_system,
                    "{}/test_data_self_heal".format(self.mounts[0].mountpoint))
        self.assertTrue(ret, "Failed to create directory")
        g.log.info(
            "Directory 'test_data_self_heal' on %s created "
            "successfully", self.mounts[0])

        # Creating files for hardlinks and data files
        cmd = ('cd %s/test_hardlink_self_heal;for i in `seq 1 5`;'
               'do mkdir dir.$i ; for j in `seq 1 10` ; do dd if='
               '/dev/urandom of=dir.$i/file.$j bs=1k count=$j;done; done;'
               'cd ..' % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to create file on mountpoint")
        g.log.info("Successfully created files on mountpoint")

        cmd = ('cd %s/test_data_self_heal;for i in `seq 1 100`;'
               'do dd if=/dev/urandom of=file.$i bs=128K count=$i;done;'
               'cd ..' % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to create file on mountpoint")
        g.log.info("Successfully created files on mountpoint")

        # Get arequal before getting bricks offline
        ret, result_before_offline = collect_mounts_arequal(self.mounts)
        self.assertTrue(ret, 'Failed to get arequal')
        g.log.info('Arequal before bringing bricks offline is %s',
                   result_before_offline)

        # Select bricks to bring offline
        bricks_to_bring_offline = select_volume_bricks_to_bring_offline(
            self.mnode, self.volname)
        self.assertIsNotNone(bricks_to_bring_offline, "List is empty")

        # Bring brick offline
        ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
        self.assertTrue(
            ret, 'Failed to bring bricks {} offline'.format(
                bricks_to_bring_offline))

        ret = are_bricks_offline(self.mnode, self.volname,
                                 bricks_to_bring_offline)
        self.assertTrue(
            ret, 'Bricks {} are not offline'.format(bricks_to_bring_offline))
        g.log.info('Bringing bricks %s offline is successful',
                   bricks_to_bring_offline)

        # Append data to data files and create hardlinks
        cmd = ('cd %s/test_data_self_heal;for i in `seq 1 100`;'
               'do dd if=/dev/urandom of=file.$i bs=512K count=$i ; done ;'
               'cd .. ' % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to modify data files.")
        g.log.info("Successfully modified data files")

        cmd = ('cd %s/test_hardlink_self_heal;for i in `seq 1 5` ;do '
               'for j in `seq 1 10`;do ln dir.$i/file.$j dir.$i/link_file.$j;'
               'done ; done ; cd .. ' % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Hardlinks creation failed")
        g.log.info("Successfully created hardlinks of files")

        # Bring bricks online
        ret = bring_bricks_online(self.mnode, self.volname,
                                  bricks_to_bring_offline)
        self.assertTrue(
            ret,
            'Failed to bring bricks {} online'.format(bricks_to_bring_offline))
        g.log.info('Bringing bricks %s online is successful',
                   bricks_to_bring_offline)

        # Wait for volume processes to be online
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to wait for volume {} processes to "
                              "be online".format(self.volname)))
        g.log.info(
            "Successful in waiting for volume %s processes to be "
            "online", self.volname)

        # Verify all processes of the volume are online
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(
            ret,
            ("Volume {} : All processes are not online".format(self.volname)))
        g.log.info("Volume %s : All processes are online", self.volname)

        # Monitor heal completion
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal has not yet completed')

        # Check for split-brain
        ret = is_volume_in_split_brain(self.mnode, self.volname)
        self.assertFalse(ret, 'Volume is in split-brain state')
        g.log.info('Volume is not in split-brain state')

        # Get arequal after getting bricks online
        ret, result_after_online = collect_mounts_arequal(self.mounts)
        self.assertTrue(ret, 'Failed to get arequal')
        g.log.info('Arequal after getting bricks online '
                   'is %s', result_after_online)

        # Select bricks to bring offline
        bricks_to_bring_offline = select_volume_bricks_to_bring_offline(
            self.mnode, self.volname)
        self.assertIsNotNone(bricks_to_bring_offline, "List is empty")

        # Bring brick offline
        ret = bring_bricks_offline(self.volname, bricks_to_bring_offline)
        self.assertTrue(
            ret, 'Failed to bring bricks {} offline'.format(
                bricks_to_bring_offline))

        ret = are_bricks_offline(self.mnode, self.volname,
                                 bricks_to_bring_offline)
        self.assertTrue(
            ret, 'Bricks {} are not offline'.format(bricks_to_bring_offline))
        g.log.info('Bringing bricks %s offline is successful',
                   bricks_to_bring_offline)

        # Truncate data files and verify hardlinks
        cmd = ('cd %s/test_data_self_heal ; for i in `seq 1 100` ;'
               'do truncate -s $(( $i * 128)) file.$i ; done ; cd ..' %
               self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Failed to truncate files")
        g.log.info("Successfully truncated files on mountpoint")

        file_path = ('%s/test_hardlink_self_heal/dir.{1..5}/file.{1..10}' %
                     (self.mounts[0].mountpoint))
        link_path = ('%s/test_hardlink_self_heal/dir.{1..5}/link_file.{1..10}'
                     % (self.mounts[0].mountpoint))
        file_stat = get_file_stat(self.mounts[0].client_system, file_path)
        link_stat = get_file_stat(self.mounts[0].client_system, link_path)
        self.assertEqual(file_stat, link_stat, "Verification of hardlinks "
                         "failed")
        g.log.info("Successfully verified hardlinks")

        # Bring brick online
        ret = bring_bricks_online(self.mnode, self.volname,
                                  bricks_to_bring_offline)
        self.assertTrue(
            ret,
            'Failed to bring bricks {} online'.format(bricks_to_bring_offline))
        g.log.info('Bringing bricks %s online is successful',
                   bricks_to_bring_offline)

        # Wait for volume processes to be online
        ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
        self.assertTrue(ret, ("Failed to wait for volume {} processes to "
                              "be online".format(self.volname)))
        g.log.info(
            "Successful in waiting for volume %s processes to be "
            "online", self.volname)

        # Verify all processes of the volume are online
        ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
        self.assertTrue(
            ret,
            ("Volume {} : All processes are not online".format(self.volname)))
        g.log.info("Volume %s : All processes are online", self.volname)

        # Monitor heal completion
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal has not yet completed')
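
A side note on the hardlink verification in the test above: instead of a single
stat over a shell brace-expansion pattern, each file/link pair could be compared
by inode number, which hardlinks must share. The helper below is a hypothetical
sketch (not part of the original test) that assumes the glusto `g` object is
available and the dir.N/file.N naming used earlier.

def verify_hardlinks(host, base_dir):
    """Return True if every link_file.j shares an inode with file.j."""
    for i in range(1, 6):
        for j in range(1, 11):
            pair = ["%s/dir.%d/%s.%d" % (base_dir, i, name, j)
                    for name in ("file", "link_file")]
            inodes = []
            for path in pair:
                ret, out, _ = g.run(host, "stat -c %%i %s" % path)
                if ret != 0:
                    return False
                inodes.append(out.strip())
            if inodes[0] != inodes[1]:
                return False
    return True
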
    def test_heal_on_file_appends(self):
        """
        Test steps:
        - create and mount EC volume 4+2
        - start append to a file from client
        - bring down one of the bricks (say b1)
        - wait for ~minute and bring down another brick (say b2)
        - after ~minute bring up first brick (b1)
        - check the xattrs 'ec.size', 'ec.version'
        - xattrs of online bricks should be same as an indication to heal
        """

        # Get bricks list
        bricks_list = get_online_bricks_list(self.mnode, self.volname)
        self.assertIsNotNone(bricks_list, 'Not able to get bricks list')

        # Creating a file, generate and append data to the file
        self.file_name = 'test_file'
        cmd = ("cd %s ;"
               "while true; do "
               "cat /dev/urandom | tr -dc  [:space:][:print:] "
               "| head -c 4K >> %s; sleep 2; "
               "done;" % (self.mount_obj.mountpoint, self.file_name))
        ret = g.run_async(self.client, cmd, user=self.mount_obj.user)
        self.assertIsNotNone(ret, "Not able to start IO on client")
        g.log.info('Started generating and appending data to the file')
        self.is_io_started = True

        # Select 3 bricks, 2 need to be offline and 1 will be healthy
        brick_1, brick_2, brick_3 = sample(bricks_list, 3)

        # Wait for IO to fill the bricks
        sleep(30)

        # Bring first brick offline and validate
        ret = bring_bricks_offline(self.volname, [brick_1])
        self.assertTrue(ret,
                        'Failed to bring brick {} offline'.format(brick_1))
        ret = are_bricks_offline(self.mnode, self.volname, [brick_1])
        self.assertTrue(
            ret, 'Not able to validate brick {} being '
            'offline'.format(brick_1))
        g.log.info("Brick %s is brought offline successfully", brick_1)
        self.offline_bricks.append(brick_1)

        # Wait for IO to fill the bricks
        sleep(30)

        # Bring second brick offline and validate
        ret = bring_bricks_offline(self.volname, [brick_2])
        self.assertTrue(ret,
                        'Failed to bring brick {} offline'.format(brick_2))
        ret = are_bricks_offline(self.mnode, self.volname, [brick_2])
        self.assertTrue(
            ret, 'Not able to validate brick {} being '
            'offline'.format(brick_2))
        g.log.info("Brick %s is brought offline successfully", brick_2)
        self.offline_bricks.append(brick_2)

        # Wait for IO to fill the bricks
        sleep(30)

        # Bring first brick online and validate peer status
        ret = bring_bricks_online(
            self.mnode,
            self.volname, [brick_1],
            bring_bricks_online_methods=['glusterd_restart'])
        self.assertTrue(ret, 'Not able to bring brick {} '
                        'online'.format(brick_1))
        g.log.info("Offlined brick %s is brought online successfully", brick_1)
        ret = self.validate_peers_are_connected()
        self.assertTrue(
            ret, "Peers are not in connected state after bringing "
            "an offline brick to online via `glusterd restart`")
        g.log.info("Successfully validated peers are in connected state")

        # Let the onlined brick catch up with the healthy bricks
        sleep(30)

        # Validate the xattrs are the same on the onlined and healthy bricks
        online_bricks = get_online_bricks_list(self.mnode, self.volname)
        self.assertIsNotNone(online_bricks, 'Unable to fetch online bricks')
        g.log.info('All online bricks are fetched successfully')
        for xattr in ('trusted.ec.size', 'trusted.ec.version'):
            ret = validate_xattr_on_all_bricks([brick_1, brick_3],
                                               self.file_name, xattr)
            self.assertTrue(
                ret, "{} is not same on all online "
                "bricks".format(xattr))

        # Get epoch time on the client
        ret, prev_ctime, _ = g.run(self.client, 'date +%s')
        self.assertEqual(ret, 0, 'Not able to get epoch time from client')

        # Headroom for file ctime to get updated
        sleep(5)

        # Validate the file was still being appended while checking the xattrs
        ret = get_file_stat(
            self.client, '{}/{}'.format(self.mount_obj.mountpoint,
                                        self.file_name))
        self.assertIsNotNone(ret, "Not able to get stats of the file")
        curr_ctime = ret['epoch_ctime']
        self.assertGreater(
            int(curr_ctime), int(prev_ctime), "Not able "
            "to validate data is appended to the file "
            "while checking for xaatrs")

        g.log.info("Data on all online bricks is healed and consistent")
Exemple #25
0
    def _check_change_time_mnt(self, file_name):
        """Find out the change time of a file on the mountpoint"""
        file_ctime_mnt = (get_file_stat(
            self.client, "{}/{}".format(self.m_point,
                                        file_name)))['epoch_ctime']
        return file_ctime_mnt
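
For completeness, a hypothetical companion method (not in the original example)
showing how such a helper could be used to confirm that a file's change time
keeps advancing; it assumes `sleep` is imported from the time module.

    def _assert_ctime_advances(self, file_name, wait=5):
        """Fail unless the file's change time advances within `wait` seconds."""
        before = self._check_change_time_mnt(file_name)
        sleep(wait)
        after = self._check_change_time_mnt(file_name)
        self.assertGreater(int(after), int(before),
                           "ctime of {} did not advance".format(file_name))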