Code example #1
    def test_symlink_file(self):
        """
        Description: Create symbolic link file, validate and access file
                     using it

        Steps:
        1) From mount point, create a regular file
        2) Verify that the file is stored only on the bricks mentioned in
           the trusted.glusterfs.pathinfo xattr
        3) From mount point create a symbolic link file for the created file
        4) From mount point stat the symbolic link file and the original
           file; the file inodes should be different
        5) From mount point, verify that the file contents are the same
           using "md5sum"
        6) Verify the "trusted.gfid" extended attribute of the file
           on the sub-vol
        7) Verify "readlink" on the symbolic link from the mount point
        8) From the sub-volume verify that the file contents are the same
        """
        # Create a regular file on mountpoint
        self._create_file_using_touch("test_file")

        # Check file is created on bricks as per trusted.glusterfs.pathinfo
        self._is_file_present_on_brick("test_file")

        # Create a symbolic-link file for the test_file
        ret = create_link_file(self.client,
                               "{}/test_file".format(self.m_point),
                               "{}/softlink_file".format(self.m_point),
                               soft=True)
        self.assertTrue(ret, "Failed to create symbolic link file for"
                        " test_file")
        g.log.info("Successfully created softlink_file")

        # On mountpoint perform stat on original and symbolic-link file
        # The value of inode should be different
        values = ["inode"]
        self._collect_and_compare_file_info_on_mnt("softlink_file",
                                                   values,
                                                   expected=False)

        # Check the md5sum on original and symbolic-link file on mountpoint
        self._compare_file_md5sum_on_mnt("softlink_file")

        # Compare the value of trusted.gfid for test_file and
        # symbolic-link file on backend-bricks
        self._compare_gfid_xattr_on_files("softlink_file")

        # Verify readlink on symbolic-link from mount point
        cmd = "readlink {}/softlink_file".format(self.m_point)
        ret, out, _ = g.run(self.client, cmd)
        self.assertEqual(out.strip(), "{}/test_file".format(self.m_point),
                         "Symbolic link points to incorrect file")
        g.log.info("Symbolic link points to correct file")

        # Check the md5sum on original and symbolic-link file on backend bricks
        self._compare_file_md5sum_on_bricks("softlink_file")
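
For reference, all of these examples rely on create_link_file from the glustolibs library. A minimal sketch of such a helper, assuming it simply wraps "ln" (or "ln -s" for soft links) executed on the client via g.run, might look like the following; the real implementation lives in glustolibs.gluster.glusterfile and may differ in detail.

# Hedged sketch only: the shipped helper in glustolibs.gluster.glusterfile may
# differ; this illustrates the contract assumed by the tests in this listing.
from glusto.core import Glusto as g


def create_link_file(node, file_path, link_path, soft=False):
    """Create a hard link (default) or a soft link on the given node."""
    flag = "-s " if soft else ""
    ret, _, err = g.run(node, "ln {}{} {}".format(flag, file_path, link_path))
    if ret != 0:
        g.log.error("Failed to create %s link %s -> %s: %s",
                    "soft" if soft else "hard", link_path, file_path, err)
        return False
    return True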
Code example #2
    def test_hard_link_file(self):
        """
        Description: Create hard-link file, validate and access file
                     using it

        Steps:
        1) From mount point, create a regular file
        2) Verify that the file is stored only on the bricks mentioned in
           the trusted.glusterfs.pathinfo xattr
        3) From mount point create a hard-link file for the created file
        4) From mount point stat the hard-link file and the original file;
           the file inode, permissions and size should be the same
        5) From mount point, verify that the file contents are the same
           using "md5sum"
        6) Verify the "trusted.gfid" extended attribute of the file
           on the sub-vol
        7) From the sub-volume stat the hard-link file and the original
           file; the file inode, permissions and size should be the same
        8) From the sub-volume verify that the file contents are the same
        """
        # Create a regular file
        self._create_file_using_touch("test_file")

        # Check file is created on bricks as per trusted.glusterfs.pathinfo
        self._is_file_present_on_brick("test_file")

        # Create a hard-link file for the test_file
        ret = create_link_file(self.client,
                               "{}/test_file".format(self.m_point),
                               "{}/hardlink_file".format(self.m_point))
        self.assertTrue(ret, "Failed to create hard link file for"
                        " test_file")
        g.log.info("Successfully created hardlink_file")

        # On mountpoint perform stat on original and hard-link file
        values = ["inode", "access", "size"]
        self._collect_and_compare_file_info_on_mnt("hardlink_file",
                                                   values,
                                                   expected=True)

        # Check the md5sum on original and hard-link file on mountpoint
        self._compare_file_md5sum_on_mnt("hardlink_file")

        # Compare the value of trusted.gfid for test_file and hard-link file
        # on backend-bricks
        self._compare_gfid_xattr_on_files("hardlink_file")

        # On backend bricks perform stat on original and hard-link file
        values = ["inode", "access", "size"]
        self._collect_and_compare_file_info_on_mnt("hardlink_file", values)

        # On backend bricks check the md5sum
        self._compare_file_md5sum_on_bricks("hardlink_file")
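
The helper _compare_gfid_xattr_on_files used above belongs to the test class and is not shown here. A plausible sketch, assuming it reads the trusted.gfid xattr of both names directly from a brick with getfattr and compares the values (hard links resolve to the same inode, so their gfid must match):

# Hedged sketch of a gfid comparison (the class helper's real signature and
# behaviour may differ). Reads trusted.gfid for both names from one brick.
from glusto.core import Glusto as g


def compare_gfid_xattr_on_brick(server, brick_path, fname, link_name):
    """Return True if trusted.gfid matches for fname and link_name."""
    gfids = []
    for name in (fname, link_name):
        cmd = ("getfattr --absolute-names -e hex -n trusted.gfid "
               "{}/{}".format(brick_path, name))
        ret, out, _ = g.run(server, cmd)
        if ret != 0:
            return False
        # getfattr output contains a line such as: trusted.gfid=0x1234...
        lines = [l for l in out.splitlines() if l.startswith("trusted.gfid=")]
        if not lines:
            return False
        gfids.append(lines[0])
    return gfids[0] == gfids[1]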
Code example #3
    def test_ec_read_from_hardlink(self):
        """
        Test steps:
        1. Enable metadata-cache(md-cache) options on the volume
        2. Touch a file and create a hardlink for it
        3. Read data from the hardlink.
        4. Read data from the actual file.
        """
        options = {'group': 'metadata-cache'}
        # Set metadata-cache options as group
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, "Unable to set the volume options {}".
                        format(options))
        g.log.info("Able to set the %s options", options)

        # Mounting the volume on one client
        ret, _, _ = mount_volume(self.volname, mtype=self.mount_type,
                                 mpoint=self.mounts[0].mountpoint,
                                 mserver=self.mnode,
                                 mclient=self.mounts[0].client_system)
        self.assertEqual(ret, 0, ("Volume {} is not mounted").
                         format(self.volname))
        g.log.info("Volume mounted successfully : %s", self.volname)

        file_name = self.mounts[0].mountpoint + "/test1"
        content = "testfile"
        hard_link = self.mounts[0].mountpoint + "/test1_hlink"
        cmd = 'echo "{content}" > {file}'.format(file=file_name,
                                                 content=content)

        # Creating a file with data
        ret, _, _ = g.run(self.mounts[0].client_system, cmd)
        self.assertEqual(ret, 0, "Sucessful in creating a file with data")
        g.log.info("file created successfully on %s",
                   self.mounts[0].mountpoint)

        # Creating a hardlink for the file created
        ret = create_link_file(self.mounts[0].client_system,
                               file_name, hard_link)
        self.assertTrue(ret, "Link file creation failed")
        g.log.info("Link file creation for %s is successful", file_name)

        # Reading from the file as well as the hardlink
        for each in (file_name, hard_link):
            ret, out, _ = g.run(self.mounts[0].client_system,
                                "cat {}".format(each))
            self.assertEqual(ret, 0, "Unable to read the {}".format(each))
            self.assertEqual(content, out.strip('\n'), "The content {} and"
                             " the data in file {} are not the same".
                             format(content, each))
            g.log.info("Read of %s file is successful", each)
Code example #4
    def create_links(self, client, path):

        # Soft links
        for i in range(4, 7):
            ret = create_link_file(client,
                                   '{}/file{}_or'.format(path, i),
                                   '{}/file{}_sl'.format(path, i),
                                   soft=True)
            self.assertTrue(
                ret, "Fail: Not able to create soft link for "
                "{}/file{}_or".format(path, i))
        g.log.info("Created soft links for files successfully")

        # Hard links
        for i in range(7, 10):
            ret = create_link_file(
                client,
                '{}/file{}_or'.format(path, i),
                '{}/file{}_hl'.format(path, i),
            )
            self.assertTrue(
                ret, "Fail: Not able to create hard link for "
                "{}/file{}_or".format(path, i))
        g.log.info("Created hard links for files successfully")
Code example #5
    def test_afr_heal_with_brickdown_hardlink(self):
        """
        Steps:
        1. Create  2 * 3 distribute replicate volume and disable all heals
        2. Create a file and 3 hardlinks to it from fuse mount.
        3. Kill brick4, rename HLINK1 to an appropriate name so that
           it gets hashed to replicate-1
        4. Likewise rename HLINK2 and HLINK3 as well, killing brick5 and
           brick6 respectively each time, i.e. a different brick of the 2nd
           replica is down each time.
        5. Now enable shd and let selfheals complete.
        6. Heal should complete without split-brains.
        """
        bricks_list = get_all_bricks(self.mnode, self.volname)
        options = {
            "metadata-self-heal": "off",
            "entry-self-heal": "off",
            "data-self-heal": "off",
            "self-heal-daemon": "off"
        }
        g.log.info("setting options %s", options)
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, ("Unable to set volume option %s for"
                              "volume %s" % (options, self.volname)))
        g.log.info("Successfully set %s for volume %s", options, self.volname)

        cmd = ("touch %s/FILE" % self.mounts[0].mountpoint)
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "file creation failed")

        # Creating a hardlink for the file created
        for i in range(1, 4):
            ret = create_link_file(
                self.clients[0], '{}/FILE'.format(self.mounts[0].mountpoint),
                '{}/HLINK{}'.format(self.mounts[0].mountpoint, i))
            self.assertTrue(ret, "Unable to create hard link file ")

        # Bring brick3 offline, rename file HLINK1, and bring brick3 back online
        self._test_brick_down_with_file_rename("HLINK1", "NEW-HLINK1",
                                               bricks_list[3])

        # Bring brick4 offline, rename file HLINK2, and bring brick4 back online
        self._test_brick_down_with_file_rename("HLINK2", "NEW-HLINK2",
                                               bricks_list[4])

        # Bring brick5 offline, rename file HLINK3, and bring brick5 back online
        self._test_brick_down_with_file_rename("HLINK3", "NEW-HLINK3",
                                               bricks_list[5])

        # Setting options
        options = {"self-heal-daemon": "on"}
        ret = set_volume_options(self.mnode, self.volname, options)
        self.assertTrue(ret, 'Failed to set options %s' % options)
        g.log.info("Option 'self-heal-daemon' is set to 'on' successfully")

        # Start healing
        ret = trigger_heal(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal is not started')
        g.log.info('Healing is started')

        # Monitor heal completion
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal has not yet completed')

        # Check if heal is completed
        ret = is_heal_complete(self.mnode, self.volname)
        self.assertTrue(ret, 'Heal is not complete')
        g.log.info('Heal is completed successfully')

        # Check for split-brain
        ret = is_volume_in_split_brain(self.mnode, self.volname)
        self.assertFalse(ret, 'Volume is in split-brain state')
        g.log.info('Volume is not in split-brain state')

        # Check data on mount point
        cmd = ("ls %s" % (self.mounts[0].mountpoint))
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "failed to fetch data from mount point")
Code example #6
    def test_dht_custom_xattr(self):
        """
        Test case:
        1.Create a gluster volume and start it.
        2.Create file and link files.
        3.Create a custom xattr for file.
        4.Verify that xattr for file is displayed on
          mount point and bricks
        5.Modify custom xattr value and verify that xattr
          for file is displayed on mount point and bricks
        6.Verify that custom xattr is not displayed
          once you remove it
        7.Create a custom xattr for symbolic link.
        8.Verify that xattr for symbolic link
          is displayed on mount point and sub-volume
        9.Modify custom xattr value and verify that
          xattr for symbolic link is displayed on
          mount point and bricks
        10.Verify that custom xattr is not
           displayed once you remove it.
        """
        # Initializing variables
        mount_point = self.mounts[0].mountpoint
        self.client_node = self.mounts[0].client_system
        self.list_of_files, list_of_softlinks = [], []
        list_of_hardlinks = []

        for number in range(1, 3):

            # Create regular files
            fname = '{0}/regular_file_{1}'.format(mount_point, str(number))
            ret = append_string_to_file(self.client_node, fname,
                                        'Sample content for file.')
            self.assertTrue(
                ret, "Unable to create regular file "
                "{}".format(fname))
            self.list_of_files.append(fname)

            # Create hard link for file
            hardlink = '{0}/link_file_{1}'.format(mount_point, str(number))
            ret = create_link_file(self.client_node, fname, hardlink)
            self.assertTrue(
                ret, "Unable to create hard link file "
                "{}".format(hardlink))
            list_of_hardlinks.append(hardlink)

            # Create soft link for file
            softlink = '{0}/symlink_file_{1}'.format(mount_point, str(number))
            ret = create_link_file(self.client_node,
                                   fname,
                                   softlink,
                                   soft=True)
            self.assertTrue(
                ret, "Unable to create symlink file "
                "{}".format(softlink))
            list_of_softlinks.append(softlink)

        self.files_and_soft_links = self.list_of_files + list_of_softlinks

        # Check if files are created on the right subvol
        ret = validate_files_in_dir(
            self.client_node,
            mount_point,
            file_type=k.FILETYPE_FILES,
            test_type=k.TEST_FILE_EXISTS_ON_HASHED_BRICKS)
        self.assertTrue(ret, "Files not created on correct sub-vols")
        g.log.info("Files are on correct sub-vols according to "
                   "the hash value")

        # Set custom xattr on all the regular files
        self.set_xattr_user_foo(self.list_of_files, 'bar2')

        # Check if custom xattr is set to all the regular files
        self.check_custom_xattr_visible('bar2')

        # Change the custom xattr on all the regular files
        self.set_xattr_user_foo(self.list_of_files, 'ABC')

        # Check if xattr is set to all the regular files
        self.check_custom_xattr_visible('ABC')

        # Delete Custom xattr from all regular files
        self.delete_xattr_user_foo(self.list_of_files)

        # Check mount point and brick for the xattr
        list_of_all_files = list_of_hardlinks + self.files_and_soft_links
        self.check_mount_point_and_bricks_for_xattr(list_of_all_files)

        # Check if pathinfo xattr is visible
        self.check_for_trusted_glusterfs_pathinfo(self.list_of_files)

        # Set custom xattr on all the soft links
        self.set_xattr_user_foo(list_of_softlinks, 'bar2')

        # Check if custom xattr is set to all the soft links
        self.check_custom_xattr_visible('bar2')

        # Change the custom xattr on all the soft links
        self.set_xattr_user_foo(list_of_softlinks, 'ABC')

        # Check if xattr is set to all the soft links
        self.check_custom_xattr_visible('ABC')

        # Delete custom xattr from all soft links
        self.delete_xattr_user_foo(list_of_softlinks)

        # Check mount point and brick for the xattr
        self.check_mount_point_and_bricks_for_xattr(list_of_all_files)

        # Check if pathinfo xattr is visible
        self.check_for_trusted_glusterfs_pathinfo(list_of_softlinks)
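
The xattr helpers set_xattr_user_foo, delete_xattr_user_foo and the check_* methods also belong to the test class. A minimal sketch of the set/delete pair, assuming the custom xattr is literally named user.foo (as the helper names suggest) and is manipulated with setfattr from the client:

    # Hedged sketch of the set/delete helpers; the check_* helpers would
    # similarly read the xattr back with getfattr on the mount and bricks.
    def set_xattr_user_foo(self, list_of_files, xattr_val):
        """Set user.foo to the given value on every file in the list."""
        for fname in list_of_files:
            cmd = "setfattr -n user.foo -v {} {}".format(xattr_val, fname)
            ret, _, _ = g.run(self.client_node, cmd)
            self.assertEqual(ret, 0,
                             "Failed to set user.foo on {}".format(fname))

    def delete_xattr_user_foo(self, list_of_files):
        """Remove user.foo from every file in the list."""
        for fname in list_of_files:
            ret, _, _ = g.run(self.client_node,
                              "setfattr -x user.foo {}".format(fname))
            self.assertEqual(ret, 0,
                             "Failed to remove user.foo from {}".format(fname))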
Code example #7
    def test_rename_files_with_brick_down(self):
        """
        Description: Tests to check that there is no data loss when rename is
                      performed with a brick of volume down.
         Steps :
         1) Create a volume.
         2) Mount the volume using FUSE.
         3) Create 1000 files on the mount point.
         4) Create the soft-link for file{1..100}
         5) Create the hard-link for file{101..200}
         6) Check for the file count on the mount point.
         7) Begin renaming the files, in multiple iterations.
         8) Let few iterations of the rename complete successfully.
         9) Then while rename is still in progress, kill a brick part of the
            volume.
         10) Let the brick be down for some time, such that a couple
             of rename iterations are completed.
         11) Bring the brick back online.
         12) Wait for the IO to complete.
         13) Check if there is any data loss.
         14) Check if all the files are renamed properly.
         """
        # Creating 1000 files on volume root
        m_point = self.mounts[0].mountpoint
        command = 'touch ' + m_point + '/file{1..1000}_0'
        ret, _, _ = g.run(self.clients[0], command)
        self.assertEqual(ret, 0, "File creation failed on %s"
                         % m_point)
        g.log.info("Files successfully created on the mount point")

        # Create soft links for a few files
        for i in range(1, 100):
            ret = create_link_file(self.clients[0],
                                   '{}/file{}_0'.format(m_point, i),
                                   '{}/soft_link_file{}_0'.format(m_point, i),
                                   soft=True)
            self.assertTrue(ret, "Failed to create soft links for files")
        g.log.info("Created soft links for files successfully")

        # Create hard links for a few files
        for i in range(101, 200):
            ret = create_link_file(self.clients[0],
                                   '{}/file{}_0'.format(m_point, i),
                                   '{}/hard_link_file{}_0'.format(m_point, i),
                                   soft=False)
            self.assertTrue(ret, "Failed to create hard links for files")
        g.log.info("Created hard links for files successfully")

        # Calculate file count for the mount-point
        cmd = ("ls -lR %s/ | wc -l" % m_point)
        ret, count_before, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "Failed to get file count")
        g.log.info("File count before rename is:%s", count_before)

        # Start renaming the files in multiple iterations
        g.log.info("Starting to rename the files")
        all_mounts_procs = []
        cmd = ('for i in `seq 1 1000`; do for j in `seq 0 5`;do mv -f '
               '%s/file$i\\_$j %s/file$i\\_$(expr $j + 1); done; done'
               % (m_point, m_point))
        proc = g.run_async(self.mounts[0].client_system, cmd,
                           user=self.mounts[0].user)
        all_mounts_procs.append(proc)

        # Waiting for some time for an iteration of rename to complete
        g.log.info("Waiting for few rename iterations to complete")
        sleep(120)

        # Get the information about the bricks part of the volume
        brick_list = get_all_bricks(self.mnode, self.volname)

        # Kill a brick part of the volume
        ret = bring_bricks_offline(self.volname, choice(brick_list))
        self.assertTrue(ret, "Failed to bring brick offline")
        g.log.info("Successfully brought brick offline")

        # Let the brick be down for some time
        g.log.info("Keeping brick down for few minutes")
        sleep(60)

        # Bring the brick online using gluster v start force
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Volume start with force failed")
        g.log.info("Volume start with force successful")

        # Close connection and check if rename has completed
        ret, _, _ = proc.async_communicate()
        self.assertEqual(ret, 0, "Rename is not completed")
        g.log.info("Rename is completed")

        # Do lookup on the files
        # Calculate file count from mount
        cmd = ("ls -lR %s/ | wc -l" % m_point)
        ret, count_after, _ = g.run(self.clients[0], cmd)
        self.assertEqual(ret, 0, "Failed to do lookup and"
                         "get file count")
        g.log.info("Lookup successful. File count after"
                   " rename is:%s", count_after)

        # Check if there is any data loss
        self.assertEqual(int(count_before), int(count_after),
                         "The file count before and after"
                         " rename is not same. There is data loss.")
        g.log.info("The file count before and after rename is same."
                   " No data loss occurred.")

        # Checking if all files were renamed Successfully
        ret = get_volume_type(brick_list[0] + "/")
        if ret in ("Replicate", "Disperse", "Arbiter", "Distributed-Replicate",
                   "Distribute-Disperse", "Distribute-Arbiter"):
            cmd = ("ls -lR %s/file*_6 | wc -l" % m_point)
            ret, out, _ = g.run(self.clients[0], cmd)
            self.assertEqual(int(out), 1000, "Rename failed on some files")
            g.log.info("All the files are renamed successfully")
Code example #8
    def test_ec_rename_files_with_brick_down(self):
        """
        Description: Test to check no errors on file/dir renames when one of
                        the bricks is down in the volume.
        Steps:
        1. Create an EC volume
        2. Mount the volume using FUSE on two different clients
        3. Create ~9 files from one of the clients
        4. Create ~9 dirs with ~9 files each from the other client
        5. Create soft-links, hard-links for file{4..6}, file{7..9}
        6. Create soft-links for dir{4..6}
        7. Begin renaming the files, in multiple iterations
        8. Bring down a brick while renaming the files
        9. Bring the brick online after renaming some of the files
        10. Wait for renaming of the files
        11. Validate no data loss and files are renamed successfully
        """

        # Creating ~9 files from client 1 on mount
        m_point = self.mounts[0].mountpoint
        cmd = 'cd %s; touch file{1..9}_or' % m_point
        ret, _, _ = g.run(self.clients[0], cmd)
        self.assertEqual(
            ret, 0, "Fail: Not able to create files on "
            "{}".format(m_point))
        g.log.info("Files created successfully on mount point")

        # Creating 9 dir X 9 files in each dir from client 2
        cmd = ('cd %s; mkdir -p dir{1..9}_or; touch '
               'dir{1..9}_or/file{1..9}_or' % m_point)
        ret, _, _ = g.run(self.clients[1], cmd)
        self.assertEqual(
            ret, 0, "Fail: Not able to create dir with files on "
            "{}".format(m_point))
        g.log.info("Dirs with files are created successfully on mount point")

        # Create required soft links and hard links from client 1 on mount
        client, path = self.clients[0], m_point
        self.create_links(client, path)

        client = self.clients[1]
        for i in range(1, 10):

            # Create required soft and hard links in nested dirs
            path = '{}/dir{}_or'.format(m_point, i)
            self.create_links(client, path)

        # Create soft links for dirs
        path = m_point
        for i in range(4, 7):
            ret = create_link_file(client,
                                   '{}/dir{}_or'.format(path, i),
                                   '{}/dir{}_sl'.format(path, i),
                                   soft=True)
            self.assertTrue(
                ret, "Fail: Not able to create soft link for "
                "{}/dir{}_or".format(path, i))
        g.log.info("Created nested soft and hard links for files successfully")

        # Calculate the file count for each category: original files, soft
        # links and hard links
        cmd = ('cd %s; arr=(or sl hl); '
               'for i in ${arr[*]}; do find . -name "*$i" | wc -l ; '
               'done; ' % m_point)
        ret, out, _ = g.run(client, cmd)
        self.assertEqual(
            ret, 0, "Not able get list of soft and hard links "
            "created on the mount point")
        all_org, all_soft, all_hard = out.split()

        # Rename 2 out of 3 dir's soft links from client 1
        client = self.clients[0]
        cmd = ('cd %s; sl=0; '
               'for line in `ls -R | grep -P "dir(4|5)_sl"`; '
               'do mv -f "$line" "$line""_renamed"; ((sl++)); done; '
               'echo $sl;' % m_point)
        ret, out, _ = g.run(client, cmd)
        self.assertEqual(ret, 0, "Not able to rename directory soft links")
        temp_soft = out.strip()

        # Start renaming original files from client 1 and
        # softlinks, hardlinks  from client 2
        cmd = ('cd %s; arr=(. dir{1..9}_or);  or=0; '
               'for item in ${arr[*]}; do '
               'cd $item; '
               'for line in `ls | grep -P "file(1|2)_or"`; '
               'do mv -f "$line" "$line""_renamed"; ((or++)); sleep 2; done;'
               'cd - > /dev/null; sleep 1; done; echo $or ' % m_point)
        proc_or = g.run_async(client, cmd)

        client = self.clients[1]
        cmd = ('cd %s; arr=(. dir{1..9}_or); sl=0; hl=0; '
               'for item in ${arr[*]}; do '
               'cd $item; '
               'for line in `ls | grep -P "file(4|5)_sl"`; '
               'do mv -f "$line" "$line""_renamed"; ((sl++)); sleep 1; done; '
               'for line in `ls | grep -P "file(7|8)_hl"`; '
               'do mv -f "$line" "$line""_renamed"; ((hl++)); sleep 1; done; '
               'cd - > /dev/null; sleep 1; done; echo $sl $hl; ' % m_point)
        proc_sl_hl = g.run_async(client, cmd)

        # Wait for some files to be renamed
        sleep(20)

        # Kill one of the bricks
        brick_list = get_all_bricks(self.mnode, self.volname)
        ret = bring_bricks_offline(self.volname, choice(brick_list))
        self.assertTrue(ret, "Failed to bring one of the bricks offline")

        # Wait for some more files to be renamed
        sleep(20)

        # Bring brick online
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Not able to start Volume with force option")

        # Wait for rename to complete and take count of file operations
        ret, out, _ = proc_or.async_communicate()
        self.assertEqual(ret, 0, "Fail: Origianl files are not renamed")
        ren_org = out.strip()

        ret, out, _ = proc_sl_hl.async_communicate()
        self.assertEqual(ret, 0, "Fail: Soft and Hard links are not renamed")
        ren_soft, ren_hard = out.strip().split()
        ren_soft = str(int(ren_soft) + int(temp_soft))

        # Count actual data of renaming links/files
        cmd = ('cd %s; arr=(or or_renamed sl sl_renamed hl hl_renamed); '
               'for i in ${arr[*]}; do find . -name "*$i" | wc -l ; '
               'done; ' % m_point)
        ret, out, _ = g.run(client, cmd)
        self.assertEqual(
            ret, 0, "Not able to get count of original and link "
            "files after brick was brought up")
        (act_org, act_org_ren, act_soft, act_soft_ren, act_hard,
         act_hard_ren) = out.split()

        # Validate count of expected and actual rename of
        # links/files is matching
        for exp, act, msg in ((ren_org, act_org_ren, 'original'),
                              (ren_soft, act_soft_ren, 'soft links'),
                              (ren_hard, act_hard_ren, 'hard links')):
            self.assertEqual(
                exp, act, "Count of {} files renamed while brick "
                "was offline is not matching".format(msg))

        # Validate no data is lost in rename process
        for exp, act, msg in (
            (int(all_org) - int(act_org_ren), int(act_org), 'original'),
            (int(all_soft) - int(act_soft_ren), int(act_soft), 'soft links'),
            (int(all_hard) - int(act_hard_ren), int(act_hard), 'hard links'),
        ):
            self.assertEqual(
                exp, act, "Count of {} files which are not "
                "renamed while brick was offline "
                "is not matching".format(msg))
Code example #9
    def test_io_with_cyclic_brick_down(self):
        """
        Description: To check heal process on EC volume when brick is brought
                    down in a cyclic fashion
        Steps:
        - Create, start and mount an EC volume in two clients
        - Create multiple files and directories, covering all file types, in
          one directory from client 1
        - Take arequal check sum of above data
        - Create another folder and pump different fops from client 2
        - Fail and bring up redundant bricks in a cyclic fashion in all of the
          subvols maintaining a minimum delay between each operation
        - In every cycle create new dir when brick is down and wait for heal
        - Validate that heal info on the volume returns promptly when a brick
          is down
        - Validate arequal after bringing the brick offline
        """

        # Create a directory structure on mount from client 1
        mount_obj = self.mounts[0]
        cmd = ('/usr/bin/env python {}/file_dir_ops.py '
               'create_deep_dirs_with_files --dir-depth 3 '
               '--max-num-of-dirs 5 --fixed-file-size 10k '
               '--num-of-files 9 {}'.format(
                   self.script_path,
                   mount_obj.mountpoint,
               ))
        ret, _, _ = g.run(mount_obj.client_system, cmd)
        self.assertEqual(ret, 0, 'Not able to create directory structure')
        dir_name = 'user1'
        # Create soft links for the first five files and hard links for the
        # rest, asserting each link creation individually
        for i in range(5):
            ret = create_link_file(
                mount_obj.client_system,
                '{}/{}/testfile{}.txt'.format(mount_obj.mountpoint, dir_name,
                                              i),
                '{}/{}/testfile{}_sl.txt'.format(mount_obj.mountpoint,
                                                 dir_name, i),
                soft=True)
            self.assertTrue(ret, 'Not able to create soft links')
        for i in range(5, 9):
            ret = create_link_file(
                mount_obj.client_system,
                '{}/{}/testfile{}.txt'.format(mount_obj.mountpoint, dir_name,
                                              i),
                '{}/{}/testfile{}_hl.txt'.format(mount_obj.mountpoint,
                                                 dir_name, i))
            self.assertTrue(ret, 'Not able to create hard links')
        g.log.info('Successfully created directory structure consisting all '
                   'file types on mount')

        # Take note of arequal checksum
        ret, exp_arequal = collect_mounts_arequal(mount_obj, path=dir_name)
        self.assertTrue(ret, 'Failed to get arequal checksum on mount')

        # Get all the subvols in the volume
        subvols = get_subvols(self.mnode, self.volname)
        self.assertTrue(subvols.get('volume_subvols'), 'Not able to get '
                        'subvols of the volume')

        # Create a dir, pump IO in that dir, offline b1, wait for IO and
        # online b1, wait for heal of b1, bring b2 offline...
        m_point, m_client = (self.mounts[1].mountpoint,
                             self.mounts[1].client_system)
        cur_off_bricks = ''
        for count, off_brick in enumerate(zip(*subvols.get('volume_subvols')),
                                          start=1):

            # Bring offline bricks online by force starting volume
            if cur_off_bricks:
                self._bring_bricks_online_and_monitor_heal(cur_off_bricks)

            # Create a dir for running IO
            ret = mkdir(m_client, '{}/dir{}'.format(m_point, count))
            self.assertTrue(
                ret, 'Not able to create directory for '
                'starting IO before offline of brick')

            # Start IO in the newly created directory
            cmd = ('/usr/bin/env python {}/fd_writes.py -n 10 -t 480 -d 5 -c '
                   '16 --dir {}/dir{}'.format(self.script_path, m_point,
                                              count))
            proc = g.run_async(m_client, cmd)
            self.all_mounts_procs.append(proc)

            # Wait IO to partially fill the dir
            sleep(10)

            # Bring a single brick offline from all of subvols
            ret = bring_bricks_offline(self.volname, list(off_brick))
            self.assertTrue(ret,
                            'Not able to bring {} offline'.format(off_brick))

            # Validate heal info returns within 5s of bringing bricks offline
            start_time = datetime.now().replace(microsecond=0)
            ret, _, _ = heal_info(self.mnode, self.volname)
            end_time = datetime.now().replace(microsecond=0)
            self.assertEqual(
                ret, 0, 'Not able to query heal info status '
                'of volume when a brick is offline')
            self.assertLess(
                end_time - start_time, timedelta(seconds=5),
                'Query of heal info of volume when a brick is '
                'offline is taking more than 5 seconds')

            # Wait for some more IO to fill dir
            sleep(10)

            # Validate arequal on initial static dir
            ret, act_arequal = collect_mounts_arequal(mount_obj, path=dir_name)
            self.assertTrue(
                ret, 'Failed to get arequal checksum on bringing '
                'a brick offline')
            self.assertEqual(
                exp_arequal, act_arequal, 'Mismatch of arequal '
                'checksum before and after killing a brick')

            cur_off_bricks = off_brick

        # Take note of ctime on mount
        ret, prev_ctime, _ = g.run(m_client, 'date +%s')
        self.assertEqual(ret, 0, 'Not able to get epoch time from client')

        self._bring_bricks_online_and_monitor_heal(cur_off_bricks)

        # Validate IO was happening during brick operations
        # and compare ctime of recent file to current epoch time
        ret = validate_io_procs(self.all_mounts_procs,
                                [self.mounts[0]] * len(self.all_mounts_procs))
        self.assertTrue(ret, 'Not able to validate completion of IO on mounts')
        self.all_mounts_procs *= 0  # don't validate IO in tearDown
        ret, curr_ctime, _ = g.run(
            m_client, "find {} -type f -printf '%C@\n' | "
            'sort -r | head -n 1'.format(m_point))
        self.assertEqual(
            ret, 0, 'Not able to get ctime of last edited file from the mount')
        self.assertGreater(
            float(curr_ctime), float(prev_ctime), 'Not able '
            'to validate IO was happening during brick operations')

        g.log.info('Completed IO continuity test on EC volume successfully')
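
The helper _bring_bricks_online_and_monitor_heal is a class method not shown here. A plausible sketch, assuming it force-starts the volume to revive the offline bricks (as the other examples do) and then waits for self-heal with monitor_heal_completion:

    # Hedged sketch of the class helper; volume_start and
    # monitor_heal_completion are the same glustolibs calls used elsewhere
    # in these examples.
    def _bring_bricks_online_and_monitor_heal(self, bricks):
        """Bring `bricks` back online and wait until heal completes."""
        ret, _, _ = volume_start(self.mnode, self.volname, force=True)
        self.assertEqual(ret, 0, "Unable to force start the volume")
        ret = monitor_heal_completion(self.mnode, self.volname)
        self.assertTrue(ret,
                        "Heal is not complete after bringing {} "
                        "online".format(bricks))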