def _compare_gfid_xattr_on_files(self, link_file_name, expected=True):
    """Collect and compare the value of the trusted.gfid xattr for
    test_file and its link on the backend bricks.

    Args:
        link_file_name (str): Name of the link file, relative to the
            mount point, whose gfid list is compared with test_file's.
        expected (bool): When True assert the two gfid lists are equal,
            when False assert they differ. Defaults to True.
    """
    def _collect_gfids(fname):
        # Gather trusted.gfid from every backend brick that holds fname,
        # as reported by the pathinfo xattr on the mount point.
        brick_list = get_pathinfo(
            self.client, "{}/{}".format(self.m_point, fname))
        gfids = []
        for brick in brick_list['brickdir_paths']:
            host, path = brick.split(':')
            gfids.append(get_fattr(host, path, "trusted.gfid"))
        return gfids

    xattr_list_test_file = _collect_gfids("test_file")
    xattr_list_link_file = _collect_gfids(link_file_name)

    if expected is True:
        self.assertEqual(
            xattr_list_test_file, xattr_list_link_file,
            "Unexpected: The xattr trusted.gfid is not same "
            "for test_file and {}".format(link_file_name))
        g.log.info(
            "The xattr trusted.gfid is same for test_file"
            " and %s", link_file_name)
    else:
        self.assertNotEqual(
            xattr_list_test_file, xattr_list_link_file,
            "Unexpected: The xattr trusted.gfid is same "
            "for test_file and {}".format(link_file_name))
        g.log.info(
            "The xattr trusted.gfid is not same for test_file"
            " and %s", link_file_name)
def run_layout_tests(mnode, fqpath, layout, test_type):
    """Run the is_complete and/or is_balanced layout tests.

    Args:
        mnode (str): Node used to collect the pathinfo xattr.
        fqpath (str): Fully-qualified path under test.
        layout: Layout object exposing is_complete / is_balanced.
        test_type (int): Bitmask of k.TEST_LAYOUT_IS_COMPLETE and/or
            k.TEST_LAYOUT_IS_BALANCED.

    Returns:
        bool: True when no requested test raised.

    Raises:
        gex.LayoutIsNotCompleteError: layout incomplete for fqpath.
        gex.LayoutIsNotBalancedError: layout unbalanced for fqpath.
    """
    ret = get_pathinfo(mnode, fqpath)
    brick_path_list = ret.get('brickdir_paths')
    for brickdir_path in brick_path_list:
        # NOTE: the original unpacked the server IP from brickdir_path
        # but never used it; the unused local has been removed.
        if get_volume_type(brickdir_path) in ('Replicate', 'Disperse',
                                              'Arbiter'):
            g.log.info("Cannot check for layout completeness as"
                       " volume under test is Replicate/Disperse/Arbiter")
        else:
            # Lazy %-style args instead of eager "%" formatting.
            if test_type & k.TEST_LAYOUT_IS_COMPLETE:
                g.log.info("Testing layout complete for %s", fqpath)
                if not layout.is_complete:
                    msg = ("Layout for %s IS NOT COMPLETE" % fqpath)
                    g.log.error(msg)
                    raise gex.LayoutIsNotCompleteError(msg)
            if test_type & k.TEST_LAYOUT_IS_BALANCED:
                g.log.info("Testing layout balance for %s", fqpath)
                if not layout.is_balanced:
                    msg = ("Layout for %s IS NOT BALANCED" % fqpath)
                    g.log.error(msg)
                    raise gex.LayoutIsNotBalancedError(msg)

    # returning True until logic requires non-exception error check(s)
    return True
def _compare_file_permissions(self, file_name, file_info_mnt=None,
                              file_info_brick=None):
    """Check that the file's permission mode is the same on the
    mountpoint and on every backend brick.

    Args:
        file_name (str): File name relative to the mount point.
        file_info_mnt (str): Pre-fetched 'access' value from the mount;
            collected here when both optional args are None.
        file_info_brick (list): Pre-fetched per-brick 'access' values;
            collected here when both optional args are None.
    """
    if (file_info_mnt is None and file_info_brick is None):
        # 'access' from get_file_stat is the permission mode.
        file_info_mnt = (get_file_stat(
            self.client,
            "{}/{}".format(self.m_point, file_name)))['access']
        # Fixed message: this fetches permissions, not access time.
        self.assertIsNotNone(
            file_info_mnt,
            "Failed to get permissions for {}".format(file_name))
        brick_list = get_pathinfo(self.client,
                                  "{}/{}".format(self.m_point, file_name))
        self.assertNotEqual(
            brick_list, 0,
            "Failed to get bricklist for {}".format(file_name))
        file_info_brick = []
        for brick in brick_list['brickdir_paths']:
            host, path = brick.split(':')
            info_brick = (get_file_stat(host, path))['access']
            file_info_brick.append(info_brick)

    for info in file_info_brick:
        # "different" typo fixed in the failure message.
        self.assertEqual(
            info, file_info_mnt,
            "File details for {} are different on"
            " backend-brick".format(file_name))
    g.log.info("Details for file %s is correct"
               " on backend-bricks", file_name)
def _check_if_files_are_stored_only_on_expected_bricks(self):
    """Check if files are stored only on expected bricks.

    For every file in self.list_of_device_files: assert it exists on
    each brick named by its trusted.glusterfs.pathinfo xattr, then
    assert it does NOT exist on any other brick of the volume.
    """
    for fname in self.list_of_device_files:
        # Fetch trusted.glusterfs.pathinfo and check if file is present on
        # brick or not
        ret = get_pathinfo(self.clients[0], fname)
        self.assertIsNotNone(
            ret, "Unable to get "
            "trusted.glusterfs.pathinfo of file %s" % fname)
        present_brick_list = []
        for brick_path in ret['brickdir_paths']:
            node, path = brick_path.split(":")
            # 'ret' is deliberately reused here for the exists check.
            ret = file_exists(node, path)
            self.assertTrue(
                ret, "Unable to find file {} on brick {}".format(fname, path))
            # Normalize "host:/brick/dir/file" down to "host:/brick/dir"
            # so it can be compared with get_all_bricks output below.
            brick_text = brick_path.split('/')[:-1]
            # NOTE(review): first-two-chars-digit test presumably
            # detects a raw IP vs hostname; [:-1] strips the trailing
            # ':' before resolving — TODO confirm against pathinfo
            # format produced by get_pathinfo.
            if brick_text[0][0:2].isdigit():
                brick_text[0] = gethostbyname(brick_text[0][:-1]) + ":"
            present_brick_list.append('/'.join(brick_text))
        # Check on other bricks where file doesn't exist
        brick_list = get_all_bricks(self.mnode, self.volname)
        other_bricks = [
            brk for brk in brick_list if brk not in present_brick_list
        ]
        for brick in other_bricks:
            node, path = brick.split(':')
            # Only the basename is appended: device files live directly
            # under the brick root in this test.
            ret = file_exists(node, "{}/{}".format(path,
                                                   fname.split('/')[-1]))
            self.assertFalse(
                ret, "Unexpected: Able to find file {} on "
                "brick {}".format(fname, path))
def _is_file_present_on_brick(self, file_name):
    """Check if file is created on the backend-bricks as per
    the value of trusted.glusterfs.pathinfo xattr"""
    fqpath = "{}/{}".format(self.m_point, file_name)
    brick_list = get_pathinfo(self.client, fqpath)
    self.assertNotEqual(
        brick_list, 0,
        "Failed to get bricklist for {}".format(file_name))

    # Every brick named in the pathinfo xattr must hold the file.
    for brick in brick_list['brickdir_paths']:
        host, path = brick.split(':')
        self.assertTrue(
            file_exists(host, path),
            "File {} is not present on {}".format(file_name, brick))
        g.log.info("File %s is present on %s", file_name, brick)
def _compare_file_md5sum_on_bricks(self, link_file_name):
    """Collect and compare md5sum for test_file and its link on the
    backend-bricks.

    Args:
        link_file_name (str): Name of the link file, relative to the
            mount point, compared against test_file.
    """
    brick_list_test_file = get_pathinfo(
        self.client, "{}/test_file".format(self.m_point))
    md5sum_list_test_file = []
    for brick in brick_list_test_file['brickdir_paths']:
        host, path = brick.split(':')
        md5sum_test_file, _ = (get_md5sum(host, path)).split()
        md5sum_list_test_file.append(md5sum_test_file)

    brick_list_link_file = get_pathinfo(
        self.client, "{}/{}".format(self.m_point, link_file_name))
    md5sum_list_link_file = []
    for brick in brick_list_link_file['brickdir_paths']:
        # BUG FIX: the original loop never split the brick path, so it
        # silently reused host/path left over from the previous loop
        # and checksummed the wrong file.
        host, path = brick.split(':')
        md5sum_link_file, _ = (get_md5sum(host, path)).split()
        md5sum_list_link_file.append(md5sum_link_file)

    # BUG FIX: compare the full per-brick lists; the original compared
    # only the scalars from the final loop iteration.
    self.assertEqual(
        md5sum_list_test_file, md5sum_list_link_file,
        "The md5sum for test_file and {} is"
        " not same on backend bricks".format(link_file_name))
    g.log.info(
        "The md5sum for test_file and %s is same"
        " on backend bricks", link_file_name)
def _check_change_time_brick(self, file_name):
    """Return the change time (epoch_ctime) of file_name on each
    backend brick.

    Note: the original docstring said "modification time", but the
    value collected is epoch_ctime, i.e. the inode change time —
    matching the method name.

    Args:
        file_name (str): File name relative to the mount point.

    Returns:
        list: epoch_ctime value per backend brick.
    """
    brick_list = get_pathinfo(self.client,
                              "{}/{}".format(self.m_point, file_name))
    self.assertNotEqual(
        brick_list, 0,
        "Failed to get bricklist for {}".format(file_name))

    brick_ctime = []
    for brick in brick_list['brickdir_paths']:
        host, path = brick.split(':')
        # Recursive lookup first, presumably to trigger any pending
        # self-heal/metadata sync before reading the stat.
        cmd = "ls -lR {}".format(path)
        ret, _, _ = g.run(host, cmd)
        self.assertEqual(ret, 0, "Lookup failed on"
                         " brick:{}".format(path))
        file_ctime_brick = (get_file_stat(host, path))['epoch_ctime']
        brick_ctime.append(file_ctime_brick)
    return brick_ctime
def _compare_stat_output_from_mout_point_and_bricks(self):
    """Compare stat output from mountpoint and bricks"""
    # Fields that must agree between the mount view and every brick.
    stat_keys = ("filetype", "access", "size", "username",
                 "groupname", "uid", "gid", "epoch_atime",
                 "epoch_mtime", "epoch_ctime")
    for fname in self.list_of_device_files:
        # Stat as seen through the mount point.
        mountpoint_stat = get_file_stat(self.clients[0], fname)
        bricks = get_pathinfo(self.clients[0], fname)

        # Stat on each backend brick, compared field by field.
        for brick_path in bricks['brickdir_paths']:
            node, path = brick_path.split(":")
            brick_stat = get_file_stat(node, path)
            for key in stat_keys:
                self.assertEqual(
                    mountpoint_stat[key], brick_stat[key],
                    "Difference observed between stat output "
                    "of mountpoint and bricks for file %s" % fname)
def test_rebalance_stop_with_large_file(self):
    """
    Testcase Steps:
    1. Create and start a volume.
    2. Mount volume on client and create a large file.
    3. Add bricks to the volume and check layout
    4. Rename the file such that it hashs to different subvol.
    5. Start rebalance on volume.
    6. Stop rebalance on volume.
    """
    # Create file BIG1 (~10 GB of random data so rebalance of it takes
    # long enough to be stopped mid-flight).
    command = ("dd if=/dev/urandom of={}/BIG1 bs=1024K count=10000".format(
        self.mounts[0].mountpoint))
    ret, _, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(ret, 0, "Unable to create file I/O failed")
    g.log.info('Successfully created file BIG1.')

    # Checking if file created on correct subvol or not.
    ret = validate_files_in_dir(
        self.mounts[0].client_system,
        self.mounts[0].mountpoint,
        file_type=k.FILETYPE_FILES,
        test_type=k.TEST_FILE_EXISTS_ON_HASHED_BRICKS)
    self.assertTrue(ret, "Files not created on correct subvol.")
    g.log.info("File BIG1 is on correct subvol according to "
               "the hash value")

    # Adding brick to volume
    add_brick_list = form_bricks_list_to_add_brick(self.mnode,
                                                   self.volname,
                                                   self.servers,
                                                   self.all_servers_info)
    ret, _, _ = add_brick(self.mnode, self.volname, add_brick_list)
    self.assertEqual(ret, 0, "Unable to add bricks to volume")
    g.log.info("Successfully added bricks to volume.")

    # Check if brick is added successfully or not.
    current_bricks = get_all_bricks(self.mnode, self.volname)
    self.assertIsNotNone(
        current_bricks, "Unable to get "
        "current active bricks of volume")
    g.log.info("Successfully got active bricks of volume.")
    for brick in add_brick_list:
        self.assertIn(brick, current_bricks,
                      ("Brick %s is not added to volume" % brick))

    # Create directory testdir.
    ret = mkdir(self.mounts[0].client_system,
                self.mounts[0].mountpoint + '/testdir')
    self.assertTrue(ret, "Failed to create testdir directory")
    g.log.info("Successfuly created testdir directory.")

    # Layout should be set on the new brick and should be
    # continous and complete
    ret = validate_files_in_dir(self.mounts[0].client_system,
                                self.mounts[0].mountpoint + '/testdir',
                                test_type=k.TEST_LAYOUT_IS_COMPLETE)
    self.assertTrue(ret, "Layout not set for the new subvol")
    g.log.info("New subvol has been added successfully")

    # Rename file so that it gets hashed to different subvol.
    # Record the bricks currently holding BIG1, then keep renaming
    # BIG<n> -> BIG<n+1> until the pathinfo shows a different brick
    # set, i.e. the new name hashes to another subvolume.
    file_index = 0
    path_info_dict = get_pathinfo(self.mounts[0].client_system,
                                  self.mounts[0].mountpoint + '/BIG1')
    initial_brick_set = path_info_dict['brickdir_paths']

    while True:
        # Calculate old_filename and new_filename and rename.
        file_index += 1
        old_filename = "{}/BIG{}".format(self.mounts[0].mountpoint,
                                         file_index)
        new_filename = "{}/BIG{}".format(self.mounts[0].mountpoint,
                                         (file_index + 1))
        ret, _, _ = g.run(self.mounts[0].client_system,
                          "mv {} {}".format(old_filename, new_filename))
        self.assertEqual(ret, 0, "Rename not successful")

        # Checking if it was moved to new subvol or not.
        path_info_dict = get_pathinfo(
            self.mounts[0].client_system,
            self.mounts[0].mountpoint + '/BIG%d' % (file_index + 1))
        if path_info_dict['brickdir_paths'] != initial_brick_set:
            break
    g.log.info("file renamed successfully")

    # Start rebalance on volume
    ret, _, _ = rebalance_start(self.mnode, self.volname,
                                fix_layout=False)
    self.assertEqual(ret, 0, "Rebalance did not start")
    g.log.info("Rebalance started successfully on volume %s",
               self.volname)

    # Stop rebelance on volume
    ret, _, _ = rebalance_stop(self.mnode, self.volname)
    self.assertEqual(ret, 0, "Rebalance stop command did not execute.")
    g.log.info("Rebalance stopped successfully on volume %s",
               self.volname)

    # Get rebalance status in xml.
    # A non-zero exit (1) is EXPECTED here: once rebalance is stopped
    # the status command fails, which is exactly what we assert.
    command = ("gluster volume rebalance {} status --xml".format(
        self.volname))
    ret, _, _ = g.run(self.mnode, command)
    self.assertEqual(
        ret, 1, "Unexpected: Rebalance still running "
        "even after stop.")
    g.log.info("Rebalance is not running after stop.")
def test_create_link_for_directory(self):
    """Verify symlink behaviour for a directory on the mount point:
    link type, target, inode, listing, xattr visibility, pathinfo
    equality and readlink output.
    """
    g.log.info("creating a directory at mount point")
    m_point = self.mounts[0].mountpoint
    test_dir_path = 'test_dir'
    fqpath = m_point + '/' + test_dir_path
    flag = mkdir(self.clients[0], fqpath, True)
    self.assertTrue(flag, "failed to create a directory")
    fqpath = m_point + '/' + test_dir_path + '/dir{1..3}'
    flag = mkdir(self.clients[0], fqpath, True)
    self.assertTrue(flag, "failed to create sub directories")
    flag = validate_files_in_dir(self.clients[0],
                                 m_point + '/test_dir',
                                 test_type=k.TEST_LAYOUT_IS_COMPLETE)
    # BUG FIX: the failure message was inverted ("is complete").
    self.assertTrue(flag, "layout of test directory is not complete")
    g.log.info("directory created successfully")

    g.log.info("creating a symlink for test_dir")
    sym_link_path = m_point + '/' + 'test_sym_link'
    command = 'ln -s ' + m_point + '/test_dir ' + sym_link_path
    ret, _, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(ret, 0, "failed to create symlink for test_dir")

    command = 'stat ' + sym_link_path
    ret, out, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(ret, 0, "stat command didn't return the details "
                     "correctly")
    flag = False
    g.log.info("checking if the link is symbolic")
    if 'symbolic link' in out:
        flag = True
    self.assertTrue(flag, "the type of the link is not symbolic")
    g.log.info("the link is symbolic")

    g.log.info("checking if the sym link points to right directory")
    # NOTE(review): offsets skip "-> '" plus quoting in stat output —
    # fragile against locale/quote style; verify on target platform.
    index_start = out.find('->') + 6
    index_end = out.find("\n") - 3
    dir_pointed = out[index_start:index_end]
    flag = False
    if dir_pointed == m_point + '/' + test_dir_path:
        flag = True
    self.assertTrue(flag, "sym link does not point to correct "
                    "location")
    g.log.info("sym link points to right directory")
    g.log.info("The details of the symlink are correct")

    g.log.info("verifying that inode number of the test_dir "
               "and its sym link are different")
    command = 'ls -id ' + m_point + '/' + \
        test_dir_path + ' ' + sym_link_path
    ret, out, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(ret, 0, "inode numbers not retrieved by the "
                     "ls command")
    list_of_inode_numbers = out.split('\n')
    flag = True
    if (list_of_inode_numbers[0].split(' ')[0] ==
            list_of_inode_numbers[1].split(' ')[0]):
        flag = False
    self.assertTrue(
        flag, "the inode numbers of the dir and sym link "
        "are same")
    g.log.info("verified: inode numbers of the test_dir "
               "and its sym link are different")

    g.log.info("listing the contents of the test_dir from its sym "
               "link")
    command = 'ls ' + sym_link_path
    ret, out1, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(ret, 0, "failed to list the contents using the "
                     "sym link")
    command = 'ls ' + m_point + '/' + test_dir_path
    ret, out2, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(
        ret, 0, "failed to list the contents of the "
        "test_dir using ls command")
    flag = False
    if out1 == out2:
        flag = True
    self.assertTrue(
        flag, "the contents listed using the sym link "
        "are not the same")
    g.log.info("the contents listed using the symlink are"
               " the same as that of the test_dir")

    g.log.info("verifying that mount point doesn't display important "
               "xattrs using the symlink")
    command = 'getfattr -d -m . -e hex ' + sym_link_path
    ret, out, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(ret, 0, "failed to retrieve xattrs")
    list_xattrs = ['trusted.gfid', 'trusted.glusterfs.dht']
    flag = True
    for xattr in list_xattrs:
        if xattr in out:
            flag = False
    self.assertTrue(
        flag, "important xattrs are being compromised"
        " using the symlink at the mount point")
    g.log.info("verified: mount point doesn't display important "
               "xattrs using the symlink")

    g.log.info("verifying that mount point shows path info xattr for the"
               " test_dir and sym link and is same for both")
    path_info_1 = get_pathinfo(self.mounts[0].client_system,
                               m_point + '/' + test_dir_path)
    path_info_2 = get_pathinfo(self.mounts[0].client_system,
                               sym_link_path)
    # BUG FIX: reset flag before the comparison; the original left it
    # True from the previous check, so this assertion could never fail.
    flag = False
    if path_info_1 == path_info_2:
        flag = True
    self.assertTrue(
        flag, "pathinfos for test_dir and its sym link "
        "are not same")
    g.log.info("pathinfos for test_dir and its sym link are same")

    g.log.info("verifying readlink on sym link at mount point returns "
               "the name of the directory")
    command = 'readlink ' + sym_link_path
    ret, out, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(ret, 0, "readlink command returned an error")
    flag = False
    if out.rstrip() == m_point + '/' + test_dir_path:
        flag = True
    self.assertTrue(flag, "readlink did not return the path of the "
                    "test_dir")
    g.log.info("readlink successfully returned the path of the test_dir")
def test_metadata_self_heal_on_open_fd(self):
    """
    Description: Pro-active metadata self heal on open fd

    Steps :
    1) Create a volume.
    2) Mount the volume using FUSE.
    3) Create test executable on volume mount.
    4) While test execution is in progress, bring down brick1.
    5) From mount point, change ownership, permission, group id of
       the test file.
    6) While test execution is in progress, bring back brick1 online.
    7) Do stat on the test file to check ownership, permission,
       group id on mount point and on bricks
    8) Stop test execution.
    9) Do stat on the test file to check ownership, permission,
       group id on mount point and on bricks.
    10) There should be no pending heals in the heal info command.
    11) There should be no split-brain.
    12) Calculate arequal of the bricks and mount point and it
        should be same.
    """
    # pylint: disable=too-many-statements,too-many-locals
    # pylint: disable=too-many-branches
    bricks_list = get_all_bricks(self.mnode, self.volname)
    self.assertIsNotNone(bricks_list, 'Brick list is None')
    client = self.clients[0]

    # Create test executable file on mount point: an infinite shell
    # loop so the fd stays open while bricks go down/up.
    m_point = self.mounts[0].mountpoint
    test_file = "testfile.sh"
    cmd = ("echo 'while true; do echo 'Press CTRL+C to stop execution';"
           " done' >> {}/{}".format(m_point, test_file))
    ret, _, _ = g.run(client, cmd)
    self.assertEqual(ret, 0, "Failed to create test file")

    # Execute the test file (async so it keeps running in background)
    cmd = "cd {}; sh {}".format(m_point, test_file)
    g.run_async(client, cmd)

    # Get pid of the test file
    _cmd = "ps -aux | grep -v grep | grep testfile.sh | awk '{print $2}'"
    ret, out, _ = g.run(client, _cmd)
    self.assertEqual(ret, 0, "Failed to get pid of test file execution")

    # Bring brick1 offline
    ret = bring_bricks_offline(self.volname, [bricks_list[1]])
    self.assertTrue(
        ret, 'Failed to bring bricks {} '
        'offline'.format(bricks_list[1]))

    ret = are_bricks_offline(self.mnode, self.volname,
                             [bricks_list[1]])
    self.assertTrue(ret, 'Bricks {} are not '
                    'offline'.format(bricks_list[1]))

    # change uid, gid and permission from client
    cmd = "chown {} {}/{}".format(self.user, m_point, test_file)
    ret, _, _ = g.run(client, cmd)
    self.assertEqual(ret, 0, "chown failed")

    cmd = "chgrp {} {}/{}".format(self.user, m_point, test_file)
    ret, _, _ = g.run(client, cmd)
    self.assertEqual(ret, 0, "chgrp failed")

    cmd = "chmod 777 {}/{}".format(m_point, test_file)
    ret, _, _ = g.run(client, cmd)
    # NOTE(review): message says "chown" but this is the chmod step.
    self.assertEqual(ret, 0, "chown failed")

    # Bring brick1 online
    ret = bring_bricks_online(self.mnode, self.volname,
                              [bricks_list[1]])
    self.assertTrue(
        ret,
        'Failed to bring bricks {} online'.format(bricks_list[1]))

    ret = get_pathinfo(client, "{}/{}".format(m_point, test_file))
    self.assertIsNotNone(
        ret, "Unable to get "
        "trusted.glusterfs.pathinfo of file")
    # Build {node-ip: brick dir} for stat verification, and rebuild
    # bricks_list as "ip:brick-dir" strings (the variable is reused —
    # it no longer holds get_all_bricks output from here on).
    nodes_to_check = {}
    bricks_list = []
    for brick in ret['brickdir_paths']:
        node, brick_path = brick.split(':')
        # Presumably a leading-digits check to tell raw IPs from
        # hostnames; hostnames are resolved to IPs — TODO confirm.
        if node[0:2].isdigit():
            nodes_to_check[node] = os.path.dirname(brick_path)
            path = node + ":" + os.path.dirname(brick_path)
        else:
            nodes_to_check[gethostbyname(node)] = (
                os.path.dirname(brick_path))
            path = gethostbyname(node) + ":" + os.path.dirname(
                brick_path)
        bricks_list.append(path)
    nodes_to_check[client] = m_point

    # Verify that the changes are successful on bricks and client
    self._verify_stat_info(nodes_to_check, test_file)

    # Kill the test executable file (last split element is empty, so
    # it is dropped with [:-1])
    for pid in out.split('\n')[:-1]:
        cmd = "kill -s 9 {}".format(pid)
        ret, _, _ = g.run(client, cmd)
        self.assertEqual(ret, 0, "Failed to kill test file execution")

    # Verify that the changes are successful on bricks and client
    self._verify_stat_info(nodes_to_check, test_file)

    # Verify there are no pending heals
    heal_info = get_heal_info_summary(self.mnode, self.volname)
    self.assertIsNotNone(heal_info, 'Unable to get heal info')
    for brick in bricks_list:
        self.assertEqual(int(heal_info[brick]['numberOfEntries']),
                         0, ("Pending heal on brick {} ".format(brick)))

    # Check for split-brain
    ret = is_volume_in_split_brain(self.mnode, self.volname)
    self.assertFalse(ret, 'Volume is in split-brain state')
    g.log.info('Volume is not in split-brain state')

    # Get arequal for mount
    ret, arequals = collect_mounts_arequal(self.mounts)
    self.assertTrue(ret, 'Failed to get arequal')
    mount_point_total = arequals[0].splitlines()[-1].split(':')[-1]

    # Collecting data bricks (exclude arbiter bricks — they hold no
    # data, so their arequal cannot match the mount's)
    vol_info = get_volume_info(self.mnode, self.volname)
    self.assertIsNotNone(vol_info, 'Unable to get volume info')
    data_brick_list = []
    for brick in bricks_list:
        for brick_info in vol_info[self.volname]["bricks"]["brick"]:
            if brick_info["name"] == brick:
                if brick_info["isArbiter"] == "0":
                    data_brick_list.append(brick)
    bricks_list = data_brick_list

    # Get arequal on bricks and compare with mount_point_total
    # It should be the same
    arbiter = self.volume_type.find('arbiter') >= 0
    subvols = get_subvols(self.mnode, self.volname)['volume_subvols']
    # For arbiter volumes the last brick of each subvol is the
    # arbiter, so it is sliced off via 'stop'.
    stop = len(subvols[0]) - 1 if arbiter else len(subvols[0])
    for subvol in subvols:
        subvol = [i for i in subvol if i in bricks_list]
        if subvol:
            ret, arequal = collect_bricks_arequal(subvol[0:stop])
            self.assertTrue(
                ret, 'Unable to get arequal checksum '
                'on {}'.format(subvol[0:stop]))
            self.assertEqual(
                len(set(arequal)), 1, 'Mismatch of arequal '
                'checksum among {} is '
                'identified'.format(subvol[0:stop]))
            brick_total = arequal[-1].splitlines()[-1].split(':')[-1]
            self.assertEqual(
                brick_total, mount_point_total,
                "Arequals for mountpoint and {} "
                "are not equal".format(subvol[0:stop]))
def test_create_link_for_directory(self):
    """Verify symlink behaviour for a directory on the mount point:
    link type, target, inode, listing, xattr visibility, pathinfo
    equality and readlink output.
    """
    m_point = self.mounts[0].mountpoint
    fqpath_for_test_dir = m_point + '/test_dir'
    flag = mkdir(self.clients[0], fqpath_for_test_dir, True)
    self.assertTrue(flag, "Failed to create a directory")
    fqpath = m_point + '/test_dir/dir{1..3}'
    flag = mkdir(self.clients[0], fqpath, True)
    self.assertTrue(flag, "Failed to create sub directories")
    flag = validate_files_in_dir(self.clients[0], fqpath_for_test_dir,
                                 test_type=k.TEST_LAYOUT_IS_COMPLETE)
    self.assertTrue(flag, "Layout of test directory is not complete")
    g.log.info("Layout for directory is complete")

    sym_link_path = m_point + '/' + 'test_sym_link'
    command = 'ln -s ' + fqpath_for_test_dir + ' ' + sym_link_path
    ret, _, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(ret, 0, "Failed to create symlink for test_dir")

    command = 'stat ' + sym_link_path
    ret, out, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(ret, 0, "Stat command didn't return the details "
                     "correctly")
    flag = False
    if 'symbolic link' in out:
        flag = True
    self.assertTrue(flag, "The type of the link is not symbolic")
    g.log.info("The link is symbolic")

    flag = False
    if search(fqpath_for_test_dir, out):
        flag = True
    self.assertTrue(flag, "sym link does not point to correct "
                    "location")
    g.log.info("sym link points to right directory")
    g.log.info("The details of the symlink are correct")

    command = 'ls -id ' + fqpath_for_test_dir + ' ' + sym_link_path
    ret, out, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(ret, 0, "Inode numbers not retrieved by the "
                     "ls command")
    list_of_inode_numbers = out.split('\n')
    # BUG FIX: set flag explicitly instead of relying on the leftover
    # True value from the previous assertion.
    flag = True
    if (list_of_inode_numbers[0].split(' ')[0] ==
            list_of_inode_numbers[1].split(' ')[0]):
        flag = False
    self.assertTrue(
        flag, "The inode numbers of the dir and sym link "
        "are same")
    g.log.info("Verified: inode numbers of the test_dir "
               "and its sym link are different")

    command = 'ls ' + sym_link_path
    ret, out1, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(ret, 0, "Failed to list the contents using the "
                     "sym link")
    command = 'ls ' + fqpath_for_test_dir
    ret, out2, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(
        ret, 0, "Failed to list the contents of the "
        "test_dir using ls command")
    flag = False
    if out1 == out2:
        flag = True
    self.assertTrue(
        flag, "The contents listed using the sym link "
        "are not the same")
    g.log.info("The contents listed using the symlink are"
               " the same as that of the test_dir")

    command = 'getfattr -d -m . -e hex ' + sym_link_path
    ret, out, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(ret, 0, "failed to retrieve xattrs")
    list_xattrs = ['trusted.gfid', 'trusted.glusterfs.dht']
    # BUG FIX: initialize flag for this check instead of inheriting it.
    flag = True
    for xattr in list_xattrs:
        if xattr in out:
            flag = False
    self.assertTrue(
        flag, "Important xattrs are being compromised"
        " using the symlink at the mount point")
    g.log.info("Verified: mount point doesn't display important "
               "xattrs using the symlink")

    path_info_1 = get_pathinfo(self.mounts[0].client_system,
                               fqpath_for_test_dir)
    path_info_2 = get_pathinfo(self.mounts[0].client_system,
                               sym_link_path)
    # BUG FIX: reset flag before the comparison; the original left it
    # True from the previous check, so this assertion could never fail.
    flag = False
    if path_info_1 == path_info_2:
        flag = True
    self.assertTrue(
        flag, "Pathinfos for test_dir and its sym link "
        "are not same")
    g.log.info("Pathinfos for test_dir and its sym link are same")

    command = 'readlink ' + sym_link_path
    ret, out, _ = g.run(self.mounts[0].client_system, command)
    self.assertEqual(ret, 0, "readlink command returned an error")
    flag = False
    if out.rstrip() == fqpath_for_test_dir:
        flag = True
    self.assertTrue(flag, "readlink did not return the path of the "
                    "test_dir")
    g.log.info("readlink successfully returned the path of the test_dir")