def run(self, test_path):
    """
    RUN LOCAL: ./locktests -n <number of concurent process> -f <test file> [-T]
    eg: ./locktests -n 50 -f /file/system/to/test
    """
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)

    # Resolve the bundled locktests binary relative to this module.
    here = os.path.dirname(os.path.realpath(__file__))
    binary = os.path.join(here, 'bin/locktests')
    log_file = os.path.join(self.top_path, 'locktests.log')
    cmd = '{0} -n 50 -f {1}/locktest.dat | tee -a {2}'.format(
        binary, test_path, log_file)

    try:
        os.system('chmod +x {0}*'.format(binary))
        rc, output = utils.run_cmd(cmd)
        logger.info(output)
        logger.info("Complete: Run locktests on {0}".format(test_path))
    except Exception as e:
        logger.info("FAIL: Run locktests on {0}".format(test_path))
        raise e
    return True
def run(self, test_path):
    """cd <test_path>; ./tacl_xattr.sh """
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)

    script = os.path.join(bin_path, 'tacl_xattr.sh')
    log_file = os.path.join(self.top_path, 'tacl_xattr.log')
    # Start from a clean test dir, then run the script from inside it.
    cmd = "rm -rf {0}/*; cd {0}; {1} | tee {2}".format(
        test_path, script, log_file)

    try:
        os.system('chmod +x {0}/*'.format(bin_path))
        rc, output = utils.run_cmd(cmd, expected_rc="ignore")
        logger.info(output)
        # Fail on a non-zero exit code or any "FAILED:" marker in the log.
        if rc != 0:
            raise Exception("tacl_xattr.sh exit with !0")
        if "FAILED:" in output:
            raise Exception("FAIL: test acl_xattr on {}".format(test_path))
        logger.info("PASS: test acl_xattr on {}".format(test_path))
    except Exception as e:
        logger.info("FAIL: test acl_xattr on {}".format(test_path))
        raise e
    return True
def run(self, test_path):
    """
    cd /path/to/file/system/you/want/to/test/
    prove -r /path/to/fstest/
    """
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)

    here = os.path.dirname(os.path.realpath(__file__))
    suite_dir = os.path.join(here, 'bin')
    log_file = os.path.join(self.top_path, 'fstest.log')
    # prove runs the whole fstest suite recursively from inside test_path.
    cmd = 'cd {0}; prove -v -f -o -r {1} | tee -a {2}'.format(
        test_path, suite_dir, log_file)

    try:
        os.system('chmod +x {0}/*'.format(suite_dir))
        rc, output = utils.run_cmd(cmd)
        logger.info(output)
        if "All tests successful" not in output:
            raise Exception("FAIL: Run fstest on {0}".format(test_path))
        logger.info("Complete: Run fstest on {0}".format(test_path))
    except Exception as e:
        logger.info("FAIL: Run fstest on {0}".format(test_path))
        raise e
    return True
def run(self, test):
    """Run one configured test object and analyze its captured output."""
    logger.info(test.desc)
    self.verify()

    name = test.name
    path = test.test_path
    log_file = os.path.join(self.top_path, '{0}.log'.format(name))
    cmd = "{0} | tee -a {1}".format(test.command, log_file)
    utils.mkdir_path(path)

    try:
        if test.bin_path:
            os.system('chmod +x {0}/*'.format(test.bin_path))
        rc, output = utils.run_cmd(cmd, timeout=72000)
        logger.info(output)
        # A configured fail marker in the output is an immediate failure.
        if test.fail_flag and test.fail_flag in output:
            raise Exception("FAIL: Run {0} on {1}".format(name, path))
        passed = self.result_analyze(test, output, self.expect_failures)
        verdict = "PASS" if passed else "FAIL"
        logger.info("{0}: Run {1} on {2}".format(verdict, name, path))
        return passed
    except Exception as e:
        logger.info("FAIL: Run {0} on {1}".format(name, path))
        raise e
def run(self, test_path, nops=100, nproc=10, loops=1):
    """
    -d dir specifies the base directory for operations
    -n nops specifies the no. of operations per process (default 1)
    -p nproc specifies the no. of processes (default 1)
    -l loops specifies the no. of times the testrun should loop
    -c specifies not to remove files(cleanup) after execution
    -r specifies random name padding
    """
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)

    here = os.path.dirname(os.path.realpath(__file__))
    binary = os.path.join(here, 'bin/fsstress')
    log_file = os.path.join(self.top_path, 'fsstress.log')
    cmd = "{0} -d {1} -l {2} -n {3} -p {4} -v -w -r -c | tee -a {5}".format(
        binary, test_path, str(loops), str(nops), str(nproc), log_file)

    try:
        os.system('chmod +x {0}*'.format(binary))
        rc, output = utils.run_cmd(cmd)
        logger.info(output)
        logger.info("PASS: Run fsstress on {0}".format(test_path))
    except Exception as e:
        logger.info("FAIL: Run fsstress on {0}".format(test_path))
        raise e
    return True
def create_sub_dirs(self, drive, parent_dir):
    """Create one timestamped subdir inside parent_dir and record it."""
    name = "SubDir_" + time.strftime("%H%M%S")
    utils.mkdir_path(os.path.join(drive, parent_dir, name))
    # Track the drive-relative path for later operations.
    self.SubDirs.append(os.path.join(parent_dir, name))
def run(self, test_path, f_num, f_size):
    """
    create_file <# of 1048576 buffers to write> <name of file to create>
    ex. # create_file 10 /tmp/testfile
    f_size: MB
    """
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    bin_path = os.path.join(cur_dir, 'bin')
    cdf_bin = os.path.join(bin_path, 'create_datafile')
    try:
        os.system('chmod +x {0}/*'.format(bin_path))
        # Fan the f_num file creations out over 8 worker threads.
        pool = ThreadPoolExecutor(max_workers=8)
        futures = []
        for x in range(0, f_num):
            f_name = os.path.join(test_path, "file_{}".format(x))
            test_cmd = "{0} {1} {2}".format(cdf_bin, f_size, f_name)
            futures.append(pool.submit(utils.run_cmd, test_cmd,
                                       expected_rc='ignore'))
        pool.shutdown()  # blocks until every submitted command finishes
        rc_list = [future.result()[0] for future in futures]
        # BUG FIX: was `assert result` — asserts are stripped under `-O`,
        # silently disabling the check; raise explicitly instead.
        if -1 in rc_list:
            raise Exception(
                "create_datafile failed (rc=-1) on {}".format(test_path))
        logger.info("PASS: create {0} datafile on {1}".format(f_num,
                                                              test_path))
    except Exception as e:
        logger.info("FAIL: create datafile on {}".format(test_path))
        raise e
    return True
def run(self, test_path, loops=5, runtime=10):
    """
    Usage:
    -h      Show this help screen
    -i n    Execute test n times
    -I x    Execute test for x seconds
    """
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)

    here = os.path.dirname(os.path.realpath(__file__))
    tools_dir = os.path.join(here, 'bin')
    log_file = os.path.join(self.top_path, 'stream.log')

    try:
        os.system('chmod +x {0}/*'.format(tools_dir))
        # Run stream01 through stream05 sequentially; any TFAIL aborts.
        for idx in range(1, 6):
            binary = os.path.join(tools_dir, 'stream0{}'.format(idx))
            cmd = "cd {0}; {1} -i {2} -I {3} | tee {4}".format(
                test_path, binary, loops, runtime, log_file)
            rc, output = utils.run_cmd(cmd)
            logger.info(output)
            if "TFAIL" in output:
                raise Exception(
                    "FAIL: Run stream0{0} test on {1}".format(idx, test_path))
        logger.info("PASS: Run stream test on {0}".format(test_path))
    except Exception as e:
        logger.info("FAIL: Run stream test on {0}".format(test_path))
        raise e
    return True
def create_dirs(self, drive, dirs_num, name_prefix="Dir"):
    """Create dirs_num timestamped dirs on drive; record names in self.Dirs."""
    for idx in range(dirs_num):
        # Timestamp is taken per-directory, matching the original naming.
        dir_name = "{0}_{1}_{2}".format(
            name_prefix, time.strftime("%H%M%S"), idx)
        utils.mkdir_path(os.path.join(drive, dir_name))
        self.Dirs.append(dir_name)
def stress(self, dir_n=100):
    """100dirs*100files*(1~100MB)*"""
    self.verify()
    base = os.path.join(self.top_path, 'create_files')
    # One run() per directory: 100 files of random size 1-100 MB each.
    for idx in range(dir_n):
        work_dir = os.path.join(base, "dir_{}".format(idx))
        utils.mkdir_path(work_dir)
        assert self.run(work_dir, 100, random.randint(1, 100))
    return True
def setUp(self):
    """Record the phase start and prepare a per-case test directory."""
    case_name = self.id().split('.')[-1]
    self.phase_list.append([case_name, "Start", '', self.shortDescription()])
    self.start_time = datetime.now()
    self.print_phase()

    # The target filesystem path must already exist.
    fs_path = self.args[0].test_path
    if not os.path.isdir(fs_path):
        raise NoSuchDir(fs_path)
    sub_dir = "sanity_{0}_{1}".format(self.str_time, self.tc_loop[self.id()])
    self.test_path = os.path.join(fs_path, sub_dir)
    utils.mkdir_path(self.test_path)
def run(self, test_path, dirs_num=10, nested_level=10, files_num=100,
        file_size='1K'):
    """Drive the full local-FS file-operations scenario end to end."""
    utils.mkdir_path(test_path)
    try:
        # Directory layout: flat dirs, sub dirs, then a nested chain.
        self.test_create_dirs(test_path, dirs_num)
        self.test_create_sub_dirs(dirs_num)
        self.test_create_nested_dirs(nested_level)
        # Populate with regular files plus a few large ones.
        self.test_create_files(files_num, file_size)
        self.test_create_large_files(random.randint(1, 3), "100M")
        self.test_list_dirs()
        # Mutate and rename files, verifying md5 around each step.
        self.test_check_files_md5()
        self.test_modify_files()
        self.test_check_files_md5()
        self.test_rename_files()
        self.test_check_files_md5()
        self.test_delete_files()
        # Finally rework and remove the directory tree.
        self.test_rename_dirs()
        self.test_remove_dirs()
        logger.info(
            "PASS: Run FileOps test on local FS {0}".format(test_path))
    except Exception as e:
        logger.info(
            "FAIL: Run FileOps test on local FS {0}".format(test_path))
        raise e
    return True
def create_dirs(self, parent_path, dirs_num, name_prefix="Dir"):
    """Create dirs_num directories under parent_path; return their paths."""
    logger.info("Create {0} dirs under path: {1}".format(
        dirs_num, parent_path))
    paths = [os.path.join(parent_path, "{0}_{1}".format(name_prefix, idx))
             for idx in range(dirs_num)]
    for path in paths:
        utils.mkdir_path(path)
    return paths
def stress(self):
    """Run 4 concurrent fsstress workers, each in its own directory.

    Returns True only if every worker's run() returned True.
    """
    self.verify()
    stress_path = os.path.join(self.top_path, "fsstress")
    pool = ThreadPoolExecutor(max_workers=8)
    futures = []
    for x in range(1, 5):
        # BUG FIX: was "dir_".format(x) — no placeholder, so every worker
        # ran in the same "dir_" directory; use a real format field.
        test_path = os.path.join(stress_path, "dir_{}".format(x))
        utils.mkdir_path(test_path)
        futures.append(pool.submit(self.run, test_path,
                                   nops=1000, nproc=50, loops=3))
    pool.shutdown()  # wait for all workers
    future_result = [future.result() for future in futures]
    return False not in future_result
def stress(self):
    """Run 49 concurrent fstest instances under <top_path>/fstest."""
    self.verify()
    base = os.path.join(self.top_path, "fstest")
    pool = ThreadPoolExecutor(max_workers=4)
    futures = []
    for idx in range(1, 50):
        work_dir = os.path.join(base, str(idx))
        utils.mkdir_path(work_dir)
        futures.append(pool.submit(self.run, work_dir))
    pool.shutdown()  # wait for every instance to finish
    results = [f.result() for f in futures]
    return False not in results
def create_nested_dirs(self, parent_path, levels): tmp = [[self.TopLevelDir]] # temp list storing the TopLevel dir dir_nested_path = [] for i in range(levels): tmp.append(["S_" + str(i)]) for item in itertools.product(*tmp): dir_path = os.path.join(parent_path, *item) utils.mkdir_path(dir_path) tmp.append(dir_path) dir_nested_path = os.path.join(*item) self.Dirs.append(self.TopLevelDir) self.NestedDirs.append(dir_nested_path)
def create_nested_dirs(self, parent_path, level):
    """Create a nested dir chain Nested_0/.../Nested_{level-1} under
    parent_path and return the created path(s) as a list.
    """
    logger.info("Create nested dirs under path: {0}, level={1}".format(
        parent_path, level))
    tmp = [[parent_path]]  # temp list storing the TopLevel dir
    nested_dir_path = []
    # One single-element list per level; their product yields the single
    # full chain of path components.
    for i in range(level):
        tmp.append(["Nested_" + str(i)])
    for item in itertools.product(*tmp):
        # item[0] is parent_path itself; os.path.join collapses the
        # duplicate when parent_path is absolute — presumably always the
        # case here; verify against callers.
        dir_path = os.path.join(parent_path, *item)
        utils.mkdir_path(dir_path)
        # NOTE(review): the product was materialized when the loop
        # started, so this append has no effect on iteration.
        tmp.append(dir_path)
        nested_dir_path.append(os.path.join(*item))
    return nested_dir_path
def create(test_path, f_num, f_size):
    """Create f_num text files under test_path and log the elapsed time.

    Each file test_<idx>.txt gets 105 * f_size lines of "<idx> <line> line".
    Returns True on completion.
    """
    utils.mkdir_path(test_path)
    start = time.time()
    for idx in range(0, int(f_num)):
        file_path = os.path.join(test_path, "test_" + str(idx) + ".txt")
        # BUG FIX: the handle was opened/closed manually, leaking it if a
        # write raised; `with` guarantees the file is closed either way.
        with open(file_path, "w") as f:
            for line in range(0, 105 * int(f_size)):
                f.write(str(idx) + " " + str(line) + " line\n")
    end = time.time()
    during = end - start
    logger.info("{0}: create {1} file(s), time: {2}(seconds)".format(
        test_path, f_num, during))
    return True
def test_large_files(self):
    """Generate large files by LocalFileOps"""
    logger.info(self.test_large_files.__doc__)
    from storagetest.pkgs.fileops import LocalFileOps
    fops = LocalFileOps(self.test_path)
    test_top_path = os.path.join(self.test_path, 'large_files')
    # Parse the size range once; it is loop-invariant.
    f_size_min, f_size_max = utils.to_int_list(self.file_size_range)
    for x in range(0, self.dir_n):
        dir_path = os.path.join(test_top_path, 'dir_{0}'.format(x))
        utils.mkdir_path(dir_path)
        for n in range(0, self.file_n):
            # BUG FIX: was 'file-{0}.dat'.format(x) — the outer index, so
            # all file_n files in a dir shared one name and overwrote
            # each other; use the inner index n.
            file_path = os.path.join(dir_path, 'file-{0}.dat'.format(n))
            self.assertTrue(
                fops.create_large_size_file(file_path, f_size_max))
def run(self, test_path, loops=5, runtime=10):
    """
    Usage: ./read_all -d /mnt/test/ -I 30 -v
    Options
    -------
    -h       Prints this help
    -i n     Execute test n times
    -I x     Execute test for n seconds
    -C ARG   Run child process with ARG arguments (used internally)
    -v       Print information about successful reads.
    -q       Don't print file read or open errors.
    -d path  Path to the directory to read from, defaults to /sys.
    -e pattern Ignore files which match an 'extended' pattern, see fnmatch(3).
    -r count The number of times to schedule a file for reading.
    -w count Set the worker count limit, the default is 15.
    -W count Override the worker count. Ignores (-w) and the processor count.
    -p       Drop privileges; switch to the nobody user.
    """
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)

    here = os.path.dirname(os.path.realpath(__file__))
    binary = os.path.join(here, 'bin/read_all')
    log_file = os.path.join(self.top_path, 'read_all.log')
    cmd = "{0} -d {1} -i {2} -I {3} -v | tee {4}".format(
        binary, test_path, loops, runtime, log_file)

    try:
        os.system('chmod +x {0}*'.format(binary))
        rc, output = utils.run_cmd(cmd)
        logger.info(output)
        if "TFAIL" in output:
            raise Exception(
                "FAIL: Run read_all test on {0}".format(test_path))
        logger.info("PASS: Run read_all test on {0}".format(test_path))
    except Exception as e:
        logger.info("FAIL: Run read_all test on {0}".format(test_path))
        raise e
    return True
def run(self, test_path, f_size, p_size, loops=1):
    """
    usage: ./fs_di -d TMPDIR [-h] [-l # of LOOPS ] [-s SIZE in Mb][-S partition SIZE in Mb]
    -d TMPDIR       Directory where temporary files will be created.
    -h              Help. Prints all available options.
    -l # of LOOPS   The number of times to run the test. Default=10.
    -s SIZE in Mb   The size of the data file to create. Default=30Mb.
                    A "0" means random sizes from 10-500Mb.
    -S SIZE in Mb   Size of usable partition (in MBs) on which the testing
                    is carried out (needs to be passed for fragmented file test)
    -v              Verbose output.
    example: ./fs_di -d /mnt/cifsmount -l 20 -s 100 -S 200
    example: ./fs_di -d /mnt/cifsmount -l 20 -s 100
    """
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    bin_path = os.path.join(cur_dir, 'bin')
    fs_di_bin = os.path.join(bin_path, 'fs_di')
    test_log = os.path.join(self.top_path, 'fs_data_integrity.log')
    test_cmd = "{0} -d {1} -l {2} -s {3} -S {4} | tee -a {5}".format(
        fs_di_bin, test_path, loops, f_size, p_size, test_log)
    try:
        os.system('chmod +x {0}/*'.format(bin_path))
        rc, output = utils.run_cmd(test_cmd)
        # BUG FIX: was '\n'.format(output...) — no placeholder, so only a
        # newline was logged and the command output was discarded.
        logger.info('\n{0}'.format(output.strip('\n')))
        if "Test failed" in output:
            raise Exception(
                "FAIL: Run fs_data_integrity on {}".format(test_path))
        logger.info("PASS: Run fs_data_integrity on {}".format(test_path))
    except Exception as e:
        logger.info("FAIL: Run fs_data_integrity on {}".format(test_path))
        raise e
    return True
def new_workload(self, workload_conf_template):
    """Copy a filebench workload template into ./tmp, point its $dir at
    self.test_path, ensure a 'run 60' directive, and return the new path.
    """
    logger.info('Load: {0}'.format(workload_conf_template))
    conf_name = os.path.split(workload_conf_template)[-1]
    tmp_path = os.path.join(os.getcwd(), 'tmp')
    utils.mkdir_path(tmp_path)
    test_conf_name = 'filebench-{0}'.format(conf_name)
    test_conf = os.path.join(tmp_path, test_conf_name)
    rc, output = utils.run_cmd('cp {0} {1}'.format(workload_conf_template,
                                                   test_conf))
    print(output)
    # Rewrite "set $dir=/tmp" to the test path ('/' escaped for sed).
    # Raw strings keep the same bytes without invalid-escape warnings.
    config_cmd = r"sed -i 's/set \$dir=\/tmp/set \$dir={test_path}/g' {test_conf}".format(
        test_path=self.test_path.replace('/', r'\/'), test_conf=test_conf)
    rc, output = utils.run_cmd(config_cmd)
    print(output)
    # Make sure the workload has a bounded runtime.
    with open(test_conf, "a+") as f:
        # BUG FIX: 'a+' opens positioned at EOF, so read() returned ''
        # and 'run 60' was appended on every call; rewind before reading.
        f.seek(0)
        f_text = f.read()
        if f_text.find('run 60') == -1:
            f.write('run 60\n')
    return test_conf