def attr_ready():
    """Return True if the 'attr' binary is installed, else False.

    Fix: the original fell through to ``return True`` even when the
    ``which attr`` probe failed, unlike the sibling ``*_ready`` checks.
    """
    try:
        utils.run_cmd("which attr", expected_rc=0)
    except Exception as e:
        # logger.warning(e)
        logger.warning("attr not installed.(apt-get install -y attr)")
        return False
    return True
def fio_ready():
    """Return True if the 'fio' binary is installed, else False.

    Fix: the original fell through to ``return True`` even when the
    ``which fio`` probe failed, unlike the sibling ``*_ready`` checks.
    """
    try:
        utils.run_cmd("which fio", expected_rc=0)
    except Exception as e:
        # logger.warning(e)
        logger.warning("fio not installed.(apt-get install -y fio)")
        return False
    return True
def prove_ready():
    """Return True when the perl 'prove' runner is available, else False."""
    try:
        # 'prove -h' exits 1 when the tool is present.
        utils.run_cmd('prove -h', expected_rc=1)
        return True
    except Exception:
        logger.warning("perl not installed.(yum install perl-Test-Harness)")
        return False
def filebench_ready():
    """Return True when the filebench binary is available, else False."""
    try:
        utils.run_cmd('which filebench', expected_rc=0)
        return True
    except Exception:
        logger.warning("filebench not installed.(yum install -y filebench)")
        return False
def verify(self):
    """Validate the test environment before running.

    Raises:
        PlatformError: not running on a posix system.
        NoSuchDir: ``self.top_path`` is not an existing directory.
        NoSuchBinary: the 'attr' tool is not installed.
    """
    if os.name != "posix":
        raise PlatformError("Just support for linux machine!")
    if not os.path.isdir(self.top_path):
        raise NoSuchDir(self.top_path)
    try:
        utils.run_cmd("which attr", expected_rc=0)
    except Exception as err:
        logger.error(err)
        raise NoSuchBinary(
            "attr, try install it.(apt-get install -y attr)")
def run(self, test_path):
    """
    RUN LOCAL:
    ./locktests -n <number of concurent process> -f <test file> [-T]
    eg:
    ./locktests -n 50 -f /file/system/to/test
    """
    # NOTE: the docstring above is logged at runtime, so its content is kept.
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)
    here = os.path.dirname(os.path.realpath(__file__))
    binary = os.path.join(here, 'bin/locktests')
    log_file = os.path.join(self.top_path, 'locktests.log')
    command = '{0} -n 50 -f {1}/locktest.dat | tee -a {2}'.format(
        binary, test_path, log_file)
    try:
        os.system('chmod +x {0}*'.format(binary))
        rc, output = utils.run_cmd(command)
        logger.info(output)
        logger.info("Complete: Run locktests on {0}".format(test_path))
    except Exception:
        logger.info("FAIL: Run locktests on {0}".format(test_path))
        raise
    return True
def run(self, test_path):
    """
    cd /path/to/file/system/you/want/to/test/
    prove -r /path/to/fstest/
    """
    # NOTE: the docstring above is logged at runtime, so its content is kept.
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)
    here = os.path.dirname(os.path.realpath(__file__))
    suite_dir = os.path.join(here, 'bin')
    log_file = os.path.join(self.top_path, 'fstest.log')
    command = 'cd {0}; prove -v -f -o -r {1} | tee -a {2}'.format(
        test_path, suite_dir, log_file)
    try:
        os.system('chmod +x {0}/*'.format(suite_dir))
        rc, output = utils.run_cmd(command)
        logger.info(output)
        # prove prints this summary line only when every subtest passed.
        if "All tests successful" not in output:
            raise Exception("FAIL: Run fstest on {0}".format(test_path))
        logger.info("Complete: Run fstest on {0}".format(test_path))
    except Exception:
        logger.info("FAIL: Run fstest on {0}".format(test_path))
        raise
    return True
class FilebenchTestCase(unittest.TestCase): _test_path = "/tmp" # Verify if os.name != "posix": raise PlatformError("fs_test just support for linux machine!") if not os.path.isdir(_test_path): raise NoSuchDir(_test_path) rc, output = utils.run_cmd('which filebench') if not output.strip("\n") or 'no filebench' in output: logger.warning("yum install filebench -y") raise NoSuchBinary("filebench not installed") def setUp(self): logger.info("Filebench Test Start ...") def tearDown(self): logger.info("Filebench Test Complete!") fb_parameterized = [] fb_test = FileBench(_test_path) logger.info(fb_test.__doc__) for test in fb_test.tests_generator(): p = param(test.name, test) fb_parameterized.append(p) @parameterized.expand(fb_parameterized, name_func=custom_name_func()) def test_filebench(self, _, test): self.fb_test.run(test)
def run(self, test):
    """Run one test case and return True/False from result analysis.

    Fixes: removed the unreachable trailing ``return True`` (the try
    block always returns or raises), the no-op ``finally: pass``, and
    re-raise with bare ``raise`` to preserve the original traceback.

    :param test: test descriptor with name/test_path/bin_path/fail_flag/command
    :return: pass_flag from result_analyze
    :raises Exception: when fail_flag is found in output or the command fails
    """
    logger.info(test.desc)
    self.verify()
    test_name = test.name
    test_path = test.test_path
    binary_path = test.bin_path
    fail_flag = test.fail_flag
    test_log = os.path.join(self.top_path, '{0}.log'.format(test_name))
    test_cmd = "{0} | tee -a {1}".format(test.command, test_log)
    utils.mkdir_path(test_path)
    try:
        if binary_path:
            os.system('chmod +x {0}/*'.format(binary_path))
        # 20h timeout: long-running stress suites go through this path.
        rc, output = utils.run_cmd(test_cmd, timeout=72000)
        logger.info(output)
        if fail_flag and fail_flag in output:
            raise Exception("FAIL: Run {0} on {1}".format(
                test_name, test_path))
        pass_flag = self.result_analyze(test, output, self.expect_failures)
        if pass_flag:
            logger.info("PASS: Run {0} on {1}".format(
                test_name, test_path))
        else:
            logger.info("FAIL: Run {0} on {1}".format(
                test_name, test_path))
        return pass_flag
    except Exception:
        logger.info("FAIL: Run {0} on {1}".format(test_name, test_path))
        raise
def run(self, test_path, loops=5, runtime=10):
    """
    Usage:
    -h       Show this help screen
    -i n     Execute test n times
    -I x     Execute test for x seconds
    """
    # NOTE: the docstring above is logged at runtime, so its content is kept.
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)
    here = os.path.dirname(os.path.realpath(__file__))
    tools_dir = os.path.join(here, 'bin')
    log_file = os.path.join(self.top_path, 'stream.log')
    try:
        os.system('chmod +x {0}/*'.format(tools_dir))
        # Run the five stream01..stream05 binaries in sequence.
        for idx in range(1, 6):
            binary = os.path.join(tools_dir, 'stream0{}'.format(idx))
            command = "cd {0}; {1} -i {2} -I {3} | tee {4}".format(
                test_path, binary, loops, runtime, log_file)
            rc, output = utils.run_cmd(command)
            logger.info(output)
            if "TFAIL" in output:
                raise Exception(
                    "FAIL: Run stream0{0} test on {1}".format(idx, test_path))
        logger.info("PASS: Run stream test on {0}".format(test_path))
    except Exception:
        logger.info("FAIL: Run stream test on {0}".format(test_path))
        raise
    return True
def run(self, test_path):
    """cd <test_path>; ./tacl_xattr.sh """
    # NOTE: the docstring above is logged at runtime, so its content is kept.
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)
    # NOTE(review): `bin_path` is not defined in this function — it is
    # presumably a module-level path; confirm against the full file.
    script = os.path.join(bin_path, 'tacl_xattr.sh')
    log_file = os.path.join(self.top_path, 'tacl_xattr.log')
    command = "rm -rf {0}/*; cd {0}; {1} | tee {2}".format(
        test_path, script, log_file)
    try:
        os.system('chmod +x {0}/*'.format(bin_path))
        # rc is checked manually below, so suppress run_cmd's own check.
        rc, output = utils.run_cmd(command, expected_rc="ignore")
        logger.info(output)
        if rc != 0:
            raise Exception("tacl_xattr.sh exit with !0")
        if "FAILED:" in output:
            raise Exception("FAIL: test acl_xattr on {}".format(test_path))
        logger.info("PASS: test acl_xattr on {}".format(test_path))
    except Exception:
        logger.info("FAIL: test acl_xattr on {}".format(test_path))
        raise
    return True
def run(self, test_path, nops=100, nproc=10, loops=1):
    """
    -d dir      specifies the base directory for operations
    -n nops     specifies the no. of operations per process (default 1)
    -p nproc    specifies the no. of processes (default 1)
    -l loops    specifies the no. of times the testrun should loop
    -c          specifies not to remove files(cleanup) after execution
    -r          specifies random name padding
    """
    # NOTE: the docstring above is logged at runtime, so its content is kept.
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)
    here = os.path.dirname(os.path.realpath(__file__))
    binary = os.path.join(here, 'bin/fsstress')
    log_file = os.path.join(self.top_path, 'fsstress.log')
    command = "{0} -d {1} -l {2} -n {3} -p {4} -v -w -r -c | tee -a {5}".format(
        binary, test_path, str(loops), str(nops), str(nproc), log_file)
    try:
        os.system('chmod +x {0}*'.format(binary))
        rc, output = utils.run_cmd(command)
        logger.info(output)
        logger.info("PASS: Run fsstress on {0}".format(test_path))
    except Exception:
        logger.info("FAIL: Run fsstress on {0}".format(test_path))
        raise
    return True
def _is_path_exist(self, path):
    """Probe `path` with `ls` and return True if it exists."""
    rc, listing = utils.run_cmd('ls {0}'.format(path), expected_rc='ignore')
    if 'No such file or directory' in listing:
        logger.warning(listing)
        return False
    logger.info(listing)
    return True
def verify(self):
    """Check platform, top_path and filebench availability.

    Raises PlatformError / NoSuchDir / NoSuchBinary on failure.
    """
    if os.name != "posix":
        raise PlatformError("fs_test just support for linux machine!")
    if not os.path.isdir(self.top_path):
        raise NoSuchDir(self.top_path)
    rc, which_out = utils.run_cmd('which filebench')
    # Empty output or a "no filebench" message means the tool is absent.
    if not which_out.strip("\n") or 'no filebench' in which_out:
        logger.warning("yum install filebench -y")
        raise NoSuchBinary("filebench not installed")
def verify(self):
    """Check platform, top_path and smbtorture availability.

    Raises PlatformError / NoSuchDir / NoSuchBinary on failure.
    """
    if os.name != "posix":
        raise PlatformError("smbtorture just support for linux machine!")
    if not os.path.isdir(self.top_path):
        raise NoSuchDir(self.top_path)
    rc, which_out = utils.run_cmd('which smbtorture')
    # Empty output or a "no smbtorture" message means the tool is absent.
    if not which_out.strip("\n") or 'no smbtorture' in which_out:
        logger.warning("yum install smbtorture -y")
        raise NoSuchBinary("smbtorture not installed")
def clean_up(rm_path, rm_file):
    """
    rm path_name/dd_r_*
    :return:
    """
    logger.info('>> Clean up files ...')
    command = "rm -f {0}/{1}*".format(rm_path, rm_file)
    rc, output = utils.run_cmd(command, 0)
    return rc, output
def sync_dropcache():
    """
    sync; echo 3 > /proc/sys/vm/drop_caches
    :return:
    """
    logger.info('>> sync and clean drop_caches')
    command = "sync; echo 3 > /proc/sys/vm/drop_caches"
    rc, output = utils.run_cmd(command, 0)
    return rc, output
def _mknod_device(self, device):
    """
    mknod /dev/dpl1 b 44 16
    :param device:
    :return:
    """
    # Minor number = trailing digits of the device name * 16.
    dev_index = int(re.search(r'\d+$', device).group())
    command = 'mknod {0} b 44 {1}'.format(device, dev_index * 16)
    rc, output = utils.run_cmd(command, expected_rc=0)
    logger.info(output)
    return rc
def umount_fs(self, path, options=''):
    """
    Unmount filesystems.
    :param path:explicitly <source> | <directory>
    :param options: eg:-a -l -f, details FYI: umount -h
    :return:
    """
    rc, output = utils.run_cmd(
        'umount {0} {1}'.format(options, path), expected_rc=0, tries=3)
    logger.info(output)
    return rc
def get_mount_point(self, source, target, types='ext4'):
    """
    Get the mount point by source and types
    :param source:
    :param target:
    :param types:
    :return:
    """
    grep_cmd = "/bin/mount | grep %s | grep %s | grep %s | awk '{print $3}'" % (
        source, target, types)
    rc, mount_point = utils.run_cmd(grep_cmd, expected_rc='ignore')
    return mount_point.strip('\n')
def new_workload(self, workload_conf_template):
    """Create a per-run filebench workload conf from a template.

    Copies the template into ./tmp/, rewrites its $dir to self.test_path,
    and appends 'run 60' when the file does not already contain it.

    Fix: the file is opened in "a+" mode, which positions at EOF on
    POSIX, so the original ``f.read()`` always returned '' and 'run 60'
    was appended on every call — seek(0) before reading.

    :param workload_conf_template: path to the template .f file
    :return: path of the generated workload conf
    """
    logger.info('Load: {0}'.format(workload_conf_template))
    conf_name = os.path.split(workload_conf_template)[-1]
    tmp_path = os.path.join(os.getcwd(), 'tmp')
    utils.mkdir_path(tmp_path)
    test_conf_name = 'filebench-{0}'.format(conf_name)
    test_conf = os.path.join(tmp_path, test_conf_name)
    rc, output = utils.run_cmd('cp {0} {1}'.format(workload_conf_template, test_conf))
    print(output)

    # modify the tmp workload conf file: dir
    config_cmd = "sed -i 's/set \$dir=\/tmp/set \$dir={test_path}/g' {test_conf}".format(
        test_path=self.test_path.replace('/', '\/'), test_conf=test_conf)
    rc, output = utils.run_cmd(config_cmd)
    print(output)

    # add run time (only once per file)
    with open(test_conf, "a+") as f:
        f.seek(0)  # "a+" starts at EOF; rewind so read() sees the content
        f_text = f.read()
        if f_text.find('run 60') == -1:
            f.write('run 60\n')
    return test_conf
def mount_fs(self, source, target, types, options='discard'):
    """
    mount a filesystem.
    :param source:explicitly specifies source (path, label, uuid)
    :param target:explicitly specifies mountpoint
    :param types:<-t> limit the set of filesystem types
    :param options:<-o> comma-separated list of mount options
    :return:
    """
    # if the mount point path not exist, will create a new one
    self._validate_directory(target)
    mount_cmd = 'mount -t %s -o %s %s %s ' % (types, options, source, target)
    rc, output = utils.run_cmd(mount_cmd, expected_rc=0, tries=3)
    logger.info(output)
    return rc
def mkfs_filesystem(self, device, types='ext4', options='-F -b4096 -E nodiscard'):
    """
    Make a Linux filesystem.
    :param device:explicitly specifies device path, eg: /dev/dpl1
    :param types:
    :param options:
    :return:
    """
    # e.g. mkfs.ext4 -F -b4096 -E nodiscard /dev/dpl1
    mkfs_cmd = 'mkfs.{0} {1} {2}'.format(types, options, device)
    rc, output = utils.run_cmd(mkfs_cmd, expected_rc=0)
    logger.info(output)
    return rc
def run(self, test_path, loops=5, runtime=10):
    """
    Usage: ./read_all -d /mnt/test/ -I 30 -v
    Options
    -------
    -h       Prints this help
    -i n     Execute test n times
    -I x     Execute test for n seconds
    -C ARG   Run child process with ARG arguments (used internally)
    -v       Print information about successful reads.
    -q       Don't print file read or open errors.
    -d path  Path to the directory to read from, defaults to /sys.
    -e pattern Ignore files which match an 'extended' pattern, see fnmatch(3).
    -r count The number of times to schedule a file for reading.
    -w count Set the worker count limit, the default is 15.
    -W count Override the worker count. Ignores (-w) and the processor count.
    -p       Drop privileges; switch to the nobody user.
    """
    # NOTE: the docstring above is logged at runtime, so its content is kept.
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)
    here = os.path.dirname(os.path.realpath(__file__))
    binary = os.path.join(here, 'bin/read_all')
    log_file = os.path.join(self.top_path, 'read_all.log')
    command = "{0} -d {1} -i {2} -I {3} -v | tee {4}".format(
        binary, test_path, loops, runtime, log_file)
    try:
        os.system('chmod +x {0}*'.format(binary))
        rc, output = utils.run_cmd(command)
        logger.info(output)
        if "TFAIL" in output:
            raise Exception("FAIL: Run read_all test on {0}".format(test_path))
        logger.info("PASS: Run read_all test on {0}".format(test_path))
    except Exception:
        logger.info("FAIL: Run read_all test on {0}".format(test_path))
        raise
    return True
def get_default_tests():
    """Get the default test case list by command"""
    case_info = defaultdict(list)
    rc, output = utils.run_cmd("smbtorture", expected_rc=0)
    lines = output.split('Tests are:')[1].strip('\r\n').split('\n')
    # Lines like "(SUITE):" start a new suite; following lines list cases.
    suite_re = re.compile(r'\((\S*)\):')
    current_suite = ""
    for line in lines:
        if not line or line == '\r' or "The default test is ALL" in line:
            continue
        matched = suite_re.findall(line)
        if matched:
            current_suite = matched[0]
        elif current_suite:
            case_info[current_suite].extend(line.strip(' \r').split(' '))
    return case_info
def run(self, test_path, f_size, p_size, loops=1):
    """
    usage: ./fs_di -d TMPDIR [-h] [-l # of LOOPS ] [-s SIZE in Mb][-S partition SIZE in Mb]
    -d TMPDIR       Directory where temporary files will be created.
    -h              Help. Prints all available options.
    -l # of LOOPS   The number of times to run the test. Default=10.
    -s SIZE in Mb   The size of the data file to create. Default=30Mb. A "0" means random sizes from 10-500Mb.
    -S SIZE in Mb   Size of usable partition (in MBs) on which the testing is carried out (needs to be passed for fragmented file test)
    -v              Verbose output.
    example: ./fs_di -d /mnt/cifsmount -l 20 -s 100 -S 200
    example: ./fs_di -d /mnt/cifsmount -l 20 -s 100
    """
    logger.info(self.run.__doc__)
    utils.mkdir_path(test_path)
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    bin_path = os.path.join(cur_dir, 'bin')
    fs_di_bin = os.path.join(bin_path, 'fs_di')
    test_log = os.path.join(self.top_path, 'fs_data_integrity.log')
    test_cmd = "{0} -d {1} -l {2} -s {3} -S {4} | tee -a {5}".format(
        fs_di_bin, test_path, loops, f_size, p_size, test_log)
    try:
        os.system('chmod +x {0}/*'.format(bin_path))
        rc, output = utils.run_cmd(test_cmd)
        # Fix: original logged '\n'.format(...) — a format string with no
        # placeholder, which silently discarded the command output.
        logger.info('\n{0}'.format(output.strip('\n')))
        if "Test failed" in output:
            raise Exception(
                "FAIL: Run fs_data_integrity on {}".format(test_path))
        logger.info("PASS: Run fs_data_integrity on {}".format(test_path))
    except Exception:
        logger.info("FAIL: Run fs_data_integrity on {}".format(test_path))
        raise
    return True
def get_all_devices(self, pattern='/dev/*'):
    """List device paths matching `pattern` via `ls` (rc ignored)."""
    rc, listing = utils.run_cmd('ls %s' % pattern, expected_rc='ignore')
    logger.info(listing)
    return listing.strip('\n').split(' ')
def _validate_directory(self, directory):
    """Create `directory` (mkdir -p) when it does not already exist."""
    if self._is_path_exist(directory):
        return
    logger.info('Create a new one')
    utils.run_cmd('mkdir -p {0}'.format(directory), expected_rc=0)