def test_b_run2threads(self):
    self.cleanup_files()
    write_pickle(self.params.param_pickle_path, self.params)
    t1 = TestThread(FsDriftWorkload(self.params), 'fsdthr-1')
    t2 = TestThread(FsDriftWorkload(self.params), 'fsdthr-2')
    verbosity_fn = os.path.join(self.params.network_shared_path, 'verbosity')
    with open(verbosity_fn, 'w') as vf:
        vf.write('0xffffffff')
    threads = [t1, t2]
    for t in threads:
        t.start()
    mylog = fsd_log.start_log('run2threads')
    mylog.info('threads started')
    time.sleep(2)
    touch(self.params.starting_gun_path)
    mylog.info('starting gun fired')
    for t in threads:
        t.join()
    mylog.info('threads done')
    totals = FSOPCounters()
    for t in threads:
        print(t.worker.ctrs)
        t.worker.ctrs.add_to(totals)
        t.worker.chk_status()
    print('total counters:')
    print(totals)
def run_workload():
    log = fsd_log.start_log('fs-drift')

    # if a --host-set parameter was passed, it's a multi-host workload;
    # each remote instance will wait until all instances
    # have reached the starting gate

    try:
        params = opts.parseopts()
        params.validate()
    except FsDriftException as e:
        log.error(str(e))
        log.info('use --help option to get CLI syntax')
        sys.exit(NOTOK)

    print(params)
    if os.getenv('DEBUG'):
        log.setLevel(logging.DEBUG)

    try:
        sync_files.create_top_dirs(params)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise FsDriftException(
                'you must create the top-level directory %s' %
                params.top_directory)

    # put parameters where all threads can see them
    write_pickle(params.param_pickle_path, params)

    if params.host_set != [] and not params.is_slave:
        return run_multi_host_workload(params, log)
    return multi_thread_workload.run_multi_thread_workload(params)
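# A minimal sketch (not part of the module above) of how run_workload() is
# typically wired up as a script entry point; the OK/NOTOK exit-code
# convention is assumed from the surrounding code.
if __name__ == '__main__':
    sys.exit(run_workload())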
def setUp(self):
    self.params = opts.parseopts()
    self.params.duration = 2
    self.params.workload_table_csv_path = '/tmp/weights.csv'
    self.log = fsd_log.start_log('invoke_process')
import sys
import os
import time
import errno
import logging
import socket
import argparse

from fsd_log import start_log

OK = 0
NOTOK = 1

verbose = (os.getenv("VERBOSE") is not None)
log = start_log('launcher', verbosity=verbose)


def myabort(msg):
    log.error(msg)
    sys.exit(NOTOK)


substitute_dir = None
top_dir = None

# get short hostname
as_host = socket.gethostname().split('.')[0]

parser = argparse.ArgumentParser(
    description='parse fs-drift/launch_daemon.py parameters')
a = parser.add_argument
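# A hedged sketch of how the 'a' alias above is typically used to register
# launcher options; the option names and defaults below are hypothetical
# illustrations, not the actual launch_daemon.py argument list.
a('--top', help='top-level directory used by the fs-drift daemon',
  default=top_dir)
a('--substitute-top', help='directory to substitute for the top directory',
  default=substitute_dir)
a('--as-host', help='hostname to report results under', default=as_host)
args = parser.parse_args()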
        'read, 2',
        'random_read, 2',
        'random_write, 2',
        'create, 6',
        'truncate, 0.2',
        'append, 4',
        'delete, 0.2',
        'hardlink, 0.3',
        'softlink, 0.3',
        'rename, 1',
        'remount, 0.01',
        ]))
params = opts.parseopts()
params.workload_table_csv_path = '/tmp/weights.csv'
weights = {}
log = fsd_log.start_log('fsdevent')
weights = parse_weights(params)
normalized_weights = normalize_weights(weights)
print_weights(normalized_weights)
opcode_count = len(FSOPCtx.opname_to_opcode.keys())
histogram = [0 for k in range(0, opcode_count)]

# generate 100000 events and analyze frequency
for i in range(0, 100000):
    opcode = gen_event(normalized_weights)
    histogram[opcode] += 1

# print out histogram results
for k in range(0, opcode_count):
    try:
        name = FSOPCtx.opcode_to_opname[k]
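# A simplified sketch of the weighted-choice idea that gen_event() exercises
# above; this is an illustration, not the module's actual implementation.
# Normalized weights are treated as probabilities summing to 1.0, and a
# uniform random number selects the opcode whose cumulative weight covers it.
import random

def pick_opcode(normalized, rng=random.random):
    # normalized: assumed here to be a dict mapping opcode -> probability
    r = rng()
    cumulative = 0.0
    for opcode, probability in sorted(normalized.items()):
        cumulative += probability
        if r < cumulative:
            return opcode
    # floating-point rounding can leave r just above the final cumulative sum
    return max(normalized)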
    except OSError as e:
        if e.errno == errno.ENOENT:
            c.e_dir_not_found += 1
        else:
            return self.scallerr('readdir', dirpath, e)
    return OK


# unit test

if __name__ == "__main__":
    import logging
    import opts
    import fsd_log

    options = opts.parseopts()
    log = fsd_log.start_log('fsop-unittest')
    log.info('hi there')
    if '/tmp/' not in options.top_directory:
        raise FsDriftException('bad top directory')
    os.system('rm -rf %s' % options.top_directory)
    os.makedirs(options.top_directory)
    os.chdir(options.top_directory)
    log.info('chdir to %s' % options.top_directory)
    ctrs = FSOPCounters()
    ctx = FSOPCtx(options, log, ctrs, 'test-host', 'test-tid')
    ctx.verbosity = -1
    rc = ctx.op_create()
    assert (rc == OK)
    rc = ctx.op_read()
    assert (rc == OK)
    rc = ctx.op_random_read()
def start_log(self):
    self.log = fsd_log.start_log('thrd.%s' % self.tid,
                                 verbosity=self.verbosity)
def terminate(self):
    if self.popen_obj is not None:
        try:
            self.popen_obj.terminate()
        except OSError as e:
            if e.errno != errno.ESRCH:
                raise e
            self.log.debug('tried to kill non-existent process %d',
                           self.popen_obj.pid)
    self.status = NOTOK


if __name__ == '__main__':
    import unittest2

    log = start_log('ssh_thread')

    class Test(unittest2.TestCase):

        def setUp(self):
            pass

        def test_a_mkThrd(self):
            sthrd = ssh_thread(log, 'localhost', 'sleep 1')
            sthrd.start()
            sthrd.join()
            if sthrd.status != OK:
                raise FsDriftException('return status %d' % sthrd.status)

        def test_b_abortThrd(self):
            sthrd = ssh_thread(log, 'localhost', 'sleep 60')
            sthrd.start()
def run_multi_thread_workload(prm):

    host = prm.as_host
    if host is None:
        host = 'localhost'
    prm_slave = (prm.host_set != [])
    # FIXME: get coherent logging level interface
    verbose = (os.getenv('LOGLEVEL_DEBUG') is not None)
    host_startup_timeout = 5 + len(prm.host_set) // 3

    # for each thread set up an FsDriftWorkload instance,
    # create a thread instance, and delete the thread-ready file

    thread_list = create_worker_list(prm)
    my_host_invoke = thread_list[0].invoke
    my_log = fsd_log.start_log('%s.master' % host)
    my_log.debug(prm)

    # start threads, wait for them to reach the starting gate;
    # to do this, look for thread-ready files

    for t in thread_list:
        ensure_deleted(t.invoke.gen_thread_ready_fname(t.invoke.tid))
    for t in thread_list:
        t.start()
    my_log.debug('started %d worker threads on host %s' %
                 (len(thread_list), host))

    # wait for all threads to reach the starting gate;
    # this makes it more likely that they will start simultaneously

    abort_fname = prm.abort_path
    thread_count = len(thread_list)
    thread_to_wait_for = 0
    startup_timeout = 3
    sec = 0.0
    while sec < startup_timeout:
        for k in range(thread_to_wait_for, thread_count):
            t = thread_list[k]
            fn = t.invoke.gen_thread_ready_fname(t.invoke.tid)
            if not os.path.exists(fn):
                my_log.debug('thread %d thread-ready file %s not found yet with %f sec left' %
                             (k, fn, (startup_timeout - sec)))
                break
            thread_to_wait_for = k + 1
            # we only time out if no more threads have reached the starting
            # gate within startup_timeout sec
            sec = 0.0
        if thread_to_wait_for == thread_count:
            break
        if os.path.exists(abort_fname):
            break
        sec += 0.5
        time.sleep(0.5)

    # if all threads didn't make it to the starting gate

    if thread_to_wait_for < thread_count:
        abort_test(abort_fname, thread_list)
        raise FsDriftException('only %d threads reached starting gate' %
                               thread_to_wait_for)

    # declare that this host is at the starting gate

    if prm_slave:
        host_ready_fn = gen_host_ready_fname(prm, prm.as_host)
        my_log.debug('host %s creating ready file %s' %
                     (my_host_invoke.onhost, host_ready_fn))
        common.touch(host_ready_fn)

    sg = prm.starting_gun_path
    if not prm_slave:
        my_log.debug('wrote starting gate file')
        sync_files.write_sync_file(sg, 'hi there')

    # wait for the starting-gun file to be created by the test driver;
    # every second we resume the scan from the last host file not found

    if prm_slave:
        my_log.debug('awaiting ' + sg)
        for sec in range(0, host_startup_timeout + 3):
            # hack to ensure that directory is up to date
            # ndlist = os.listdir(my_host_invoke.network_dir)
            # if verbose: print(str(ndlist))
            if os.path.exists(sg):
                break
            if os.path.exists(prm.abort_path):
                my_log.info('saw abort file %s, aborting test' % prm.abort_path)
                break
            time.sleep(1)
        if not os.path.exists(sg):
            abort_test(prm.abort_path, thread_list)
            raise Exception('starting signal not seen within %d seconds' %
                            host_startup_timeout)
    if verbose:
        print('starting test on host ' + host + ' in 2 seconds')
    time.sleep(2 + random.random())  # let other hosts see starting gate file

    # FIXME: don't time out the test;
    # instead check thread progress and abort if you see any of them stalled,
    # but if servers are heavily loaded you can't rely on the filesystem

    # wait for all threads on this host to finish

    for t in thread_list:
        my_log.debug('waiting for thread %s' % t.invoke.tid)
        t.retrieve()
        t.join()

    # if not a slave of some other host, print results (for this host)

    if not prm_slave:
        try:
            worker_list = [t.invoke for t in thread_list]
            output_results.output_results(prm, worker_list)
        except FsDriftException as e:
            print('ERROR: ' + str(e))
            return NOTOK
    else:
        # if we are participating in a multi-host test,
        # write out this host's result in pickle format
        # so the test driver can pick up the result
        result_filename = host_result_filename(prm, prm.as_host)
        my_log.debug('saving result to filename %s' % result_filename)
        worker_list = [t.invoke for t in thread_list]
        sync_files.write_pickle(result_filename, worker_list)
        time.sleep(1.2)  # for benefit of NFS with actimeo=1
    return OK
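# A hedged sketch (assumed, not taken from this module) of how the multi-host
# driver could collect the per-host pickle written above; host_result_filename()
# is reused from the code above, while collect_host_results() and the
# aggregation loop are hypothetical illustrations.
import pickle

def collect_host_results(prm):
    # gather the worker lists pickled by each slave host
    all_workers = []
    for h in prm.host_set:
        result_filename = host_result_filename(prm, h)
        if not os.path.exists(result_filename):
            raise FsDriftException('no result file from host %s' % h)
        with open(result_filename, 'rb') as f:
            all_workers.extend(pickle.load(f))
    return all_workers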