def main(argv):
    """Entry point: parse options, spawn worker threads, and report stats.

    Parses CLI options and the event weight table, creates the top-level
    directory (tolerating a pre-existing one), starts one fs_drift_instance
    per requested thread, then polls the worker threads and prints
    statistics every stats_report_interval seconds until all threads exit.
    Cleans up the starting-gun and stop files on the way out.
    """
    opts.parseopts(argv)
    event.parse_weights()
    event.normalize_weights()
    global total_errors
    total_errors = 0
    try:
        os.mkdir(opts.top_directory)
    except os.error as e:
        if e.errno != errno.EEXIST:
            raise  # bare raise preserves the original traceback
    sys.stdout.flush()
    last_stat_time = time.perf_counter()
    last_drift_time = time.perf_counter()
    stop_file = opts.top_directory + os.sep + 'stop-file'
    # we have to synchronize threads across multiple hosts somehow, we do this with a
    # file in a shared file system.
    if opts.randommap or opts.fill:
        fsop.randommap()
    global start_time
    start_time = time.perf_counter()
    instances = []
    for i in range(opts.threads):
        instances.append(fs_drift_instance(i))
    if opts.starting_gun_file:
        # touch the starting-gun file so waiting instances may proceed
        open(opts.starting_gun_file, 'a').close()
    working = True
    before = fsop.time_before
    # NOTE(review): 'before' and 'last_stat_time' are never refreshed inside
    # the loop, so once the interval first elapses the report fires on every
    # poll -- confirm whether 'before' should be re-read each iteration.
    while working:
        if (opts.stats_report_interval > 0) and (before - last_stat_time > opts.stats_report_interval):
            if opts.short_stats:
                print_short_stats()
            else:
                print_stats()
        working = False
        for inst in instances:
            # Thread.isAlive() was removed in Python 3.9; is_alive() is the
            # supported spelling.  'or' replaces the old bitwise '|'.
            working = inst.thread.is_alive() or working
        if working:
            time.sleep(opts.stats_report_interval)
        else:
            break
    print_stats()
    if opts.starting_gun_file:
        ensure_deleted(opts.starting_gun_file)
    ensure_deleted(stop_file)
def setUp(self):
    """Write the workload table to a scratch CSV and build test parameters."""
    with open('/tmp/weights.csv', 'w') as w_f:
        w_f.write('\n'.join(Test.workload_table))
    cfg = opts.parseopts()
    cfg.duration = 5
    cfg.stats_report_interval = 1
    cfg.workload_table_csv_path = '/tmp/weights.csv'
    cfg.verbosity = -1
    self.params = cfg
def run_workload():
    """Parse CLI options, prepare the top-level directories, and launch the run.

    Dispatches to the multi-host path when a --host-set was supplied and
    this process is not itself a slave; otherwise runs the multi-thread
    workload locally.  Exits with NOTOK on invalid CLI options.
    """
    log = fsd_log.start_log('fs-drift')
    # if a --host-set parameter was passed, it's a multi-host workload:
    # each remote instance will wait until all instances have reached
    # the starting gate
    try:
        cfg = opts.parseopts()
        cfg.validate()
    except FsDriftException as err:
        log.error(str(err))
        log.info('use --help option to get CLI syntax')
        sys.exit(NOTOK)
    print(cfg)
    if os.getenv('DEBUG'):
        log.logLevel(logging.DEBUG)
    try:
        sync_files.create_top_dirs(cfg)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise FsDriftException(
                'you must create the top-level directory %s' % cfg.top_directory)
    # put parameters where all threads can see them
    write_pickle(cfg.param_pickle_path, cfg)
    multi_host = (cfg.host_set != [] and not cfg.is_slave)
    if multi_host:
        return run_multi_host_workload(cfg, log)
    return multi_thread_workload.run_multi_thread_workload(cfg)
def setUp(self):
    """Build short-duration test parameters and a logger for these tests."""
    cfg = opts.parseopts()
    cfg.duration = 2
    cfg.workload_table_csv_path = '/tmp/weights.csv'
    self.params = cfg
    self.log = fsd_log.start_log('invoke_process')
# self-test: write a sample weights CSV, parse and normalize it, then draw
# a large number of events and tally how often each opcode is generated.
with open('/tmp/weights.csv', 'w') as w_f:
    w_f.write('\n'.join([
        'read, 2',
        'random_read, 2',
        'random_write, 2',
        'create, 6',
        'truncate, 0.2',
        'append, 4',
        'delete, 0.2',
        'hardlink, 0.3',
        'softlink, 0.3',
        'rename, 1',
        'remount,0.01',
    ]))
params = opts.parseopts()
params.workload_table_csv_path = '/tmp/weights.csv'
log = fsd_log.start_log('fsdevent')
# parse_weights returns the table keyed by opcode; the dead pre-assignment
# 'weights = {}' in the original was removed since it was always overwritten
weights = parse_weights(params)
normalized_weights = normalize_weights(weights)
print_weights(normalized_weights)
opcode_count = len(FSOPCtx.opname_to_opcode)
histogram = [0] * opcode_count
# generate 100000 events and analyze frequency
# (the original comment said 10000, contradicting the loop bound)
for _ in range(100000):
    opcode = gen_event(normalized_weights)
    histogram[opcode] += 1
# print out histogram results
            # tail of a readdir operation whose 'try:' begins above this view:
            # count the successful directory read
            c.have_readdir += 1
        except OSError as e:
            if e.errno == errno.ENOENT:
                # directory vanished (e.g. deleted by a concurrent op);
                # counted, not treated as an error
                c.e_dir_not_found += 1
            else:
                # any other OS error is reported through the common handler
                return self.scallerr('readdir', dirpath, e)
        return OK


# unit test
if __name__ == "__main__":
    import logging
    import opts
    import fsd_log
    options = opts.parseopts()
    log = fsd_log.start_log('fsop-unittest')
    log.info('hi there')
    # refuse to run outside /tmp -- the test recursively deletes the
    # top directory below
    if not options.top_directory.__contains__('/tmp/'):
        raise FsDriftException('bad top directory')
    os.system('rm -rf %s' % options.top_directory)
    os.makedirs(options.top_directory)
    os.chdir(options.top_directory)
    log.info('chdir to %s' % options.top_directory)
    # exercise create followed by read against a fresh context
    ctrs = FSOPCounters()
    ctx = FSOPCtx(options, log, ctrs, 'test-host', 'test-tid')
    ctx.verbosity = -1
    rc = ctx.op_create()
    assert (rc == OK)
    rc = ctx.op_read()
    assert (rc == OK)
{rq.READ: (read, "read"),
 rq.RANDOM_READ: (random_read, "random_read"),
 rq.CREATE: (create, "create"),
 rq.RANDOM_WRITE: (random_write, "random_write"),
 rq.APPEND: (append, "append"),
 rq.LINK: (link, "link"),
 rq.DELETE: (delete, "delete"),
 rq.RENAME: (rename, "rename"),
 rq.TRUNCATE: (truncate, "truncate"),
 rq.HARDLINK: (hlink, "hardlink"),
 rq.RANDOM_DISCARD: (random_discard, "random_discard")
 }

# unit test: generate opcount random filenames and check that the embedded
# file numbers spread across all histogram buckets up to max_files.
if __name__ == "__main__":
    opts.parseopts()
    buckets = 20
    histogram = [0] * buckets
    with open('/tmp/filenames.list', 'w') as fns:
        for i in range(0, opts.opcount):
            fn = gen_random_fn()
            fns.write(fn + '\n')
            # the basename has the form '<letter><number>.<suffix>';
            # strip the path and suffix, then parse the number
            fname = fn.split('/')[-1].split('.')[0]
            num = int(fname[1:])
            # BUGFIX: '/' yields a float under Python 3 and list indexing
            # with a float raises TypeError -- use floor division
            bucket = num * len(histogram) // opts.max_files
            histogram[bucket] += 1
    print(histogram)
    assert sum(histogram) == opts.opcount