def main():
    """Parse experiment directories into a table of results and write output.

    Reads options and directory arguments from the command line, loads the
    experiments' parameters into a ColMap (dropping ignored / always-averaged
    parameters), fills a TupleTable with parsed data, and writes the output.

    Exits with status 1 if no data could be parsed.
    """
    opts, args = parse_args()

    exp_dirs = get_dirs(args)

    # Load experiment parameters into a ColMap
    builder = ColMapBuilder()
    exps = load_exps(exp_dirs, builder, opts.force)

    # Don't track changes in ignored parameters
    if opts.ignore:
        for param in opts.ignore.split(","):
            builder.try_remove(param)

    # Always average multiple trials
    builder.try_remove(PARAMS['trial'])
    # Only need this for feather-trace parsing
    builder.try_remove(PARAMS['cycles'])

    col_map = builder.build()
    table = TupleTable(col_map)

    fill_table(table, exps, opts)

    if not table:
        # Fix: message previously lacked the trailing newline every other
        # stderr write in this file includes.
        sys.stderr.write("Found no data to parse!\n")
        sys.exit(1)

    write_output(table, opts)
def write_collapsed_csvs(table, opts):
    """Write one batch of csvs per numeric column, collapsing the others.

    For each numeric column in *table*'s column map, build a new key map that
    keeps all non-numeric columns plus that single numeric column, re-bucket
    the data under the new keys (so the other numeric columns' points get
    averaged together downstream), and write csvs to ``opts.out``.

    Warns on stderr because averaging away the other numeric columns can hide
    real trends in the data.
    """
    sys.stderr.write("Collapse option specified. "
                     "Only one numeric column at a time will be plotted.\n"
                     "The values of others will be averaged. "
                     "This is dangerous and can hide important trends!\n")

    original_map = table.get_col_map()
    builder = ColMapBuilder()
    numeric_cols = []

    # Add only nonnumeric fields to builder
    for column in original_map.columns():
        numeric = True
        for v in original_map.get_values()[column]:
            try:
                float(v)
            except ValueError:
                # Non-numeric value: this column is a key column, keep it.
                numeric = False
                builder.try_add(column, v)
        if numeric:
            numeric_cols += [column]

    for num_column in numeric_cols:
        # Only going to consider a single number column at a time.
        # BUG FIX: iterate the current numeric column's values (num_column),
        # not the stale 'column' variable left over from the loop above,
        # which always pointed at the last column iterated.
        for num_value in original_map.get_values()[num_column]:
            builder.try_add(num_column, num_value)

        next_map = builder.build()
        next_table = TupleTable(next_map)

        # Re-sort data into new table using this new key
        for mapped_key, points in table:
            kv = original_map.get_kv(mapped_key)
            next_table[kv] += points

        write_csvs(next_table, opts.out)

        # Drop this numeric column so the next iteration starts from only
        # the non-numeric key columns again.
        builder.try_remove(num_column)
def main():
    """Parse experiment results in parallel and write reduced output.

    Loads experiment parameters from the argument directories (defaulting to
    the current directory), parses each experiment in a worker pool, reduces
    the resulting TupleTable, and writes either a summary map
    (``opts.write_map``) or per-parameter csv directories to ``opts.out``.

    Raises:
        Exception: if any worker fails while parsing.
    """
    opts, args = parse_args()

    args = args or [os.getcwd()]

    # Load exp parameters into a ColMap
    builder = ColMapBuilder()
    exps = load_exps(args, builder, opts.force)

    # Don't track changes in ignored parameters
    if opts.ignore:
        for param in opts.ignore.split(","):
            builder.try_remove(param)

    # Always average multiple trials
    builder.try_remove(conf.PARAMS['trial'])

    col_map = builder.build()
    result_table = TupleTable(col_map)

    sys.stderr.write("Parsing data...\n")

    # BUG FIX: use floor division so 'processes' is an int — under Python 3
    # cpu_count()/2 is a float and Pool(processes=...) rejects it.
    # Use half the CPUs (at least one), capped at the number of experiments.
    procs = min(len(exps), max(cpu_count() // 2, 1))
    pool = Pool(processes=procs)

    pool_args = zip(exps, [opts.force] * len(exps))
    enum = pool.imap_unordered(parse_exp, pool_args, 1)

    try:
        for i, (exp, result) in enumerate(enum):
            if opts.verbose:
                print(result)
            else:
                # Progress indicator on a single rewritten stderr line
                sys.stderr.write('\r {0:.2%}'.format(float(i) / len(exps)))
            result_table[exp.params] += [result]
        pool.close()
    except:
        # Bare except is deliberate: terminate the workers even on
        # KeyboardInterrupt before surfacing the failure.
        pool.terminate()
        traceback.print_exc()
        raise Exception("Failed parsing!")
    finally:
        pool.join()

    sys.stderr.write('\n')

    if opts.force and os.path.exists(opts.out):
        sh.rmtree(opts.out)

    reduced_table = result_table.reduce()

    sys.stderr.write("Writing result...\n")
    if opts.write_map:
        # Write summarized results into map
        reduced_table.write_map(opts.out)
    else:
        # Write out csv directories for all variable params
        dir_map = reduced_table.to_dir_map()

        # No csvs to write, assume user meant to print out data
        if dir_map.is_empty():
            if not opts.verbose:
                sys.stderr.write("Too little data to make csv files.\n")
            for key, exp in result_table:
                for e in exp:
                    print(e)
        else:
            dir_map.write(opts.out)