def generate_matrix(experiments, mem, baseline, replace, directory, fast):
    """Print a CSV matrix of simulated times: each experiment evaluated
    against the best memory subsystem found for every experiment."""
    assert mem == 'best'
    db = database.get_instance()
    for mem_model in experiments:
        # Look up the best memory subsystem discovered for this model.
        mod = model.parse_model_file(mem_model)
        model_ml = get_best(db, mod)
        for experiment in experiments:
            # Evaluate each experiment against that subsystem, caching
            # the result in the database.
            mod = model.parse_model_file(experiment)
            time = db.get_result(mod, model_ml)
            if not time:
                time, cost = evaluate(mod, model_ml, directory, fast)
                db.add_result(mod, model_ml, time, cost)
            print(get_experiment_name(experiment) + ',' +
                  get_experiment_name(mem_model) + ',' + str(time))
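# The get_best() helper used above is defined elsewhere in the project.  A
# minimal sketch of what it might look like, assuming db.get_best() returns
# the serialized best memory list (as in the VHDL generator's main() below)
# and that memlist, lex, and StringIO are imported; the exact signature and
# error handling are assumptions, not the project's actual implementation.
def get_best(db, mod):
    best, value, cost = db.get_best(mod)
    if best is None:
        print('ERROR: no best memory subsystem found')
        sys.exit(-1)
    return memlist.parse_memory_list(lex.Lexer(StringIO(best)))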
def main():
    options, args = parser.parse_args()
    if len(args) < 1:
        print('ERROR: no model file specified')
        sys.exit(-1)
    elif len(args) > 1:
        print('ERROR: too many model files specified')
        sys.exit(-1)
    m = model.parse_model_file(args[0])
    if options.best:
        # Replace the model's memory subsystem with the best subsystem
        # recorded in the database.
        db = database.get_instance(options.url)
        best, value, cost = db.get_best(m)
        if best is None:
            print('ERROR: Model not found')
            sys.exit(-1)
        m.memory = memlist.parse_memory_list(lex.Lexer(StringIO(best)))
        print('-- Cost: {}, value: {}'.format(cost, value))
    name = '-- ' + str(m).replace(') (', ')\n -- (')
    name = name.replace(')(benchmarks ', ')\n-- (benchmarks ')
    print(name)
    for b in m.benchmarks:
        # Size any subsystem with an unspecified depth to hold the full
        # benchmark data set.
        mem = m.memory.get_subsystem(b.index)
        if mem.depth < 0:
            word_size = mem.get_word_size()
            total_size = b.get_size(options.directory)
            mem.depth = total_size // word_size
            assert mem.depth >= 0
    gen = vhdl.VHDLGenerator(m.machine)
    result = gen.generate(m.memory)
    print(result)
def main():
    global directory, mach
    options, args = parser.parse_args()
    experiments = args if args else None
    if not database.get_instance(options.url):
        print("ERROR: could not connect to the database")
        sys.exit(-1)
    directory = options.directory
    if len(args) > 0:
        m = model.parse_model_file(args[0])
        mach = m.machine
    else:
        mach = machine.MachineType()
    # Sweep cache configurations, halving the line count, line size, and
    # associativity until the minimums are reached.
    max_size = get_max_size()
    line_count = util.round_power2(max_size // (mach.word_size * 8))
    while line_count >= 128:
        line_size = util.round_power2(max_size // 8)
        while line_size >= mach.word_size:
            associativity = min(line_count, 8)
            while associativity >= 1:
                for policy in get_policies(associativity):
                    generate_cache(line_count, line_size, associativity,
                                   policy, True, experiments)
                    generate_cache(line_count, line_size, associativity,
                                   policy, False, experiments)
                associativity //= 2
            line_size //= 2
        line_count //= 2
    print("Total:", total)
    print("Best Cost:", best_cost)
    print("Best Memory:", best_name)
def simulate(experiment, mem, baseline, replace, directory, fast):
    mod = model.parse_model_file(experiment)
    db = database.get_instance()
    ml = get_memory_list(db, mem, mod, baseline, replace)
    time = db.get_result(mod, ml)
    if time is None:
        time, cost = evaluate(mod, ml, directory, fast)
        db.add_result(mod, ml, time, cost)
    print(get_experiment_name(experiment) + ',' + str(time))
def get_stats(experiments, mem, baseline, replace, directory):
    db = database.get_instance()
    for experiment in experiments:
        mod = model.parse_model_file(experiment)
        ml = get_memory_list(db, mem, mod, baseline, replace)
        ml.reset(mod.machine)
        pl = ml.get_max_path_length()
        name = get_experiment_name(experiment)
        cost = ml.get_cost()
        print('{},{},{}'.format(name, pl, cost))
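# The get_memory_list() helper used by simulate() and get_stats() is defined
# elsewhere in the project.  A rough sketch of its likely shape, mirroring
# the model/baseline/best selection in get_frequency() below; how the
# 'replace' flag is applied is unknown and left out here, and the helper
# names and return type are assumptions.
def get_memory_list(db, mem, mod, baseline, replace):
    if mem == 'model':
        return mod.memory
    elif mem == 'baseline':
        with open(baseline, 'r') as f:
            return memory.parse_memory_list(lex.Lexer(f))
    elif mem == 'best':
        best_name, _, _ = db.get_best(mod)
        best_file = StringIO.StringIO(best_name)
        return memory.parse_memory_list(lex.Lexer(best_file))
    print('ERROR: invalid memory selected:', mem)
    sys.exit(-1)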
def get_frequency(experiment, mem, baseline, keep):
    m = model.parse_model_file(experiment)
    db = database.get_instance()
    if mem == 'model':
        subsystem = m.memory
    elif mem == 'baseline':
        with open(baseline, 'r') as f:
            subsystem = memory.parse_memory_list(lex.Lexer(f))
    elif mem == 'best':
        best_name, _, _ = db.get_best(m)
        best_file = StringIO.StringIO(best_name)
        subsystem = memory.parse_memory_list(lex.Lexer(best_file))
    else:
        print('ERROR: invalid memory selected:', mem)
        sys.exit(-1)
    # Request an unattainably high frequency so the synthesis run reports
    # the maximum frequency it can actually achieve.
    m.machine.frequency = 1 << 31
    result = xilinx.run_xilinx(m.machine, subsystem, keep)
    print(get_experiment_name(experiment) + ',' +
          str(result.frequency) + ',' + str(result.bram_count))
def start_experiment(context):
    seed = context.seed
    server = context.server
    iterations = context.iterations
    directory = context.directory
    experiments = context.experiments

    # Determine the experiment to run and update the seed.
    context.seed = (seed + 1) % (2 << 31)
    experiment_count = len(experiments)
    experiment = experiments[seed % experiment_count]

    # Load the model and its current state.
    m = model.parse_model_file(experiment)
    if not m:
        print('ERROR: could not read model:', experiment)
        return False

    # Only start the thread if there is work to do.
    if server.db.get_result_count(m) >= iterations:
        return False

    # Create a shared database instance.
    name = os.path.basename(experiment)
    db = server.add_client(name)
    print('Starting {}'.format(name))

    # Start the thread.
    args = {
        'db': db,
        'iterations': iterations,
        'mod': m,
        'directory': directory,
        'seed': seed,
    }
    pool = main_context.pool
    main_context.thread_count += 1
    pool.apply_async(run_experiment, kwds=args, callback=experiment_done)
    return True
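# For reference, a self-contained sketch of the apply_async/callback pattern
# used above.  This is generic standard-library usage, not project code: the
# callback is invoked with the worker's return value once the work finishes,
# which is how experiment_done() gets notified when run_experiment() returns.
def _callback_pattern_demo():
    from multiprocessing.pool import ThreadPool

    def work(seed):
        return seed * 2                    # stand-in for run_experiment()

    def done(result):
        print('finished with', result)     # stand-in for experiment_done()

    pool = ThreadPool(2)
    for s in range(4):
        pool.apply_async(work, kwds={'seed': s}, callback=done)
    pool.close()
    pool.join()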
def run_simulation(mem, experiment):
    print(" Running", experiment)
    m = model.parse_model_file(experiment)

    # The experiment must target the same machine as the rest of the run.
    for attr in ('target', 'frequency', 'technology', 'max_path_length',
                 'part', 'word_size', 'addr_bits', 'max_cost'):
        if getattr(m.machine, attr) != getattr(mach, attr):
            print("ERROR: wrong", attr.replace('_', ' '), "for", experiment)
            sys.exit(-1)

    mem.set_main(m.memory)
    db = database.get_instance()
    result = db.get_result(m, mem)
    if result is None:
        ml = memory.MemoryList(m.memory)
        ml.add_memory(mem)
        result, cost = evaluate(m, ml, directory)
        db.add_result(m, mem, result, cost)
    return result