def main():
    """Sweep the cache design space and generate every configuration.

    Walks line counts, line sizes, and associativities downward by
    powers of two, emitting a cache for each replacement policy with
    the flag both set and cleared.
    """
    global directory, mach
    options, args = parser.parse_args()
    experiments = args if args else None
    if not database.get_instance(options.url):
        print("ERROR: could not connect to the database")
        sys.exit(-1)
    directory = options.directory

    # Take the machine description from the first model when one is
    # given; otherwise fall back to the default machine type.
    if args:
        mach = model.parse_model_file(args[0]).machine
    else:
        mach = machine.MachineType()

    max_size = get_max_size()
    line_count = util.round_power2(max_size // (mach.word_size * 8))
    while line_count >= 128:
        line_size = util.round_power2(max_size // 8)
        while line_size >= mach.word_size:
            assoc = min(line_count, 8)
            while assoc >= 1:
                for policy in get_policies(assoc):
                    # Generate the cache with the flag both on and off.
                    for flag in (True, False):
                        generate_cache(line_count, line_size, assoc,
                                       policy, flag, experiments)
                assoc //= 2
            line_size //= 2
        line_count //= 2

    print("Total:", total)
    print("Best Cost: ", best_cost)
    print("Best Memory:", best_name)
def main():
    """Print a model (optionally its best known memory) as VHDL."""
    options, args = parser.parse_args()

    # Exactly one model file must be specified.
    arg_count = len(args)
    if arg_count < 1:
        print('ERROR: no model file specified')
        sys.exit(-1)
    elif arg_count > 1:
        print('ERROR: too many model files specified')
        sys.exit(-1)

    mod = model.parse_model_file(args[0])
    if options.best:
        # Replace the model's memory with the best one recorded in the
        # database for this model.
        db = database.get_instance(options.url)
        best, value, cost = db.get_best(mod)
        if best is None:
            print('ERROR: Model not found')
            sys.exit(-1)
        mod.memory = memlist.parse_memory_list(lex.Lexer(StringIO(best)))
        print('-- Cost: {}, value: {}'.format(cost, value))

    # Emit the model description as VHDL comments.
    header = '-- ' + str(mod).replace(') (', ')\n -- (')
    header = header.replace(')(benchmarks ', ')\n-- (benchmarks ')
    print(header)

    # Fill in unspecified subsystem depths from each benchmark's size.
    for bench in mod.benchmarks:
        sub = mod.memory.get_subsystem(bench.index)
        if sub.depth < 0:
            word_size = sub.get_word_size()
            total_size = bench.get_size(options.directory)
            sub.depth = total_size // word_size
            assert(sub.depth >= 0)

    # Generate and print the VHDL for the memory subsystem.
    generator = vhdl.VHDLGenerator(mod.machine)
    print(generator.generate(mod.memory))
def simulate(experiment, mem, baseline, replace, directory, fast):
    """Simulate one experiment with the given memory, caching results.

    Looks up the result in the database first; only evaluates and
    stores when no cached result exists.  Prints 'name,time'.
    """
    m = model.parse_model_file(experiment)
    db = database.get_instance()
    memory_list = get_memory_list(db, mem, m, baseline, replace)
    sim_time = db.get_result(m, memory_list)
    if sim_time is None:
        sim_time, cost = evaluate(m, memory_list, directory, fast)
        db.add_result(m, memory_list, sim_time, cost)
    print('{},{}'.format(get_experiment_name(experiment), sim_time))
def main():
    """Dispatch to the requested database report."""
    options, args = parser.parse_args()
    db = database.get_instance(options.url)
    # Select the report to run, then invoke it.
    if options.show:
        action = show_state
    elif options.memory:
        action = dump_spec
    else:
        action = show_pending
    action(db, args)
def get_stats(experiments, mem, baseline, replace, directory):
    """Print 'name,max_path_length,cost' for each experiment."""
    db = database.get_instance()
    for exp in experiments:
        m = model.parse_model_file(exp)
        memory_list = get_memory_list(db, mem, m, baseline, replace)
        memory_list.reset(m.machine)
        print('{},{},{}'.format(get_experiment_name(exp),
                                memory_list.get_max_path_length(),
                                memory_list.get_cost()))
def main():
    """Report the frequency for each model given on the command line."""
    options, args = parser.parse_args()

    # Require at least one model.
    if not args:
        print('ERROR: no model(s) specified')
        sys.exit(-1)

    # Connect to the database before doing any work.
    db = database.get_instance(options.url)
    if not db.connect():
        print('ERROR: could not connect to the database')
        sys.exit(-1)

    for exp in args:
        get_frequency(exp, options.memory, options.baseline, options.keep)
def generate_matrix(experiments, mem, baseline, replace, directory, fast):
    """Cross-evaluate every experiment against every best memory.

    For each pair (target, source) prints
    'target_name,source_name,time' where the time is the result of
    running the target experiment with the source's best memory.
    Only supports mem == 'best'.
    """
    assert(mem == 'best')
    db = database.get_instance()
    for source in experiments:
        # The memory subsystem comes from the source experiment.
        source_model = model.parse_model_file(source)
        best_ml = get_best(db, source_model)
        for target in experiments:
            target_model = model.parse_model_file(target)
            t = db.get_result(target_model, best_ml)
            if not t:
                t, cost = evaluate(target_model, best_ml, directory, fast)
                db.add_result(target_model, best_ml, t, cost)
            print('{},{},{}'.format(get_experiment_name(target),
                                    get_experiment_name(source), t))
def main():
    """Run experiments in a multiprocessing pool until done or interrupted.

    Parses command-line options into the global main_context, starts a
    database server and a worker pool, then loops starting experiments
    until none remain (or Ctrl-C), finally printing 'Done' and exiting.
    """
    global main_context

    # Parse arguments.
    options, args = parser.parse_args()
    if not args:
        # BUG FIX: optparse parse_args returns a (possibly empty) list,
        # never None, so the old "args is None" check could never fire.
        print('ERROR: no experiment specified')
        sys.exit(-1)
    main_context.experiments = args
    main_context.directory = options.directory
    # Seed from the clock unless one was supplied.
    main_context.seed = int(options.seed) if options.seed else int(time.time())
    main_context.iterations = int(options.iterations)
    main_context.verbose = options.verbose
    main_context.full = options.full
    db = database.get_instance(options.url)

    # Create the database server.
    manager = multiprocessing.Manager()
    main_context.server = DatabaseServer(manager, db, show_status)

    # Create the thread pool.  Force verbose when single-threaded.
    max_threads = int(options.threads)
    main_context.verbose = main_context.verbose or max_threads == 1
    main_context.pool = multiprocessing.Pool(max_threads, experiment_init)

    # Run the experiments, starting new ones as necessary.
    try:
        active = True
        while not main_context.stop:
            # Service pending database requests first.
            while main_context.server.run():
                pass
            if active and main_context.thread_count < max_threads:
                # Try to start another experiment; 'active' stays False
                # if nothing could be started this pass.
                active = False
                for _ in main_context.experiments:
                    if start_experiment(main_context):
                        active = True
                        break
            elif main_context.thread_count == 0:
                # Nothing running and nothing startable: all done.
                main_context.stop = True
                break
            gc.collect()
            time.sleep(0.125)
        main_context.pool.terminate()
    except KeyboardInterrupt:
        main_context.pool.terminate()
        main_context.pool = None
    print('Done')
    sys.exit(0)
def _run_cacti(params):
    """Get the result of running CACTI with the specified parameters.

    Results are memoized in the database, so CACTI runs at most once
    per parameter set.  Returns a CACTIResult with access_time,
    cycle_time, and area; missing values fall back to 1 << 31 (area,
    access time) or the access time (cycle time).
    """
    # Check if we already tried a memory with these parameters.
    db = database.get_instance()
    temp = db.get_cacti_result(params)
    if temp:
        return CACTIResult(access_time=temp[0],
                           cycle_time=temp[1],
                           area=temp[2])

    # Find cacti.
    cacti_exe = _find_cacti()

    # Generate a file containing the parameters for CACTI.
    fd, file_name = tempfile.mkstemp(suffix='.cacti',
                                     prefix='ms',
                                     dir=None,
                                     text=True)
    with os.fdopen(fd, 'w') as f:
        _generate_file(f, params)

    # Run CACTI.
    try:
        # BUG FIX: check_output returns bytes on Python 3; decode so
        # the str-based regex searches below and the '' error fallback
        # operate on the same type.
        raw = subprocess.check_output([cacti_exe, '-infile', file_name])
        buf = raw.decode('utf-8', errors='replace')
    except subprocess.CalledProcessError:
        buf = ''
    finally:
        os.remove(file_name)

    # Extract the area, access time, and cycle time from the CACTI results.
    result = CACTIResult()
    m = area_regex.search(buf)
    if m:
        # Scale the reported area (presumably mm^2 -> um^2 --
        # TODO confirm against CACTI's output units) and round up.
        result.area = int(math.ceil(float(m.group(1)) * 1000.0 * 1000.0))
    else:
        result.area = 1 << 31
    m = access_regex.search(buf)
    result.access_time = float(m.group(1)) if m else (1 << 31)
    m = cycle_regex.search(buf)
    # Fall back to the access time when no cycle time is reported.
    result.cycle_time = float(m.group(1)) if m else result.access_time
    db.add_cacti_result(params, result.access_time,
                        result.cycle_time, result.area)
    return result
def main():
    """Dispatch to stats, comparison-matrix, or array generation."""
    options, args = parser.parse_args()
    if len(args) == 0:
        print('ERROR: no models specified')
        sys.exit(-1)
    if not database.get_instance(options.url):
        print('ERROR: could not connect to the database')
        sys.exit(-1)
    # Default to the current directory when none was given.
    directory = options.directory if options.directory else os.getcwd()
    if options.stats:
        get_stats(args, options.memory, options.baseline,
                  options.replace, directory)
    elif options.compare:
        # BUG FIX: generate_matrix requires a 'fast' argument; it was
        # previously omitted, raising a TypeError on --compare runs.
        generate_matrix(args, options.memory, options.baseline,
                        options.replace, directory, options.fast)
    else:
        generate_array(args, options.memory, options.baseline,
                       options.replace, directory, options.fast)
def get_frequency(experiment, mem, baseline, keep):
    """Print 'name,frequency,bram_count' for an experiment.

    Arguments:
        experiment: path of the model file to evaluate.
        mem:        memory to use: 'model', 'baseline', or 'best'.
        baseline:   path of the baseline memory-list file.
        keep:       if True, keep the XST working directory.
    """
    from io import StringIO  # local: guarantees the py3 constructor

    m = model.parse_model_file(experiment)
    db = database.get_instance()
    if mem == 'model':
        subsystem = m.memory
    elif mem == 'baseline':
        with open(baseline, 'r') as f:
            subsystem = memory.parse_memory_list(lex.Lexer(f))
    elif mem == 'best':
        best_name, _, _ = db.get_best(m)
        # BUG FIX: StringIO.StringIO is the Python 2 module spelling;
        # use the io.StringIO constructor as the rest of the code does.
        best_file = StringIO(best_name)
        subsystem = memory.parse_memory_list(lex.Lexer(best_file))
    else:
        print('ERROR: invalid memory selected:', mem)
        sys.exit(-1)
    # Use an unreachably high target frequency.
    # NOTE(review): presumably this makes XST report the maximum
    # achievable frequency -- confirm against run_xilinx.
    m.machine.frequency = 1 << 31
    result = xilinx.run_xilinx(m.machine, subsystem, keep)
    print(get_experiment_name(experiment) + ',' +
          str(result.frequency) + ',' +
          str(result.bram_count))
def run_simulation(mem, experiment):
    """Run one experiment with the given memory, caching the result.

    Validates that the experiment's machine matches the global 'mach'
    description before simulating; exits with an error otherwise.
    """
    print(" Running", experiment)
    m = model.parse_model_file(experiment)

    # Every relevant machine field must match the global description.
    field_checks = (
        ('target', 'target'),
        ('frequency', 'frequency'),
        ('technology', 'technology'),
        ('max_path_length', 'max path length'),
        ('part', 'part'),
        ('word_size', 'word size'),
        ('addr_bits', 'addr bits'),
        ('max_cost', 'max cost'),
    )
    for attr, label in field_checks:
        if getattr(m.machine, attr) != getattr(mach, attr):
            print("ERROR: wrong " + label + " for", experiment)
            sys.exit(-1)

    mem.set_main(m.memory)
    db = database.get_instance()
    result = db.get_result(m, mem)
    if result is None:
        # No cached result: evaluate and store it.
        ml = memory.MemoryList(m.memory)
        ml.add_memory(mem)
        result, cost = evaluate(m, ml, directory)
        db.add_result(m, mem, result, cost)
    return result
def run_xilinx(machine, mem, keep=False):
    """Get the results of running XST on the specified memory.

    Arguments:
        machine: machine description; provides the FPGA part string.
        mem:     a memlist.MemoryList, a FIFO-like component (has
                 'is_fifo'), or a single memory component.
        keep:    if True, keep the XST working directory on success.

    Returns an XilinxResult (frequency, BRAM/register/LUT counts).
    Results are memoized in the database keyed on part + memory string.
    Raises on any XST or result-parsing failure.
    """
    # Clone the memory so we can safely modify it.
    mem = mem.clone()

    # If we got a memory list, get timing for the complete
    # memory subsystem.  Otherwise, only report timing for the
    # specified component.
    if isinstance(mem, memlist.MemoryList):
        word_size = mem.get_main().get_word_size()
        main = ram.RAM(word_size=word_size, latency=0)
        ml = mem
    elif hasattr(mem, 'is_fifo'):
        # FIFO-like component: wrap it in a fresh memory list backed
        # by a zero-latency RAM.
        word_size = mem.get_word_size()
        main = ram.RAM(word_size=word_size, latency=0)
        mem.set_next(main)
        ml = memlist.MemoryList(main)
        mem.index = 1
        ml.add_memory(mem)
    else:
        # Plain component: wrap it in a subsystem in a fresh list.
        next_word_size = mem.get_next().get_word_size()
        main = ram.RAM(word_size=next_word_size, latency=0)
        mem.set_next(main)
        ml = memlist.MemoryList(main)
        ml.add_memory(subsystem.Subsystem(0, mem.get_word_size(), -1, mem))
    # Install the zero-latency RAM as the main memory so XST times
    # only the component(s) under test.
    ml.set_main(main)
    name = machine.part + str(ml)

    # Determine if we've already processed this memory.
    db = database.get_instance()
    temp = db.get_fpga_result(name)
    if temp:
        return XilinxResult(temp[0], temp[1], temp[2], temp[3])
    print(name)

    # Create a directory for this run.
    old_dir = os.getcwd()
    dname = tempfile.mkdtemp(suffix='', prefix='ms')
    vhdl_file = dname + '/top.vhdl'
    project_file = dname + '/mem.prj'
    script_file = dname + '/mem.scr'
    ngc_file = dname + '/mem.ngc'
    result_file = dname + '/mem.srp'
    try:
        # Generate the HDL for the component.
        gen = vhdl.VHDLGenerator(machine)
        hdl = gen.generate(ml)
        with open(vhdl_file, 'w') as f:
            f.write(hdl)

        # Generate the XST project file.
        # The support VHDL is taken from the ./hdl directory relative
        # to the current working directory.
        with open(project_file, 'w') as f:
            f.write('vhdl work ' + old_dir + '/hdl/adapter.vhdl\n')
            f.write('vhdl work ' + old_dir + '/hdl/arbiter.vhdl\n')
            f.write('vhdl work ' + old_dir + '/hdl/cache.vhdl\n')
            f.write('vhdl work ' + old_dir + '/hdl/combine.vhdl\n')
            f.write('vhdl work ' + old_dir + '/hdl/eor.vhdl\n')
            f.write('vhdl work ' + old_dir + '/hdl/fifo.vhdl\n')
            f.write('vhdl work ' + old_dir + '/hdl/offset.vhdl\n')
            f.write('vhdl work ' + old_dir + '/hdl/prefetch.vhdl\n')
            f.write('vhdl work ' + old_dir + '/hdl/shift.vhdl\n')
            f.write('vhdl work ' + old_dir + '/hdl/spm.vhdl\n')
            f.write('vhdl work ' + old_dir + '/hdl/split.vhdl\n')
            f.write('vhdl work ' + old_dir + '/hdl/ram.vhdl\n')
            f.write('vhdl work ' + vhdl_file + '\n')

        # Generate the XST script file.
        with open(script_file, 'w') as f:
            f.write("run -ifn " + project_file + " -ifmt mixed -top mem" +
                    " -ofn " + ngc_file + " -ofmt NGC -p " + machine.part +
                    " -ram_style block -opt_mode Speed -opt_level 2" +
                    " -register_balancing yes -keep_hierarchy no")

        # Run XST.
        # NOTE(review): os.system with discarded output; a failed run
        # is only detected below when parsing the result file.
        os.chdir(dname)
        os.system("xst -ifn " + script_file + " >/dev/null 2>/dev/null")

        # Parse results.
        result = XilinxResult()
        with open(result_file, "r") as f:
            buf = f.read()
        m = freq_regex.search(buf)
        if m is None:
            raise Exception('Could not determine frequency')
        # Reported in MHz; convert to Hz.
        result.frequency = float(m.group(1)) * 1000000.0
        m = bram_regex.search(buf)
        if m is not None:
            result.bram_count = int(m.group(1))
        else:
            # No BRAM line means no block RAMs were used.
            result.bram_count = 0
        m = reg_regex.search(buf)
        if m is None:
            raise Exception('Could not determine slice registers')
        result.register_count = int(m.group(1))
        m = lut_regex.search(buf)
        if m is None:
            raise Exception('Could not determine slice LUTs')
        result.lut_count = int(m.group(1))

        # Delete the project directory only if successful.
        os.chdir(old_dir)
        if keep:
            print("XST working directory:", dname)
        else:
            shutil.rmtree(dname)

        # Save and return the result.
        db.add_fpga_result(name, result.frequency, result.bram_count,
                           result.lut_count, result.register_count)
        return result
    except Exception as e:
        # Restore the working directory before propagating the error;
        # the temp directory is intentionally left for inspection.
        os.chdir(old_dir)
        print('ERROR: XST run failed:', e)
        print('ERROR: Memory:', mem)
        raise