def _run_vivado(build_name, vivado_path, source, ver=None):
    """Generate a platform-appropriate launcher script for Vivado and run it.

    On Windows/Cygwin a .bat file is emitted; elsewhere a shell script that
    first sources the Vivado settings file is written and run through bash.
    Raises OSError when the settings cannot be located or when the
    subprocess exits with a non-zero status.
    """
    vivado_cmd = "vivado -mode batch -source " + build_name + ".tcl\n"
    if sys.platform in ("win32", "cygwin"):
        command = "build_" + build_name + ".bat"
        tools.write_to_file(command, "REM Autogenerated by Migen\n" + vivado_cmd)
    else:
        # For backwards compatibility with ISE paths, also look for a
        # version in a subdirectory named "Vivado" under the given path.
        settings = None
        for candidate in (vivado_path, os.path.join(vivado_path, "Vivado")):
            try:
                settings = common.settings(candidate, ver)
            except OSError:
                continue
            break
        if settings is None:
            raise OSError("Unable to locate Vivado directory or settings.")
        script = "# Autogenerated by Migen\nset -e\n"
        script += "source " + settings + "\n"
        script += vivado_cmd
        script_file = "build_" + build_name + ".sh"
        tools.write_to_file(script_file, script)
        command = ["bash", script_file]
    if tools.subprocess_call_filtered(command, common.colors) != 0:
        raise OSError("Subprocess failed")
def _build_batch(self, platform, sources, build_name):
    """Emit the Vivado batch Tcl script (<build_name>.tcl) that drives
    synthesis, placement, routing, report generation and bitstream writing."""
    commands = []
    emit = commands.append
    for filename, language, library in sources:
        braced = "{" + filename + "}"
        emit("add_files " + braced)
        emit("set_property library {} [get_files {}]".format(library, braced))
    emit("read_xdc {}.xdc".format(build_name))
    for pre_cmd in self.pre_synthesis_commands:
        emit(pre_cmd.format(build_name=build_name))
    emit("synth_design -top {} -part {} -include_dirs {{{}}}".format(
        build_name, platform.device,
        " ".join(platform.verilog_include_paths)))
    emit("report_utilization -hierarchical -file {}_utilization_hierarchical_synth.rpt".format(build_name))
    emit("report_utilization -file {}_utilization_synth.rpt".format(build_name))
    emit("place_design")
    if self.with_phys_opt:
        emit("phys_opt_design -directive AddRetime")
    emit("report_utilization -hierarchical -file {}_utilization_hierarchical_place.rpt".format(build_name))
    emit("report_utilization -file {}_utilization_place.rpt".format(build_name))
    emit("report_io -file {}_io.rpt".format(build_name))
    emit("report_control_sets -verbose -file {}_control_sets.rpt".format(build_name))
    emit("report_clock_utilization -file {}_clock_utilization.rpt".format(build_name))
    emit("route_design")
    emit("report_route_status -file {}_route_status.rpt".format(build_name))
    emit("report_drc -file {}_drc.rpt".format(build_name))
    emit("report_timing_summary -max_paths 10 -file {}_timing.rpt".format(build_name))
    emit("report_power -file {}_power.rpt".format(build_name))
    for bitstream_command in self.bitstream_commands:
        emit(bitstream_command.format(build_name=build_name))
    emit("write_bitstream -force {}.bit ".format(build_name))
    for additional_command in self.additional_commands:
        emit(additional_command.format(build_name=build_name))
    emit("quit")
    tools.write_to_file(build_name + ".tcl", "\n".join(commands))
def build(self, platform, fragment, build_dir="build", build_name="top",
          toolchain_path=None, run=True, **kwargs):
    """Elaborate *fragment*, generate the Diamond build files in *build_dir*
    and optionally run the toolchain.  Returns the Verilog namespace.

    Fix: the working directory is now restored in a try/finally, so an
    exception during elaboration or the toolchain run no longer leaves the
    caller chdir'd into *build_dir* (matching the other build() methods in
    this file).
    """
    if toolchain_path is None:
        toolchain_path = "/opt/Diamond"
    os.makedirs(build_dir, exist_ok=True)
    cwd = os.getcwd()
    os.chdir(build_dir)
    try:
        if not isinstance(fragment, _Fragment):
            fragment = fragment.get_fragment()
        platform.finalize(fragment)
        v_output = platform.get_verilog(fragment, name=build_name, **kwargs)
        named_sc, named_pc = platform.resolve_signals(v_output.ns)
        v_file = build_name + ".v"
        v_output.write(v_file)
        sources = platform.sources | {(v_file, "verilog", "work")}
        _build_files(platform.device, sources,
                     platform.verilog_include_paths, build_name)
        tools.write_to_file(build_name + ".lpf",
                            _build_lpf(named_sc, named_pc))
        script = _build_script(build_name, platform.device, toolchain_path)
        if run:
            _run_script(script)
    finally:
        # Always restore the caller's working directory.
        os.chdir(cwd)
    return v_output.ns
def _build_script(build_name, device, toolchain_path, ver=None):
    """Write the Diamond build script (.bat on Windows/Cygwin, .sh elsewhere)
    and return its filename.

    The script runs pnmainc on the generated Tcl file and copies the build
    products (.bit and, for JEDEC-capable devices, .jed) out of the impl
    directory.
    """
    if sys.platform in ("win32", "cygwin"):
        script_ext = ".bat"
        script = "@echo off\nrem Autogenerated by Migen\n\n"
        copy_stmt = "copy"
        fail_stmt = " || exit /b"
    else:
        script_ext = ".sh"
        script = "# Autogenerated by Migen\nset -e\n\n"
        copy_stmt = "cp"
        fail_stmt = ""
        # Unix only: source Diamond's environment relative to toolchain_path.
        script += "bindir={}\n".format(toolchain_path)
        script += ". ${bindir}/diamond_env" + fail_stmt + "\n"
    script += "{pnmainc} {tcl_script}{fail_stmt}\n".format(
        pnmainc=os.path.join(toolchain_path, "pnmainc"),
        tcl_script=build_name + ".tcl",
        fail_stmt=fail_stmt)
    for ext in (".bit", ".jed"):
        # .jed only exists for JEDEC-producing devices.
        if ext == ".jed" and not _produces_jedec(device):
            continue
        script += "{copy_stmt} {diamond_product} {migen_product}{fail_stmt}\n".format(
            copy_stmt=copy_stmt,
            fail_stmt=fail_stmt,
            diamond_product=os.path.join("impl", build_name + "_impl" + ext),
            migen_product=build_name + ext)
    script_file = "build_" + build_name + script_ext
    tools.write_to_file(script_file, script, force_unix=False)
    return script_file
def _build_sim(platform, vns, build_name, include_paths, serial, verbose):
    """Generate and run the Verilator build script for the simulation DUT.

    Compiles dut.v together with dut_tb.cpp; on failure raises OSError
    carrying the captured error lines (all output lines when *verbose*).
    """
    include = "".join("-I" + path + " " for path in include_paths)
    build_script_contents = """# Autogenerated by LiteX
rm -rf obj_dir/
verilator {disable_warnings} -O3 --cc dut.v --exe dut_tb.cpp -LDFLAGS "-lpthread -lSDL" -trace {include}
make -j -C obj_dir/ -f Vdut.mk Vdut
""".format(
        disable_warnings="-Wno-fatal",
        include=include)
    build_script_file = "build_" + build_name + ".sh"
    tools.write_to_file(build_script_file, build_script_contents,
                        force_unix=True)
    _build_tb(platform, vns, serial, os.path.join(sim_directory, "dut_tb.cpp"))
    proc = subprocess.Popen(["bash", build_script_file],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    raw_output, _ = proc.communicate()
    output = raw_output.decode('utf-8')
    if proc.returncode != 0:
        error_messages = [line for line in output.splitlines()
                          if verbose or "error" in line.lower()]
        raise OSError("Subprocess failed with {}\n{}".format(
            proc.returncode, "\n".join(error_messages)))
    if verbose:
        print(output)
def build(self, platform, fragment, build_dir="build", build_name="top",
          toolchain_path="/opt/Xilinx/Vivado", source=True, run=True,
          synth_mode="vivado", enable_xpm=False, **kwargs):
    """Elaborate *fragment*, generate the Vivado build files in *build_dir*
    and optionally run the toolchain (optionally via Yosys synthesis).
    Returns the Verilog namespace.

    Fix: the working directory is now restored in a try/finally, so an
    exception mid-build no longer leaves the caller chdir'd into
    *build_dir* (matching the ISE build() implementations in this file).
    """
    if toolchain_path is None:
        toolchain_path = "/opt/Xilinx/Vivado"
    os.makedirs(build_dir, exist_ok=True)
    cwd = os.getcwd()
    os.chdir(build_dir)
    try:
        if not isinstance(fragment, _Fragment):
            fragment = fragment.get_fragment()
        platform.finalize(fragment)
        self._convert_clocks(platform)
        self._constrain(platform)
        v_output = platform.get_verilog(fragment, name=build_name, **kwargs)
        named_sc, named_pc = platform.resolve_signals(v_output.ns)
        v_file = build_name + ".v"
        v_output.write(v_file)
        sources = platform.sources | {(v_file, "verilog", "work")}
        edifs = platform.edifs
        ips = platform.ips
        self._build_batch(platform, sources, edifs, ips, build_name,
                          synth_mode, enable_xpm)
        tools.write_to_file(build_name + ".xdc",
                            _build_xdc(named_sc, named_pc))
        if run:
            if synth_mode == "yosys":
                common._run_yosys(platform.device, sources,
                                  platform.verilog_include_paths, build_name)
            _run_vivado(build_name, toolchain_path, source)
    finally:
        # Always restore the caller's working directory.
        os.chdir(cwd)
    return v_output.ns
def _run_sim(build_name):
    """Launch the previously built Verilator model through a tiny shell
    script; raises OSError on a non-zero exit status."""
    script_name = "run_" + build_name + ".sh"
    tools.write_to_file(script_name, "obj_dir/Vdut\n", force_unix=True)
    status = subprocess.call(["bash", script_name])
    if status != 0:
        raise OSError("Subprocess failed")
def export_csv(self, vns, filename):
    """Dump this core's configuration (dw, depth, cd_ratio) and its signals
    (name, width) to *filename* in a simple CSV format."""
    def csv_row(*fields):
        return ",".join(fields) + "\n"
    rows = [
        csv_row("config", "dw", str(self.dw)),
        csv_row("config", "depth", str(self.depth)),
        csv_row("config", "cd_ratio", str(int(self.cd_ratio))),
    ]
    rows.extend(csv_row("signal", vns.get_name(s), str(len(s)))
                for s in self.signals)
    write_to_file(filename, "".join(rows))
def _generate_sim_variables(include_paths):
    """Write variables.mak defining SRC_DIR and INC_DIR for the sim build."""
    include = "".join("-I" + path + " " for path in include_paths)
    content = """\
SRC_DIR = {}
INC_DIR = {}
""".format(core_directory, include)
    tools.write_to_file("variables.mak", content)
def _generate_csr_csv(self):
    """Export the SoC's CSR map (regions, constants, memory regions) to
    ``self.csr_csv``.

    Fix: ``os.path.dirname`` returns "" for a bare filename and
    ``os.makedirs("")`` raises FileNotFoundError; only create the directory
    when there actually is one.
    """
    memory_regions = self.soc.get_memory_regions()
    csr_regions = self.soc.get_csr_regions()
    constants = self.soc.get_constants()
    csr_dir = os.path.dirname(self.csr_csv)
    if csr_dir:
        os.makedirs(csr_dir, exist_ok=True)
    write_to_file(
        self.csr_csv,
        cpu_interface.get_csr_csv(csr_regions, constants, memory_regions))
def _build_io_pdc(named_sc, named_pc, build_name, additional_io_constraints):
    """Write <build_name>_io.pdc: one constraint per pin (indexed for
    multi-pin signals), followed by any additional user constraints."""
    entries = []
    for sig, pins, others, resname in named_sc:
        if len(pins) > 1:
            # Vector signal: constrain each bit individually.
            entries.extend(
                _format_io_pdc("{}[{}]".format(sig, index), pin, others)
                for index, pin in enumerate(pins))
        else:
            entries.append(_format_io_pdc(sig, pins[0], others))
    pdc = "".join(entries) + "\n".join(additional_io_constraints)
    tools.write_to_file(build_name + "_io.pdc", pdc)
def build(self, platform, fragment, build_dir="build", build_name="top",
          toolchain_path=None, source=True, run=True, mode="xst", **kwargs):
    """Build *fragment* through the ISE flow.

    *mode* selects the front-end: "xst"/"cpld" (native XST), "yosys"
    (Yosys -> EDIF), "edif" (platform-generated EDIF) or "mist".
    Returns the signal namespace (vns); None if nothing was elaborated.
    """
    if not isinstance(fragment, _Fragment):
        fragment = fragment.get_fragment()
    # Default toolchain install location per host platform.
    if toolchain_path is None:
        if sys.platform == "win32":
            toolchain_path = "C:\\Xilinx"
        elif sys.platform == "cygwin":
            toolchain_path = "/cygdrive/c/Xilinx"
        else:
            toolchain_path = "/opt/Xilinx"
    platform.finalize(fragment)
    ngdbuild_opt = self.ngdbuild_opt
    vns = None
    os.makedirs(build_dir, exist_ok=True)
    cwd = os.getcwd()
    os.chdir(build_dir)
    try:
        if mode in ("xst", "yosys", "cpld"):
            # Verilog-based front-ends: emit <build_name>.v first.
            v_output = platform.get_verilog(fragment, name=build_name, **kwargs)
            vns = v_output.ns
            named_sc, named_pc = platform.resolve_signals(vns)
            v_file = build_name + ".v"
            v_output.write(v_file)
            sources = platform.sources | {(v_file, "verilog", "work")}
            if mode in ("xst", "cpld"):
                _build_xst_files(platform.device, sources,
                                 platform.verilog_include_paths, build_name,
                                 self.xst_opt)
                isemode = mode
            else:
                # Yosys synthesizes to EDIF; ISE continues from the netlist,
                # so the part must be passed to ngdbuild explicitly.
                _run_yosys(platform.device, sources,
                           platform.verilog_include_paths, build_name)
                isemode = "edif"
                ngdbuild_opt += "-p " + platform.device
        if mode == "mist":
            from mist import synthesize
            synthesize(fragment, platform.constraint_manager.get_io_signals())
        if mode == "edif" or mode == "mist":
            # EDIF-based front-ends: emit <build_name>.edif.
            e_output = platform.get_edif(fragment)
            vns = e_output.ns
            named_sc, named_pc = platform.resolve_signals(vns)
            e_file = build_name + ".edif"
            e_output.write(e_file)
            isemode = "edif"
        tools.write_to_file(build_name + ".ucf", _build_ucf(named_sc, named_pc))
        if run:
            _run_ise(build_name, toolchain_path, source, isemode,
                     ngdbuild_opt, self, platform)
    finally:
        # Always restore the caller's working directory.
        os.chdir(cwd)
    return vns
def export(self, vns, filename):
    """Dump the configuration (dw, depth, with_rle) and the layout elements
    (name, width) to *filename* as CSV.

    Note: normalizes ``self.layout`` in place to a list when it is not a
    tuple, as the original implementation did.
    """
    def csv_line(*fields):
        return ",".join(fields) + "\n"
    out = csv_line("config", "dw", str(self.dw))
    out += csv_line("config", "depth", str(self.depth))
    out += csv_line("config", "with_rle", str(int(self.with_rle)))
    if not isinstance(self.layout, tuple):
        self.layout = [self.layout]
    for element in self.layout:
        out += csv_line("layout", vns.get_name(element), str(len(element)))
    write_to_file(filename, out)
def _run_diamond(build_name, source, ver=None):
    """Run Lattice Diamond's pnmainc on <build_name>.tcl and copy the
    resulting bitstream next to the build script (Windows/Cygwin only;
    raises NotImplementedError elsewhere, OSError on toolchain failure).

    Fix: the second assignment used "=" instead of "+=", which discarded
    the "REM Autogenerated by LiteX" header that had just been written.
    """
    if sys.platform == "win32" or sys.platform == "cygwin":
        build_script_contents = "REM Autogenerated by LiteX\n"
        build_script_contents += "pnmainc " + build_name + ".tcl\n"
        build_script_file = "build_" + build_name + ".bat"
        tools.write_to_file(build_script_file, build_script_contents)
        r = subprocess.call([build_script_file])
        shutil.copy(
            os.path.join("implementation", build_name + "_implementation.bit"),
            build_name + ".bit")
    else:
        raise NotImplementedError
    if r != 0:
        raise OSError("Subprocess failed")
def _build_files(device, sources, vincpaths, build_name):
    """Write the Diamond project Tcl (<build_name>.tcl): project creation,
    include paths, source files, and the full Synthesis->Bitgen run."""
    commands = ["prj_project new -name \"{}\" -impl \"implementation\" -dev {} -synthesis \"synplify\"".format(build_name, device)]
    for path in vincpaths:
        commands.append("prj_impl option {include path} {\"" + path + "\"}")
    for filename, language, library in sources:
        commands.append("prj_src add \"" + filename + "\" -work " + library)
    commands += [
        "prj_run Synthesis -impl implementation -forceOne",
        "prj_run Translate -impl implementation",
        "prj_run Map -impl implementation",
        "prj_run PAR -impl implementation",
        "prj_run Export -impl implementation -task Bitgen",
    ]
    tools.write_to_file(build_name + ".tcl", "\n".join(commands))
def _run_ise(build_name, ise_path, source, mode, ngdbuild_opt, toolchain,
             platform, ver=None):
    """Generate and execute the full ISE build script for *build_name*.

    The script chains xst (unless *mode* is "edif"), ngdbuild and then
    either the CPLD flow (cpldfit/taengine/hprep6) or the FPGA flow
    (map/par/bitgen), followed by any toolchain.ise_commands.
    Raises OSError on a non-zero subprocess exit.
    """
    if sys.platform == "win32" or sys.platform == "cygwin":
        source_cmd = "call "
        script_ext = ".bat"
        shell = ["cmd", "/c"]
        build_script_contents = "@echo off\nrem Autogenerated by Migen\n"
        # Make each batch command abort the script on failure.
        fail_stmt = " || exit /b"
    else:
        source_cmd = "source "
        script_ext = ".sh"
        shell = ["bash"]
        build_script_contents = "# Autogenerated by Migen\nset -e\n"
        fail_stmt = ""
    if source:
        # Source the ISE settings script before running the tools.
        settings = common.settings(ise_path, ver, "ISE_DS")
        build_script_contents += source_cmd + tools.cygpath(settings) + "\n"
    if mode == "edif":
        # EDIF netlist comes from elsewhere; skip xst.
        ext = "edif"
    else:
        ext = "ngc"
        build_script_contents += """
xst -ifn {build_name}.xst{fail_stmt}
"""
    build_script_contents += """
ngdbuild {ngdbuild_opt} -uc {build_name}.ucf {build_name}.{ext} {build_name}.ngd{fail_stmt}
"""
    if mode == "cpld":
        build_script_contents += """
cpldfit -ofmt verilog {par_opt} -p {device} {build_name}.ngd{fail_stmt}
taengine -f {build_name}.vm6 -detail -iopath -l {build_name}.tim{fail_stmt}
hprep6 -s IEEE1532 -i {build_name}.vm6{fail_stmt}
"""
    else:
        build_script_contents += """
map {map_opt} -o {build_name}_map.ncd {build_name}.ngd {build_name}.pcf{fail_stmt}
par {par_opt} {build_name}_map.ncd {build_name}.ncd {build_name}.pcf{fail_stmt}
bitgen {bitgen_opt} {build_name}.ncd {build_name}.bit{fail_stmt}
"""
    # Substitute every placeholder in one pass over the assembled template.
    build_script_contents = build_script_contents.format(build_name=build_name,
            ngdbuild_opt=ngdbuild_opt, bitgen_opt=toolchain.bitgen_opt,
            ext=ext, par_opt=toolchain.par_opt, map_opt=toolchain.map_opt,
            device=platform.device, fail_stmt=fail_stmt)
    build_script_contents += toolchain.ise_commands.format(build_name=build_name)
    build_script_file = "build_" + build_name + script_ext
    tools.write_to_file(build_script_file, build_script_contents,
                        force_unix=False)
    command = shell + [build_script_file]
    r = tools.subprocess_call_filtered(command, common.colors)
    if r != 0:
        raise OSError("Subprocess failed")
def main():
    """Convert a Verilog file into Migen Module/Instance skeletons via Yosys.

    usage: litex_read_verilog verilog_file [module]

    Fixes: the JSON file is now opened with a context manager (the handle
    was previously leaked), and the temporary file is removed with
    os.remove instead of ``os.system("rm ...")`` (portable and immune to
    shell-quoting issues with unusual filenames).
    """
    if len(sys.argv) < 2:
        print("usage: litex_read_verilog verilog_file [module]")
        exit(1)

    verilog_file = sys.argv[1]
    json_file = verilog_file + ".json"
    module = None if len(sys.argv) < 3 else sys.argv[2]

    # use yosys to convert verilog to json
    yosys_v2j = "\n".join([
        "read_verilog -sv {}".format(verilog_file),
        "write_json {}.json".format(verilog_file)
    ])
    tools.write_to_file("yosys_v2j.ys", yosys_v2j)
    os.system("yosys -q yosys_v2j.ys")

    # load json and convert to migen module
    with open(json_file, "r") as f:
        j = json.load(f)

    # create list of modules
    modules = [module] if module is not None else j["modules"].keys()

    # create migen definitions
    for module in modules:
        migen_def = []
        migen_def.append("class {}(Module):".format(module))
        migen_def.append(" "*4 + "def __init__(self):")
        for name, info in j["modules"][module]["ports"].items():
            # Single-bit ports become Signal() with no explicit width.
            length = "" if len(info["bits"]) == 1 else len(info["bits"])
            migen_def.append(" "*8 + "self.{} = Signal({})".format(name, length))
        migen_def.append("")
        migen_def.append(" "*8 + "# # #")
        migen_def.append("")
        migen_def.append(" "*8 + "self.specials += Instance(\"{}\",".format(module))
        for name, info in j["modules"][module]["ports"].items():
            io_prefix = {
                "input": "i",
                "output": "o",
                "inout": "io"
            }[info["direction"]]
            migen_def.append(" "*12 + "{}_{}=self.{},".format(io_prefix, name, name))
        migen_def.append(" "*8 + ")")
        migen_def.append("")
        print("\n".join(migen_def))

    # keep things clean after us
    os.remove(json_file)
def _build_script(build_name, device, toolchain_path, ver=None):
    """Write a platform-specific build-script stub and return its filename.

    Only the autogenerated header is emitted; the copy/fail snippets are
    prepared per-platform but not used by this stub (kept for parity with
    the original implementation).  *device*, *toolchain_path* and *ver* are
    currently unused here.
    """
    on_windows = sys.platform in ("win32", "cygwin")
    if on_windows:
        script_ext = ".bat"
        build_script_contents = "@echo off\nrem Autogenerated by Migen\n\n"
        copy_stmt = "copy"
        fail_stmt = " || exit /b"
    else:
        script_ext = ".sh"
        build_script_contents = "# Autogenerated by Migen\n\n"
        copy_stmt = "cp"
        fail_stmt = " || exit 1"
    build_script_file = "build_" + build_name + script_ext
    tools.write_to_file(build_script_file, build_script_contents,
                        force_unix=False)
    return build_script_file
def _build_xst_files(device, sources, vincpaths, build_name, xst_opt):
    """Write the ISE XST input files.

    Emits <build_name>.prj (one "language library filename" line per
    source) and <build_name>.xst (the run command with *xst_opt* plus one
    -vlgincdir line per Verilog include path).
    """
    prj_contents = ""
    for filename, language, library in sources:
        prj_contents += language + " " + library + " " + filename + "\n"
    tools.write_to_file(build_name + ".prj", prj_contents)

    xst_contents = """run -ifn {build_name}.prj -top {build_name}
{xst_opt}
-ofn {build_name}.ngc -p {device}
""".format(build_name=build_name, xst_opt=xst_opt, device=device)
    for path in vincpaths:
        xst_contents += "-vlgincdir " + path + "\n"
    tools.write_to_file(build_name + ".xst", xst_contents)
def _run_sim(build_name, as_root=False):
    """Run the built simulator (optionally under sudo), restoring the
    terminal settings afterwards on non-Windows hosts.

    Fix: the original wrapped the call in a bare ``except: pass``, which
    swallowed the OSError it had just raised (and even KeyboardInterrupt),
    so failures went unnoticed.  try/finally keeps the termios-restore
    behavior while letting errors propagate to the caller.
    """
    run_script_contents = "sudo " if as_root else ""
    run_script_contents += "obj_dir/Vdut"
    run_script_file = "run_" + build_name + ".sh"
    tools.write_to_file(run_script_file, run_script_contents, force_unix=True)
    termios_settings = None
    if sys.platform != "win32":
        # Save terminal state: the simulated UART may alter it.
        import termios
        termios_settings = termios.tcgetattr(sys.stdin.fileno())
    try:
        r = subprocess.call(["bash", run_script_file])
        if r != 0:
            raise OSError("Subprocess failed")
    finally:
        if sys.platform != "win32":
            termios.tcsetattr(sys.stdin.fileno(), termios.TCSAFLUSH,
                              termios_settings)
def _build_sim(build_name, sources, threads, coverage):
    """Write the shell script that builds the Verilator simulation through
    the core Makefile and stages the resulting shared objects in modules/."""
    makefile = os.path.join(core_directory, 'Makefile')
    cc_srcs = "".join("--cc " + filename + " "
                      for filename, language, library in sources)
    build_script_contents = """\
rm -rf obj_dir/
make -C . -f {} {} {} {}
mkdir -p modules && cp obj_dir/*.so modules
""".format(makefile,
           "CC_SRCS=\"{}\"".format(cc_srcs),
           "THREADS={}".format(threads) if int(threads) > 1 else "",
           "COVERAGE=1" if coverage else "",
           )
    build_script_file = "build_" + build_name + ".sh"
    tools.write_to_file(build_script_file, build_script_contents,
                        force_unix=True)
def _run_yosys(device, sources, vincpaths, build_name):
    """Drive Yosys for the Xilinx flow: read all sources, translate
    Vivado-style attributes (keep / dont_touch / async_reg) to keep=1
    markers, synthesize and write an EDIF netlist.

    Raises OSError when the yosys subprocess exits non-zero.
    """
    ys_contents = ""
    incflags = ""
    for path in vincpaths:
        incflags += " -I" + path
    for filename, language, library in sources:
        # read_verilog / read_vhdl etc., with the include flags appended.
        ys_contents += "read_{}{} {}\n".format(language, incflags, filename)
    ys_contents += """\
hierarchy -top top

# FIXME: Are these needed?
# proc; memory; opt; fsm; opt

# Map keep to keep=1 for yosys
log
log XX. Converting (* keep = "xxxx" *) attribute for Yosys
log
attrmap -tocase keep -imap keep="true" keep=1 -imap keep="false" keep=0 -remove keep=0
select -list a:keep=1

# Add keep=1 for yosys to objects which have dont_touch="true" attribute.
log
log XX. Converting (* dont_touch = "true" *) attribute for Yosys
log
select -list a:dont_touch=true
setattr -set keep 1 a:dont_touch=true

# Convert (* async_reg = "true" *) to async registers for Yosys.
# (* async_reg = "true", dont_touch = "true" *) reg xilinxmultiregimpl0_regs1 = 1'd0;
log
log XX. Converting (* async_reg = "true" *) attribute to async registers for Yosys
log
select -list a:async_reg=true
setattr -set keep 1 a:async_reg=true

synth_xilinx -top top

write_edif -pvector bra -attrprop {build_name}.edif
""".format(build_name=build_name)
    ys_name = build_name + ".ys"
    tools.write_to_file(ys_name, ys_contents)
    r = subprocess.call(["yosys", ys_name])
    if r != 0:
        raise OSError("Subprocess failed")
def build(self, platform, fragment, build_dir="build", build_name="top",
          toolchain_path=None, source=None, run=True, mode="xst", **kwargs):
    """Build *fragment* through the ISE flow ("xst" or "yosys" front-end).

    When *source* is None it defaults to sourcing the ISE settings on all
    platforms except native Windows.  Returns the signal namespace (vns);
    None if nothing was elaborated.
    """
    if not isinstance(fragment, _Fragment):
        fragment = fragment.get_fragment()
    # Default toolchain install location per host platform.
    if toolchain_path is None:
        if sys.platform == "win32":
            toolchain_path = "C:\\Xilinx"
        elif sys.platform == "cygwin":
            toolchain_path = "/cygdrive/c/Xilinx"
        else:
            toolchain_path = "/opt/Xilinx"
    if source is None:
        source = sys.platform != "win32"
    platform.finalize(fragment)
    ngdbuild_opt = self.ngdbuild_opt
    vns = None
    tools.mkdir_noerror(build_dir)
    cwd = os.getcwd()
    os.chdir(build_dir)
    try:
        if mode == "xst" or mode == "yosys":
            # Emit the Verilog top-level for either front-end.
            v_output = platform.get_verilog(fragment, name=build_name, **kwargs)
            vns = v_output.ns
            named_sc, named_pc = platform.resolve_signals(vns)
            v_file = build_name + ".v"
            v_output.write(v_file)
            sources = platform.sources | {(v_file, "verilog", "work")}
            if mode == "xst":
                _build_xst_files(platform.device, sources,
                                 platform.verilog_include_paths, build_name,
                                 self.xst_opt)
                isemode = "xst"
            else:
                # Yosys synthesizes to EDIF; ISE continues from the netlist,
                # so the part must be passed to ngdbuild explicitly.
                _run_yosys(platform.device, sources,
                           platform.verilog_include_paths, build_name)
                isemode = "edif"
                ngdbuild_opt += "-p " + platform.device
        tools.write_to_file(build_name + ".ucf", _build_ucf(named_sc, named_pc))
        if run:
            _run_ise(build_name, toolchain_path, source, isemode,
                     ngdbuild_opt, self.bitgen_opt, self.ise_commands,
                     self.map_opt, self.par_opt)
    finally:
        # Always restore the caller's working directory.
        os.chdir(cwd)
    return vns
def _run_yosys(device, sources, vincpaths, build_name):
    """Synthesize the design with Yosys and emit <build_name>.edif for the
    downstream ISE flow; raises OSError when yosys exits non-zero."""
    incflags = "".join(" -I" + path for path in vincpaths)
    read_commands = ["read_{}{} {}\n".format(language, incflags, filename)
                     for filename, language, library in sources]
    ys_contents = "".join(read_commands)
    ys_contents += """hierarchy -check -top top
proc; memory; opt; fsm; opt
synth_xilinx -top top -edif {build_name}.edif""".format(build_name=build_name)
    ys_name = build_name + ".ys"
    tools.write_to_file(ys_name, ys_contents)
    if subprocess.call(["yosys", ys_name]) != 0:
        raise OSError("Subprocess failed")
def _run_quartus(build_name, quartus_path):
    """Run the Quartus flow (map, fit, asm, sta) through a generated shell
    script; raises OSError on failure.  *quartus_path* is currently unused
    (the quartus_* tools are expected on $PATH)."""
    script = """# Autogenerated by LiteX
set -e
quartus_map --read_settings_files=on --write_settings_files=off {build_name} -c {build_name}
quartus_fit --read_settings_files=off --write_settings_files=off {build_name} -c {build_name}
quartus_asm --read_settings_files=off --write_settings_files=off {build_name} -c {build_name}
quartus_sta {build_name} -c {build_name}
""".format(build_name=build_name)  # noqa
    script_file = "build_" + build_name + ".sh"
    tools.write_to_file(script_file, script, force_unix=True)
    if subprocess.call(["bash", script_file]):
        raise OSError("Subprocess failed")
def _build_timing_sdc(vns, clocks, false_paths, build_name,
                      additional_timing_constraints):
    """Write <build_name>.sdc with create_clock and set_clock_groups
    constraints, followed by any additional user constraints.

    Fixes:
    - create_clock: ``.format()`` only applied to the last concatenated
      string literal, so the emitted line began with a literal "{clk}".
      The whole template is now formatted at once.
    - set_clock_groups: the raw signal objects were interpolated instead of
      their net names; ``vns.get_name()`` is now used, as in create_clock.
    """
    sdc = []
    # Sort by DUID for deterministic output across runs.
    for clk, period in sorted(clocks.items(), key=lambda x: x[0].duid):
        sdc.append(
            "create_clock -name {clk} -period {period} [get_nets {clk}]".format(
                clk=vns.get_name(clk), period=str(period)))
    for from_, to in sorted(false_paths, key=lambda x: (x[0].duid, x[1].duid)):
        sdc.append(
            "set_clock_groups "
            "-group [get_clocks -include_generated_clocks -of [get_nets {from_}]] "
            "-group [get_clocks -include_generated_clocks -of [get_nets {to}]] "
            "-asynchronous".format(from_=vns.get_name(from_),
                                   to=vns.get_name(to)))
    # generate sdc
    sdc += additional_timing_constraints
    tools.write_to_file(build_name + ".sdc", "\n".join(sdc))
def _generate_sim_h(platform):
    """Write dut_header.h: the simulation header declaring one pads struct
    per requested sim interface plus the litex_sim_init entry point."""
    content = """\
#ifndef __SIM_CORE_H_
#define __SIM_CORE_H_
#include "pads.h"
"""
    # One struct definition per requested interface.
    for args in platform.sim_requested:
        content += _generate_sim_h_struct(*args)
    content += """\
#ifndef __cplusplus
void litex_sim_init(void **out);
#endif
#endif /* __SIM_CORE_H_ */
"""
    tools.write_to_file("dut_header.h", content)
def _run_vivado(build_name, vivado_path, source, ver=None):
    """Create and execute a Vivado batch launcher for <build_name>.tcl.

    Windows/Cygwin: plain .bat invoking vivado directly.  Unix: shell
    script sourcing the Vivado settings (default /opt/Xilinx/Vivado) first.
    Raises OSError when the subprocess exits non-zero.
    """
    batch_cmd = "vivado -mode batch -source " + build_name + ".tcl\n"
    if sys.platform in ("win32", "cygwin"):
        script_file = "build_" + build_name + ".bat"
        tools.write_to_file(script_file,
                            "REM Autogenerated by LiteX\n" + batch_cmd)
        r = subprocess.call([script_file])
    else:
        if vivado_path is None:
            vivado_path = "/opt/Xilinx/Vivado"
        settings = common.settings(vivado_path, ver)
        script = "# Autogenerated by LiteX\nset -e\n"
        script += "source " + settings + "\n"
        script += batch_cmd
        script_file = "build_" + build_name + ".sh"
        tools.write_to_file(script_file, script)
        r = subprocess.call(["bash", script_file])
    if r != 0:
        raise OSError("Subprocess failed")
def _build_script(source, build_template, build_name, **kwargs):
    """Expand the *build_template* command lines into a platform build
    script and return the script's filename.

    Each template line gets a fail statement appended before formatting so
    Windows batch scripts abort on the first failing command.
    """
    if sys.platform in ("win32", "cygwin"):
        script_ext = ".bat"
        header = "@echo off\nrem Autogenerated by Migen\n\n"
        fail_stmt = " || exit /b"
    else:
        script_ext = ".sh"
        header = "# Autogenerated by Migen\nset -e\n\n"
        fail_stmt = ""
    body = ""
    for template_line in build_template:
        # Append fail_stmt before formatting so Windows scripts fail early.
        body += (template_line + "{fail_stmt}\n").format(
            build_name=build_name, fail_stmt=fail_stmt, **kwargs)
    build_script_file = "build_" + build_name + script_ext
    tools.write_to_file(build_script_file, header + body, force_unix=False)
    return build_script_file
def _build_files(device, sources, vincpaths, named_sc, named_pc, build_name):
    """Write the Quartus settings file (<build_name>.qsf): per-source file
    assignments, search paths, pin constraints and the target device."""
    lines = []
    for filename, language, library in sources:
        # Enforce use of SystemVerilog
        # (Quartus does not support global parameters in Verilog)
        if language == "verilog":
            language = "systemverilog"
        lines.append(
            "set_global_assignment -name {lang}_FILE {path} "
            "-library {lib}".format(
                lang=language.upper(),
                path=filename.replace("\\", "/"),
                lib=library))
    lines.extend(
        "set_global_assignment -name SEARCH_PATH {}".format(
            path.replace("\\", "/"))
        for path in vincpaths)
    lines.append(_build_qsf(named_sc, named_pc))
    lines.append("set_global_assignment -name DEVICE {}".format(device))
    tools.write_to_file("{}.qsf".format(build_name), "\n".join(lines))
def _build_batch(self, platform, sources, edifs, ips, build_name,
                 synth_mode, enable_xpm):
    """Emit the Vivado project Tcl script (<build_name>.tcl).

    Creates the project, registers sources/EDIFs/IPs, then either runs
    Vivado synthesis or links a pre-synthesized Yosys EDIF, and finally
    drives placement, routing, reports and bitstream generation.
    """
    assert synth_mode in ["vivado", "yosys"]
    tcl = []
    tcl.append("create_project -force -name {} -part {}".format(
        build_name, platform.device))
    if enable_xpm:
        tcl.append(
            "set_property XPM_LIBRARIES {XPM_CDC XPM_MEMORY} [current_project]"
        )
    if synth_mode == "vivado":
        # "-include_dirs {}" crashes Vivado 2016.4
        for filename, language, library in sources:
            filename_tcl = "{" + filename + "}"
            tcl.append("add_files " + filename_tcl)
            # Only VHDL needs an explicit library association.
            if language == "vhdl":
                tcl.append("set_property library {} [get_files {}]".format(
                    library, filename_tcl))
    for filename in edifs:
        filename_tcl = "{" + filename + "}"
        tcl.append("read_edif " + filename_tcl)
    for filename in ips:
        filename_tcl = "{" + filename + "}"
        ip = os.path.splitext(os.path.basename(filename))[0]
        # Import, upgrade and pre-synthesize each packaged IP.
        tcl.append("read_ip " + filename_tcl)
        tcl.append("upgrade_ip [get_ips {}]".format(ip))
        tcl.append("generate_target all [get_ips {}]".format(ip))
        tcl.append("synth_ip [get_ips {}] -force".format(ip))
        tcl.append("get_files -all -of_objects [get_files {}]".format(
            filename_tcl))
    tcl.append("read_xdc {}.xdc".format(build_name))
    tcl.extend(
        c.format(build_name=build_name)
        for c in self.pre_synthesis_commands)
    if synth_mode == "vivado":
        if platform.verilog_include_paths:
            tcl.append(
                "synth_design -top {} -part {} -include_dirs {{{}}}".
                format(build_name, platform.device,
                       " ".join(platform.verilog_include_paths)))
        else:
            tcl.append("synth_design -top {} -part {}".format(
                build_name, platform.device))
    elif synth_mode == "yosys":
        # Yosys already synthesized; just link its EDIF netlist.
        tcl.append("read_edif {}.edif".format(build_name))
        tcl.append("link_design -top {} -part {}".format(
            build_name, platform.device))
    else:
        raise OSError("Unknown synthesis mode! {}".format(synth_mode))
    tcl.append("report_timing_summary -file {}_timing_synth.rpt".format(
        build_name))
    tcl.append(
        "report_utilization -hierarchical -file {}_utilization_hierarchical_synth.rpt"
        .format(build_name))
    tcl.append("report_utilization -file {}_utilization_synth.rpt".format(
        build_name))
    tcl.append("opt_design")
    tcl.append("place_design")
    if self.with_phys_opt:
        tcl.append("phys_opt_design -directive AddRetime")
    tcl.append(
        "report_utilization -hierarchical -file {}_utilization_hierarchical_place.rpt"
        .format(build_name))
    tcl.append("report_utilization -file {}_utilization_place.rpt".format(
        build_name))
    tcl.append("report_io -file {}_io.rpt".format(build_name))
    tcl.append(
        "report_control_sets -verbose -file {}_control_sets.rpt".format(
            build_name))
    tcl.append(
        "report_clock_utilization -file {}_clock_utilization.rpt".format(
            build_name))
    tcl.append("route_design")
    tcl.append("phys_opt_design")
    tcl.append("report_timing_summary -no_header -no_detailed_paths")
    tcl.append("write_checkpoint -force {}_route.dcp".format(build_name))
    tcl.append(
        "report_route_status -file {}_route_status.rpt".format(build_name))
    tcl.append("report_drc -file {}_drc.rpt".format(build_name))
    tcl.append(
        "report_timing_summary -datasheet -max_paths 10 -file {}_timing.rpt"
        .format(build_name))
    tcl.append("report_power -file {}_power.rpt".format(build_name))
    for bitstream_command in self.bitstream_commands:
        tcl.append(bitstream_command.format(build_name=build_name))
    tcl.append("write_bitstream -force {}.bit ".format(build_name))
    for additional_command in self.additional_commands:
        tcl.append(additional_command.format(build_name=build_name))
    tcl.append("quit")
    tools.write_to_file(build_name + ".tcl", "\n".join(tcl))
def load_bitstream(self, bitstream_file):
    """Program the device: derive an .xcf filename from the bitstream,
    fill in self.xcf_template and hand the result to Lattice's pgrcmd."""
    xcf_file = bitstream_file.replace(".bit", ".xcf")
    tools.write_to_file(
        xcf_file, self.xcf_template.format(bitstream_file=bitstream_file))
    subprocess.call(["pgrcmd", "-infile", xcf_file])
def main():
    """Build the Opsis LiteX SoC: parse arguments, construct the SoC,
    configure firmware packages and run the gateware build; optionally
    export PCIe kernel headers and the analyzer CSV."""
    parser = argparse.ArgumentParser(description="Opsis LiteX SoC",
                                     conflict_handler='resolve')
    get_args(parser)
    builder_args(parser)
    soc_sdram_args(parser)
    args = parser.parse_args()

    platform = get_platform(args)
    soc = get_soc(args, platform)
    builddir = get_builddir(args)
    testdir = get_testdir(args)

    buildargs = builder_argdict(args)
    if not buildargs.get('output_dir', None):
        buildargs['output_dir'] = builddir
    if hasattr(soc, 'cpu_type'):
        # CPU-based SoC: use the Builder flow with CSR exports.
        if not buildargs.get('csr_csv', None):
            buildargs['csr_csv'] = os.path.join(testdir, "csr.csv")
        if not buildargs.get('csr_json', None):
            buildargs['csr_json'] = os.path.join(testdir, "csr.json")
        builder = Builder(soc, **buildargs)
        if builder.bios_options is None:
            builder.bios_options = {}
        # using 'bios_options' is a hack, as those settings
        # are intended for the firmware (not bios) - there
        # is just no dictionary for 'firmware_options'
        # available in LiteX;
        # the intended effect of setting those options is to
        # generate entries in the .mak files that can be used
        # in the Makefile
        if 'user_flash' in soc.mem_regions:
            builder.bios_options['EXECUTE_IN_PLACE'] = 1
        else:
            builder.bios_options['COPY_TO_MAIN_RAM'] = 1
        if not args.no_compile_firmware or args.override_firmware:
            builder.add_software_package(
                "uip", "{}/firmware/uip".format(os.getcwd()))
            # FIXME: All platforms which current run their user programs from
            # SPI flash lack the block RAM resources to run the default
            # firmware. Check whether to use the stub or default firmware
            # should be refined (perhaps soc attribute?).
            if "main_ram" in soc.mem_regions:
                builder.add_software_package(
                    "firmware", "{}/firmware".format(os.getcwd()))
            else:
                builder.add_software_package(
                    "stub", "{}/firmware/stub".format(os.getcwd()))
        vns = builder.build(**dict(args.build_option))
    else:
        # No CPU: build the gateware directly.
        vns = platform.build(soc,
                             build_dir=os.path.join(builddir, "gateware"))

    if hasattr(soc, 'pcie_phy'):
        # Export C headers for the PCIe kernel driver.
        from litex.soc.integration.export import get_csr_header, get_soc_header
        csr_header = get_csr_header(soc.csr_regions, soc.constants,
                                    with_access_functions=False)
        soc_header = get_soc_header(soc.constants,
                                    with_access_functions=False)
        kerneldir = os.path.join(builddir, "software", "pcie", "kernel")
        os.makedirs(kerneldir, exist_ok=True)
        write_to_file(os.path.join(kerneldir, "csr.h"), csr_header)
        write_to_file(os.path.join(kerneldir, "soc.h"), soc_header)

    if hasattr(soc, 'do_exit'):
        soc.do_exit(vns, filename="{}/analyzer.csv".format(testdir))
# Action dispatch: expand meta-actions into their prerequisites, then run
# each enabled step in order.  NOTE(review): this fragment is truncated in
# this view — the body of the final "load-bitstream" branch (and the
# definitions of actions/soc/args/build_name) lie outside it.
if actions["all"]:
    actions["build-csr-csv"] = True
    actions["build-csr-header"] = True
    actions["build-bitstream"] = True
    actions["load-bitstream"] = True

# Building a bitstream implies regenerating the CSR exports first.
if actions["build-bitstream"]:
    actions["build-csr-csv"] = True
    actions["build-csr-header"] = True

if actions["clean"]:
    # NOTE(review): subprocess does not expand globs, so "build/*" is
    # passed to rm literally — likely intended `shell=True` or explicit
    # directory removal; confirm before relying on this cleanup.
    subprocess.call(["rm", "-rf", "build/*"])

if actions["build-csr-csv"]:
    csr_csv = cpu_interface.get_csr_csv(csr_regions)
    write_to_file(args.csr_csv, csr_csv)

if actions["build-csr-header"]:
    csr_header = cpu_interface.get_csr_header(csr_regions,
                                              soc.get_constants(),
                                              with_access_functions=False)
    write_to_file(args.csr_header, csr_header)

if actions["build-bitstream"]:
    build_kwargs = dict((k, autotype(v)) for k, v in args.build_option)
    vns = platform.build(soc, build_name=build_name, **build_kwargs)
    if hasattr(soc, "do_exit") and vns is not None:
        if hasattr(soc.do_exit, '__call__'):
            soc.do_exit(vns)

if actions["load-bitstream"]:
def _run_ise(build_name, mode, ngdbuild_opt, toolchain, platform):
    """Generate and execute the ISE build script for *build_name*.

    Writes a platform-appropriate shell/batch script chaining the ISE
    command-line tools (xst/edif2ngd, netgen, ngdbuild, then either the
    CPLD flow or map/par/bitgen), and runs it.

    Args:
        build_name:   base name of the design files (e.g. "top").
        mode:         "edif" (use a pre-built EDIF netlist), "cpld"
                      (CPLD fitter flow) or anything else for the xst flow.
        ngdbuild_opt: extra options passed to ngdbuild.
        toolchain:    toolchain object providing *_opt strings and
                      ise_commands.
        platform:     platform object providing the target device name.

    Raises:
        OSError: if ISE cannot be located or the script fails.
    """
    if sys.platform == "win32" or sys.platform == "cygwin":
        script_ext = ".bat"
        shell = ["cmd", "/c"]
        build_script_contents = "@echo off\nrem Autogenerated by LiteX / git: " + tools.get_litex_git_revision(
        ) + "\n"
        # Batch files do not abort on error by default; append an explicit
        # exit to every command.
        fail_stmt = " || exit /b"
    else:
        script_ext = ".sh"
        shell = ["bash"]
        build_script_contents = "# Autogenerated by LiteX / git: " + tools.get_litex_git_revision(
        ) + "\nset -e\n"
        # Bugfix: fail_stmt must always be bound on this branch; "set -e"
        # already makes the script abort on the first failing command.
        # (Previously it was only assigned inside the LITEX_ENV_ISE check,
        # causing a NameError at format() time when the variable was unset.)
        fail_stmt = ""
        if os.getenv("LITEX_ENV_ISE", False):
            build_script_contents += "source " + os.path.join(
                os.getenv("LITEX_ENV_ISE"), "settings64.sh\n")

    if mode == "edif":
        # Netlist was produced externally (e.g. Yosys): convert EDIF to NGO.
        ext = "ngo"
        build_script_contents += """
edif2ngd {build_name}.edif {build_name}.{ext}{fail_stmt}
"""
    else:
        # Synthesize with xst from the generated .xst project file.
        ext = "ngc"
        build_script_contents += """
xst -ifn {build_name}.xst{fail_stmt}
"""
    # This generates a .v file for post synthesis simulation
    build_script_contents += """
netgen -ofmt verilog -w -sim {build_name}.{ext} {build_name}_synth.v
"""
    build_script_contents += """
ngdbuild {ngdbuild_opt} -uc {build_name}.ucf {build_name}.{ext} {build_name}.ngd{fail_stmt}
"""
    if mode == "cpld":
        build_script_contents += """
cpldfit -ofmt verilog {par_opt} -p {device} {build_name}.ngd{fail_stmt}
taengine -f {build_name}.vm6 -detail -iopath -l {build_name}.tim{fail_stmt}
hprep6 -s IEEE1532 -i {build_name}.vm6{fail_stmt}
"""
    else:
        build_script_contents += """
map {map_opt} -o {build_name}_map.ncd {build_name}.ngd {build_name}.pcf{fail_stmt}
par {par_opt} {build_name}_map.ncd {build_name}.ncd {build_name}.pcf{fail_stmt}
bitgen {bitgen_opt} {build_name}.ncd {build_name}.bit{fail_stmt}
"""
    build_script_contents = build_script_contents.format(
        build_name=build_name,
        ngdbuild_opt=ngdbuild_opt,
        bitgen_opt=toolchain.bitgen_opt,
        ext=ext,
        par_opt=toolchain.par_opt,
        map_opt=toolchain.map_opt,
        device=platform.device,
        fail_stmt=fail_stmt)
    build_script_contents += toolchain.ise_commands.format(
        build_name=build_name)
    build_script_file = "build_" + build_name + script_ext
    tools.write_to_file(build_script_file,
                        build_script_contents,
                        force_unix=False)
    command = shell + [build_script_file]

    # Refuse to run when ISE is neither on PATH nor pointed to by
    # LITEX_ENV_ISE — the script would fail with a confusing error.
    if which("ise") is None and not os.getenv("LITEX_ENV_ISE", False):
        msg = "Unable to find or source ISE toolchain, please either:\n"
        msg += "- Source ISE's settings manually.\n"
        msg += "- Or set LITEX_ENV_ISE environment variable to ISE's settings path.\n"
        msg += "- Or add ISE toolchain to your $PATH."
        raise OSError(msg)

    if tools.subprocess_call_filtered(command, common.colors) != 0:
        raise OSError("Error occured during ISE's script execution.")
def build(self, platform, fragment, build_dir="build", build_name="top", toolchain_path=None, source=True, run=True, mode="xst", **kwargs): # Get default toolchain path (if not specified) if toolchain_path is None: if sys.platform == "win32": toolchain_path = "C:\\Xilinx" elif sys.platform == "cygwin": toolchain_path = "/cygdrive/c/Xilinx" else: toolchain_path = "/opt/Xilinx" # Create build directory os.makedirs(build_dir, exist_ok=True) cwd = os.getcwd() os.chdir(build_dir) # Finalize design if not isinstance(fragment, _Fragment): fragment = fragment.get_fragment() platform.finalize(fragment) vns = None try: if mode in ["xst", "yosys", "cpld"]: # Generate verilog v_output = platform.get_verilog(fragment, name=build_name, **kwargs) vns = v_output.ns named_sc, named_pc = platform.resolve_signals(vns) v_file = build_name + ".v" v_output.write(v_file) platform.add_source(v_file) # Generate design project (.xst) if mode in ["xst", "cpld"]: _build_xst(platform.device, platform.sources, platform.verilog_include_paths, build_name, self.xst_opt) isemode = mode else: # Run Yosys if run: _run_yosys(platform.device, platform.sources, platform.verilog_include_paths, build_name) isemode = "edif" self.ngdbuild_opt += "-p " + platform.device if mode in ["edif"]: # Generate edif e_output = platform.get_edif(fragment) vns = e_output.ns named_sc, named_pc = platform.resolve_signals(vns) e_file = build_name + ".edif" e_output.write(e_file) isemode = "edif" # Generate design constraints (.ucf) tools.write_to_file(build_name + ".ucf", _build_ucf(named_sc, named_pc)) # Run ISE if run: _run_ise(build_name, toolchain_path, source, isemode, self.ngdbuild_opt, self, platform) finally: os.chdir(cwd) return vns
def _generate_sim_config(config): content = config.get_json() tools.write_to_file("sim_config.js", content)
def _build_fp_pdc(build_name, additional_fp_constraints): pdc = "\n".join(additional_fp_constraints) tools.write_to_file(build_name + "_fp.pdc", pdc)
def build_timing_constraints(self, vns): r = "" for clk, period in self.clocks.items(): r += """ctx.addClock("{}", {})\n""".format(vns.get_name(clk), 1e3/period) tools.write_to_file(self._build_name + "_pre_pack.py", r) return (self._build_name + "_pre_pack.py", "PY")
# dependencies if actions["all"]: actions["build-csr-csv"] = True actions["build-bitstream"] = True actions["load-bitstream"] = True if actions["build-bitstream"]: actions["build-csr-csv"] = True if actions["clean"]: subprocess.call(["rm", "-rf", "build/*"]) if actions["build-csr-csv"]: csr_csv = cpu_interface.get_csr_csv(csr_regions, csr_constants) write_to_file(args.csr_csv, csr_csv) if actions["build-core"]: soc_fragment = soc.get_fragment() platform.finalize(soc_fragment) v_output = platform.get_verilog( soc_fragment, name="litescope", special_overrides=xilinx_special_overrides) v_output.write("build/litescope.v") if actions["build-bitstream"]: build_kwargs = dict((k, autotype(v)) for k, v in args.build_option) vns = platform.build(soc, build_name=build_name, **build_kwargs) if hasattr(soc, "do_exit") and vns is not None: if hasattr(soc.do_exit, '__call__'):
def _generate_includes(self):
    """Generate the software include tree (variables.mak, linker scripts
    and C headers) for the SoC under self.generated_dir."""
    # Generate Include/Generated directories.
    _create_dir(self.include_dir)
    _create_dir(self.generated_dir)

    # Generate BIOS files when the SoC uses it.
    # zynq7000 runs no LiteX BIOS, so it is excluded like the no-CPU case.
    with_bios = self.soc.cpu_type not in [None, "zynq7000"]
    if with_bios:
        # Generate Variables to variables.mak.
        variables_contents = []

        # Append one "KEY=value" line, escaping Makefile-special characters.
        def define(k, v):
            variables_contents.append("{}={}".format(
                k, _makefile_escape(v)))

        # Define the CPU variables.
        for k, v in export.get_cpu_mak(self.soc.cpu, self.compile_software):
            define(k, v)
        # Define the SoC/Compiler-RT/Software/Include directories.
        define("SOC_DIRECTORY", soc_directory)
        define("COMPILER_RT_DIRECTORY", compiler_rt_directory)
        variables_contents.append("export BUILDINC_DIRECTORY")
        define("BUILDINC_DIRECTORY", self.include_dir)
        for name, src_dir in self.software_packages:
            define(name.upper() + "_DIRECTORY", src_dir)
        # Define the BIOS Options.
        for bios_option in self.bios_options:
            # Only these whitelisted flags are understood by the BIOS build.
            assert bios_option in [
                "TERM_NO_HIST", "TERM_MINI", "TERM_NO_COMPLETE"
            ]
            define(bios_option, "1")
        # Write to variables.mak.
        write_to_file(os.path.join(self.generated_dir, "variables.mak"),
                      "\n".join(variables_contents))

        # Generate Output Format to output_format.ld.
        output_format_contents = export.get_linker_output_format(
            self.soc.cpu)
        write_to_file(os.path.join(self.generated_dir, "output_format.ld"),
                      output_format_contents)

        # Generate Memory Regions to regions.ld.
        regions_contents = export.get_linker_regions(self.soc.mem_regions)
        write_to_file(os.path.join(self.generated_dir, "regions.ld"),
                      regions_contents)

    # Generate Memory Regions to mem.h.
    mem_contents = export.get_mem_header(self.soc.mem_regions)
    write_to_file(os.path.join(self.generated_dir, "mem.h"), mem_contents)

    # Generate Memory Regions to memory.x if specified.
    if self.memory_x is not None:
        memory_x_contents = export.get_memory_x(self.soc)
        write_to_file(os.path.realpath(self.memory_x), memory_x_contents)

    # Generate SoC Config/Constants to soc.h.
    soc_contents = export.get_soc_header(self.soc.constants)
    write_to_file(os.path.join(self.generated_dir, "soc.h"), soc_contents)

    # Generate CSR registers definitions/access functions to csr.h.
    csr_contents = export.get_csr_header(
        regions=self.soc.csr_regions,
        constants=self.soc.constants,
        csr_base=self.soc.mem_regions['csr'].origin)
    write_to_file(os.path.join(self.generated_dir, "csr.h"), csr_contents)

    # Generate Git SHA1 of tools to git.h
    git_contents = export.get_git_header()
    write_to_file(os.path.join(self.generated_dir, "git.h"), git_contents)

    # Generate LiteDRAM C header to sdram_phy.h when the SoC use it.
    if hasattr(self.soc, "sdram"):
        from litedram.init import get_sdram_phy_c_header
        sdram_contents = get_sdram_phy_c_header(
            self.soc.sdram.controller.settings.phy,
            self.soc.sdram.controller.settings.timing)
        write_to_file(os.path.join(self.generated_dir, "sdram_phy.h"),
                      sdram_contents)
def _generate_includes(self):
    """Generate the software include tree (variables.mak, linker scripts
    and C headers) for a SoC with a CPU; no-ops past the directory
    creation when cpu_type is None."""
    os.makedirs(self.include_dir, exist_ok=True)
    os.makedirs(self.generated_dir, exist_ok=True)

    if self.soc.cpu_type is not None:
        variables_contents = []

        # Append one "KEY=value\n" line, escaping Makefile-special chars.
        def define(k, v):
            variables_contents.append("{}={}\n".format(
                k, _makefile_escape(v)))

        for k, v in export.get_cpu_mak(self.soc.cpu, self.compile_software):
            define(k, v)
        # Distinguish between LiteX and MiSoC.
        define("LITEX", "1")
        # Distinguish between applications running from main RAM and
        # flash for user-provided software packages.
        exec_profiles = {"COPY_TO_MAIN_RAM": "0", "EXECUTE_IN_PLACE": "0"}
        if "main_ram" in self.soc.mem_regions.keys():
            exec_profiles["COPY_TO_MAIN_RAM"] = "1"
        else:
            exec_profiles["EXECUTE_IN_PLACE"] = "1"
        for k, v in exec_profiles.items():
            define(k, v)
        define("COMPILER_RT_DIRECTORY",
               get_data_mod("software", "compiler_rt").data_location)
        define("SOC_DIRECTORY", soc_directory)
        variables_contents.append("export BUILDINC_DIRECTORY\n")
        define("BUILDINC_DIRECTORY", self.include_dir)
        for name, src_dir in self.software_packages:
            define(name.upper() + "_DIRECTORY", src_dir)
        # Each enabled BIOS option becomes a OPTION=1 Makefile variable.
        if self.bios_options is not None:
            for option in self.bios_options:
                define(option, "1")

        write_to_file(os.path.join(self.generated_dir, "variables.mak"),
                      "".join(variables_contents))

        # Linker scripts and C headers derived from the SoC description.
        write_to_file(os.path.join(self.generated_dir, "output_format.ld"),
                      export.get_linker_output_format(self.soc.cpu))
        write_to_file(os.path.join(self.generated_dir, "regions.ld"),
                      export.get_linker_regions(self.soc.mem_regions))
        write_to_file(os.path.join(self.generated_dir, "mem.h"),
                      export.get_mem_header(self.soc.mem_regions))
        write_to_file(os.path.join(self.generated_dir, "soc.h"),
                      export.get_soc_header(self.soc.constants))
        write_to_file(
            os.path.join(self.generated_dir, "csr.h"),
            export.get_csr_header(self.soc.csr_regions, self.soc.constants))
        write_to_file(os.path.join(self.generated_dir, "git.h"),
                      export.get_git_header())

        # LiteDRAM PHY initialization header, only when the SoC has SDRAM.
        if hasattr(self.soc, "sdram"):
            from litedram.init import get_sdram_phy_c_header
            write_to_file(
                os.path.join(self.generated_dir, "sdram_phy.h"),
                get_sdram_phy_c_header(
                    self.soc.sdram.controller.settings.phy,
                    self.soc.sdram.controller.settings.timing))
def build_placement_constraints(self): pdc = "\n".join(self.additional_fp_constraints) tools.write_to_file(self._build_name + "_fp.pdc", pdc) return (self._build_name + "_fp.pdc", "PDC")
def build_script(self):
    """Generate the ISE build script and return its filename.

    Produces a .bat (Windows/Cygwin) or .sh (elsewhere) script chaining
    the ISE command-line tools: xst/edif2ngd, netgen, ngdbuild, then
    either the CPLD fitter flow or map/par/bitgen, followed by any extra
    toolchain ise_commands.  The script is written to the current
    directory but not executed.
    """
    if sys.platform == "win32" or sys.platform == "cygwin":
        script_ext = ".bat"
        shell = ["cmd", "/c"]
        build_script_contents = "@echo off\nrem Autogenerated by LiteX / git: " + tools.get_litex_git_revision(
        ) + "\n"
        # Batch files do not abort on error by default; append an explicit
        # exit to every command.
        fail_stmt = " || exit /b"
    else:
        script_ext = ".sh"
        shell = ["bash"]
        build_script_contents = "# Autogenerated by LiteX / git: " + tools.get_litex_git_revision(
        ) + "\nset -e\n"
        # Bugfix: fail_stmt must always be bound on this branch; "set -e"
        # already aborts the script on the first failing command.
        # (Previously it was only assigned inside the LITEX_ENV_ISE check,
        # causing a NameError at format() time when the variable was unset.)
        fail_stmt = ""
        if os.getenv("LITEX_ENV_ISE", False):
            build_script_contents += "source " + os.path.join(
                os.getenv("LITEX_ENV_ISE"), "settings64.sh\n")

    if self._isemode == "edif":
        # Netlist was produced externally (e.g. Yosys): convert EDIF to NGO.
        ext = "ngo"
        build_script_contents += """
edif2ngd {build_name}.edif {build_name}.{ext}{fail_stmt}
"""
    else:
        # Synthesize with xst from the generated .xst project file.
        ext = "ngc"
        build_script_contents += """
xst -ifn {build_name}.xst{fail_stmt}
"""
    # This generates a .v file for post synthesis simulation
    build_script_contents += """
netgen -ofmt verilog -w -sim {build_name}.{ext} {build_name}_synth.v
"""
    build_script_contents += """
ngdbuild {ngdbuild_opt} -uc {build_name}.ucf {build_name}.{ext} {build_name}.ngd{fail_stmt}
"""
    if self._isemode == "cpld":
        build_script_contents += """
cpldfit -ofmt verilog {par_opt} -p {device} {build_name}.ngd{fail_stmt}
taengine -f {build_name}.vm6 -detail -iopath -l {build_name}.tim{fail_stmt}
hprep6 -s IEEE1532 -i {build_name}.vm6{fail_stmt}
"""
    else:
        build_script_contents += """
map {map_opt} -o {build_name}_map.ncd {build_name}.ngd {build_name}.pcf{fail_stmt}
par {par_opt} {build_name}_map.ncd {build_name}.ncd {build_name}.pcf{fail_stmt}
bitgen {bitgen_opt} {build_name}.ncd {build_name}.bit{fail_stmt}
"""
    build_script_contents = build_script_contents.format(
        build_name=self._build_name,
        ngdbuild_opt=self.ngdbuild_opt,
        bitgen_opt=self.bitgen_opt,
        ext=ext,
        par_opt=self.par_opt,
        map_opt=self.map_opt,
        device=self.platform.device,
        fail_stmt=fail_stmt)
    build_script_contents += self.ise_commands.format(
        build_name=self._build_name)
    build_script_file = "build_" + self._build_name + script_ext
    tools.write_to_file(build_script_file,
                        build_script_contents,
                        force_unix=False)
    return build_script_file
def _generate_includes(self, with_bios=True):
    """Generate the software include tree under self.generated_dir.

    BIOS-specific files (variables.mak and linker scripts needing a CPU)
    are skipped when *with_bios* is False; the C headers describing the
    SoC are generated in all cases.
    """
    # Generate Include/Generated directories.
    _create_dir(self.include_dir)
    _create_dir(self.generated_dir)

    # Generate BIOS files when the SoC uses it.
    if with_bios:
        # Generate Variables to variables.mak.
        variables_contents = self._get_variables_contents()
        write_to_file(os.path.join(self.generated_dir, "variables.mak"),
                      variables_contents)

        # Generate Output Format to output_format.ld.
        output_format_contents = export.get_linker_output_format(
            self.soc.cpu)
        write_to_file(os.path.join(self.generated_dir, "output_format.ld"),
                      output_format_contents)

        # Generate Memory Regions to regions.ld.
        regions_contents = export.get_linker_regions(self.soc.mem_regions)
        write_to_file(os.path.join(self.generated_dir, "regions.ld"),
                      regions_contents)

    # Generate Memory Regions to mem.h.
    mem_contents = export.get_mem_header(self.soc.mem_regions)
    write_to_file(os.path.join(self.generated_dir, "mem.h"), mem_contents)

    # Generate Memory Regions to memory.x if specified.
    if self.memory_x is not None:
        memory_x_contents = export.get_memory_x(self.soc)
        write_to_file(os.path.realpath(self.memory_x), memory_x_contents)

    # Generate SoC Config/Constants to soc.h.
    soc_contents = export.get_soc_header(self.soc.constants)
    write_to_file(os.path.join(self.generated_dir, "soc.h"), soc_contents)

    # Generate CSR registers definitions/access functions to csr.h.
    csr_contents = export.get_csr_header(
        regions=self.soc.csr_regions,
        constants=self.soc.constants,
        csr_base=self.soc.mem_regions['csr'].origin)
    write_to_file(os.path.join(self.generated_dir, "csr.h"), csr_contents)

    # Generate Git SHA1 of tools to git.h
    git_contents = export.get_git_header()
    write_to_file(os.path.join(self.generated_dir, "git.h"), git_contents)

    # Generate LiteDRAM C header to sdram_phy.h when the SoC use it
    if hasattr(self.soc, "sdram"):
        from litedram.init import get_sdram_phy_c_header
        sdram_contents = get_sdram_phy_c_header(
            self.soc.sdram.controller.settings.phy,
            self.soc.sdram.controller.settings.timing)
        write_to_file(os.path.join(self.generated_dir, "sdram_phy.h"),
                      sdram_contents)
def _run_ise(build_name, ise_path, source, mode, ngdbuild_opt, toolchain,
             platform, ver=None):
    """Generate and execute the ISE build script for *build_name*.

    When *source* is true, the script first sources the ISE settings
    located under *ise_path* (optionally pinned to version *ver*).
    *mode* selects the front-end: "edif" (pre-built EDIF netlist),
    "cpld" (CPLD fitter flow), or anything else for the xst flow.

    Raises:
        OSError: if the generated script exits with a non-zero status.
    """
    if sys.platform == "win32" or sys.platform == "cygwin":
        source_cmd = "call "
        script_ext = ".bat"
        shell = ["cmd", "/c"]
        build_script_contents = "@echo off\nrem Autogenerated by LiteX / git: " + tools.get_litex_git_revision(
        ) + "\n"
        # Batch files do not abort on error by default; append an explicit
        # exit to every command.
        fail_stmt = " || exit /b"
    else:
        source_cmd = "source "
        script_ext = ".sh"
        shell = ["bash"]
        build_script_contents = "# Autogenerated by LiteX / git: " + tools.get_litex_git_revision(
        ) + "\nset -e\n"
        # "set -e" already aborts on the first failing command.
        fail_stmt = ""
    if source:
        # Source the ISE environment (cygpath keeps Windows paths usable).
        settings = common.settings(ise_path, ver, "ISE_DS")
        build_script_contents += source_cmd + tools.cygpath(settings) + "\n"
    if mode == "edif":
        # Netlist produced externally: convert EDIF to NGO.
        ext = "ngo"
        build_script_contents += """
edif2ngd {build_name}.edif {build_name}.{ext}{fail_stmt}
"""
    else:
        # Synthesize with xst from the generated .xst project file.
        ext = "ngc"
        build_script_contents += """
xst -ifn {build_name}.xst{fail_stmt}
"""
    # This generates a .v file for post synthesis simulation
    build_script_contents += """
netgen -ofmt verilog -w -sim {build_name}.{ext} {build_name}_synth.v
"""
    build_script_contents += """
ngdbuild {ngdbuild_opt} -uc {build_name}.ucf {build_name}.{ext} {build_name}.ngd{fail_stmt}
"""
    if mode == "cpld":
        build_script_contents += """
cpldfit -ofmt verilog {par_opt} -p {device} {build_name}.ngd{fail_stmt}
taengine -f {build_name}.vm6 -detail -iopath -l {build_name}.tim{fail_stmt}
hprep6 -s IEEE1532 -i {build_name}.vm6{fail_stmt}
"""
    else:
        build_script_contents += """
map {map_opt} -o {build_name}_map.ncd {build_name}.ngd {build_name}.pcf{fail_stmt}
par {par_opt} {build_name}_map.ncd {build_name}.ncd {build_name}.pcf{fail_stmt}
bitgen {bitgen_opt} {build_name}.ncd {build_name}.bit{fail_stmt}
"""
    build_script_contents = build_script_contents.format(
        build_name=build_name,
        ngdbuild_opt=ngdbuild_opt,
        bitgen_opt=toolchain.bitgen_opt,
        ext=ext,
        par_opt=toolchain.par_opt,
        map_opt=toolchain.map_opt,
        device=platform.device,
        fail_stmt=fail_stmt)
    build_script_contents += toolchain.ise_commands.format(
        build_name=build_name)
    build_script_file = "build_" + build_name + script_ext
    tools.write_to_file(build_script_file,
                        build_script_contents,
                        force_unix=False)
    command = shell + [build_script_file]
    r = tools.subprocess_call_filtered(command, common.colors)
    if r != 0:
        raise OSError("Subprocess failed")
def _build_tcl(platform, sources, build_dir, build_name):
    """Generate the Libero SoC Tcl build script for a PolarFire design.

    Creates the project, selects the die/package/speed parsed from
    platform.device ("die-package-speed"), imports HDL sources and the
    io/fp/sdc constraints, associates them with the relevant tools and
    runs the flow through programming-file generation.  The script is
    written to <build_name>.tcl.
    """
    tcl = []

    # create project
    # NOTE: the bare "{}" values here are literal empty Tcl braces filled
    # in by the subsequent set_device call, not format placeholders.
    tcl.append(" ".join([
        "new_project",
        "-location {./impl}",
        "-name {}".format(tcl_name(build_name)),
        "-project_description {}",
        "-block_mode 0",
        "-standalone_peripheral_initialization 0",
        "-instantiate_in_smartdesign 1",
        "-ondemand_build_dh 0",
        "-use_enhanced_constraint_flow 0",
        "-hdl {VERILOG}",
        "-family {PolarFire}",
        "-die {}",
        "-package {}",
        "-speed {}",
        "-die_voltage {}",
        "-part_range {}",
        "-adv_options {}"
    ]))

    # Device name is "<die>-<package>-<speed>"; the "-" prefix is restored
    # on the speed grade for set_device.
    die, package, speed = platform.device.split("-")
    tcl.append(" ".join([
        "set_device",
        "-family {PolarFire}",
        "-die {}".format(tcl_name(die)),
        "-package {}".format(tcl_name(package)),
        "-speed {}".format(tcl_name("-" + speed)),
        # FIXME: common to all PolarFire devices?
        "-die_voltage {1.0}",
        "-part_range {EXT}",
        "-adv_options {IO_DEFT_STD:LVCMOS 1.8V}",
        "-adv_options {RESTRICTPROBEPINS:1}",
        "-adv_options {RESTRICTSPIPINS:0}",
        "-adv_options {TEMPR:EXT}",
        "-adv_options {UNUSED_MSS_IO_RESISTOR_PULL:None}",
        "-adv_options {VCCI_1.2_VOLTR:EXT}",
        "-adv_options {VCCI_1.5_VOLTR:EXT}",
        "-adv_options {VCCI_1.8_VOLTR:EXT}",
        "-adv_options {VCCI_2.5_VOLTR:EXT}",
        "-adv_options {VCCI_3.3_VOLTR:EXT}",
        "-adv_options {VOLTR:EXT} "
    ]))

    # add files
    for filename, language, library in sources:
        filename_tcl = "{" + filename + "}"
        tcl.append("import_files -hdl_source " + filename_tcl)

    # set top
    tcl.append("set_root -module {}".format(tcl_name(build_name)))

    # copy init files FIXME: support for include path on LiberoSoC?
    for file in os.listdir(build_dir):
        if file.endswith(".init"):
            tcl.append("file copy -- {} impl/synthesis".format(file))

    # import io constraints
    tcl.append("import_files -io_pdc {}".format(tcl_name(build_name + "_io.pdc")))

    # import floorplanner constraints
    tcl.append("import_files -fp_pdc {}".format(tcl_name(build_name + "_fp.pdc")))

    # import timing constraints
    tcl.append("import_files -convert_EDN_to_HDL 0 -sdc {}".format(tcl_name(build_name + ".sdc")))

    # associate constraints with tools
    tcl.append(" ".join(["organize_tool_files",
                         "-tool {SYNTHESIZE}",
                         "-file impl/constraint/{}.sdc".format(build_name),
                         "-module {}".format(build_name),
                         "-input_type {constraint}"
                         ]))
    tcl.append(" ".join(["organize_tool_files",
                         "-tool {PLACEROUTE}",
                         "-file impl/constraint/io/{}_io.pdc".format(build_name),
                         "-file impl/constraint/fp/{}_fp.pdc".format(build_name),
                         "-file impl/constraint/{}.sdc".format(build_name),
                         "-module {}".format(build_name),
                         "-input_type {constraint}"
                         ]))
    tcl.append(" ".join(["organize_tool_files",
                         "-tool {VERIFYTIMING}",
                         "-file impl/constraint/{}.sdc".format(build_name),
                         "-module {}".format(build_name),
                         "-input_type {constraint}"
                         ]))

    # build flow
    tcl.append("run_tool -name {CONSTRAINT_MANAGEMENT}")
    tcl.append("run_tool -name {SYNTHESIZE}")
    tcl.append("run_tool -name {PLACEROUTE}")
    tcl.append("run_tool -name {GENERATEPROGRAMMINGDATA}")
    tcl.append("run_tool -name {GENERATEPROGRAMMINGFILE}")

    # generate tcl
    tools.write_to_file(build_name + ".tcl", "\n".join(tcl))
def add_sources(platform, use_ghdl_yosys_plugin=False):
    """Add the Microwatt CPU VHDL sources to *platform*.

    When *use_ghdl_yosys_plugin* is True the VHDL is first converted to a
    single Verilog file via GHDL + the Yosys GHDL plugin (required for
    toolchains without VHDL support); otherwise the VHDL sources are added
    directly.

    Raises:
        OSError: if the GHDL/Yosys conversion fails.
    """
    sources = [
        # Common / Types / Helpers.
        "decode_types.vhdl",
        "wishbone_types.vhdl",
        "utils.vhdl",
        "common.vhdl",
        "helpers.vhdl",
        "nonrandom.vhdl",
        # Fetch.
        "fetch1.vhdl",
        # Instruction/Data Cache.
        "cache_ram.vhdl",
        "plru.vhdl",
        "dcache.vhdl",
        "icache.vhdl",
        # Decode.
        "insn_helpers.vhdl",
        "decode1.vhdl",
        "control.vhdl",
        "decode2.vhdl",
        # Register/CR File.
        "register_file.vhdl",
        "crhelpers.vhdl",
        "cr_file.vhdl",
        # Execute.
        "ppc_fx_insns.vhdl",
        "logical.vhdl",
        "rotator.vhdl",
        "countbits.vhdl",
        "execute1.vhdl",
        # Load/Store.
        "loadstore1.vhdl",
        # Divide.
        "divider.vhdl",
        # FPU.
        "fpu.vhdl",
        # PMU.
        "pmu.vhdl",
        # Writeback.
        "writeback.vhdl",
        # MMU.
        "mmu.vhdl",
        # Core.
        "core_debug.vhdl",
        "core.vhdl",
    ]

    # Use the Xilinx DSP multiplier only when synthesizing natively on a
    # Xilinx toolchain; the GHDL/Yosys path uses the generic multiplier.
    from litex.build.xilinx import XilinxPlatform
    if isinstance(platform, XilinxPlatform) and not use_ghdl_yosys_plugin:
        sources.append("xilinx-mult.vhdl")
    else:
        sources.append("multiply.vhdl")

    sdir = get_data_mod("cpu", "microwatt").data_location
    cdir = os.path.dirname(__file__)

    # Convert VHDL to Verilog through GHDL/Yosys.
    if use_ghdl_yosys_plugin:
        from litex.build import tools
        import subprocess
        # Build a Yosys script: elaborate all sources with GHDL, strip
        # formal asserts, then write out a single Verilog netlist.
        ys = []
        ys.append(
            "ghdl --ieee=synopsys -fexplicit -frelaxed-rules --std=08 \\")
        for source in sources:
            ys.append(os.path.join(sdir, source) + " \\")
        ys.append(
            os.path.join(os.path.dirname(__file__), "microwatt_wrapper.vhdl")
            + " \\")
        ys.append("-e microwatt_wrapper")
        ys.append("chformal -assert -remove")
        ys.append("write_verilog {}".format(
            os.path.join(cdir, "microwatt.v")))
        tools.write_to_file(os.path.join(cdir, "microwatt.ys"),
                            "\n".join(ys))
        if subprocess.call([
                "yosys", "-q", "-m", "ghdl",
                os.path.join(cdir, "microwatt.ys")
        ]):
            raise OSError(
                "Unable to convert Microwatt CPU to verilog, please check your GHDL-Yosys-plugin install"
            )
        platform.add_source(os.path.join(cdir, "microwatt.v"))
    # Direct use of VHDL sources.
    else:
        platform.add_sources(sdir, *sources)
        platform.add_source(
            os.path.join(os.path.dirname(__file__), "microwatt_wrapper.vhdl"))
def _generate_makefile(self, platform, build_name): Var = _MakefileGenerator.Var Rule = _MakefileGenerator.Rule makefile = _MakefileGenerator([ "# Autogenerated by LiteX / git: " + tools.get_litex_git_revision() + "\n", Var("TOP", build_name), Var("PARTNAME", self._partname), Var("DEVICE", self.symbiflow_device), Var("BITSTREAM_DEVICE", self.bitstream_device), "", Var("VERILOG", [ f for f, language, _ in platform.sources if language in ["verilog", "system_verilog"] ]), Var("MEM_INIT", [f"{name}" for name in os.listdir() if name.endswith(".init")]), Var("SDC", f"{build_name}.sdc"), Var("XDC", f"{build_name}.xdc"), Var("ARTIFACTS", [ "$(TOP).eblif", "$(TOP).frames", "$(TOP).ioplace", "$(TOP).net", "$(TOP).place", "$(TOP).route", "$(TOP)_synth.*", "*.bit", "*.fasm", "*.json", "*.log", "*.rpt", "constraints.place" ]), Rule("all", ["$(TOP).bit"], phony=True), Rule( "$(TOP).eblif", ["$(VERILOG)", "$(MEM_INIT)", "$(XDC)"], commands=[ "symbiflow_synth -t $(TOP) -v $(VERILOG) -d $(BITSTREAM_DEVICE) -p $(PARTNAME) -x $(XDC) > /dev/null" ]), Rule( "$(TOP).net", ["$(TOP).eblif", "$(SDC)"], commands=[ "symbiflow_pack -e $(TOP).eblif -d $(DEVICE) -s $(SDC) > /dev/null" ]), Rule( "$(TOP).place", ["$(TOP).net"], commands=[ "symbiflow_place -e $(TOP).eblif -d $(DEVICE) -n $(TOP).net -P $(PARTNAME) -s $(SDC) > /dev/null" ]), Rule( "$(TOP).route", ["$(TOP).place"], commands=[ "symbiflow_route -e $(TOP).eblif -d $(DEVICE) -s $(SDC) > /dev/null" ]), Rule( "$(TOP).fasm", ["$(TOP).route"], commands=[ "symbiflow_write_fasm -e $(TOP).eblif -d $(DEVICE) > /dev/null" ]), Rule( "$(TOP).bit", ["$(TOP).fasm"], commands=[ "symbiflow_write_bitstream -d $(BITSTREAM_DEVICE) -f $(TOP).fasm -p $(PARTNAME) -b $(TOP).bit > /dev/null" ]), Rule("clean", phony=True, commands=["rm -f $(ARTIFACTS)"]), ]) tools.write_to_file("Makefile", makefile.generate())
def _build_tcl(self, platform, build_name, synth_mode, enable_xpm):
    """Generate the Vivado Tcl build script (<build_name>.tcl).

    The script covers the complete flow: project creation, source/IP/
    constraint import, synthesis (Vivado or a pre-run Yosys EDIF),
    optimization, placement, routing, reports and bitstream generation.
    Hook lists (pre_synthesis/pre_placement/pre_routing/bitstream/
    additional commands) are formatted with build_name and spliced in at
    the matching stage.
    """
    assert synth_mode in ["vivado", "yosys"]
    tcl = []

    # Create project
    tcl.append("\n# Create Project\n")
    tcl.append("create_project -force -name {} -part {}".format(
        build_name, platform.device))
    # Downgrade the "file not found" style message Common 17-55 to Warning.
    tcl.append("set_msg_config -id {Common 17-55} -new_severity {Warning}")

    # Enable Xilinx Parameterized Macros
    if enable_xpm:
        tcl.append("\n# Enable Xilinx Parameterized Macros\n")
        tcl.append(
            "set_property XPM_LIBRARIES {XPM_CDC XPM_MEMORY} [current_project]"
        )

    # Add sources (when Vivado used for synthesis)
    if synth_mode == "vivado":
        tcl.append("\n# Add Sources\n")
        # "-include_dirs {}" crashes Vivado 2016.4
        for filename, language, library in platform.sources:
            filename_tcl = "{" + filename + "}"
            if (language == "systemverilog"):
                tcl.append("read_verilog -sv " + filename_tcl)
            elif (language == "verilog"):
                tcl.append("read_verilog " + filename_tcl)
            elif (language == "vhdl"):
                tcl.append("read_vhdl -vhdl2008 " + filename_tcl)
                tcl.append("set_property library {} [get_files {}]".format(
                    library, filename_tcl))
            else:
                tcl.append("add_files " + filename_tcl)

    # Add EDIFs
    tcl.append("\n# Add EDIFs\n")
    for filename in platform.edifs:
        filename_tcl = "{" + filename + "}"
        tcl.append("read_edif " + filename_tcl)

    # Add IPs
    tcl.append("\n# Add IPs\n")
    for filename in platform.ips:
        filename_tcl = "{" + filename + "}"
        ip = os.path.splitext(os.path.basename(filename))[0]
        tcl.append("read_ip " + filename_tcl)
        tcl.append("upgrade_ip [get_ips {}]".format(ip))
        tcl.append("generate_target all [get_ips {}]".format(ip))
        tcl.append("synth_ip [get_ips {}] -force".format(ip))
        tcl.append("get_files -all -of_objects [get_files {}]".format(
            filename_tcl))

    # Add constraints
    tcl.append("\n# Add constraints\n")
    tcl.append("read_xdc {}.xdc".format(build_name))

    # Add pre-synthesis commands
    tcl.append("\n# Add pre-synthesis commands\n")
    tcl.extend(
        c.format(build_name=build_name) for c in self.pre_synthesis_commands)

    # Synthesis
    if synth_mode == "vivado":
        tcl.append("\n# Synthesis\n")
        synth_cmd = "synth_design -directive {} -top {} -part {}".format(
            self.vivado_synth_directive, build_name, platform.device)
        if platform.verilog_include_paths:
            synth_cmd += " -include_dirs {{{}}}".format(" ".join(
                platform.verilog_include_paths))
        tcl.append(synth_cmd)
    elif synth_mode == "yosys":
        # Yosys already synthesized the design; just link its EDIF.
        tcl.append("\n# Read Yosys EDIF\n")
        tcl.append("read_edif {}.edif".format(build_name))
        tcl.append("link_design -top {} -part {}".format(
            build_name, platform.device))
    else:
        raise OSError("Unknown synthesis mode! {}".format(synth_mode))
    tcl.append("\n# Synthesis report\n")
    tcl.append("report_timing_summary -file {}_timing_synth.rpt".format(
        build_name))
    tcl.append(
        "report_utilization -hierarchical -file {}_utilization_hierarchical_synth.rpt"
        .format(build_name))
    tcl.append("report_utilization -file {}_utilization_synth.rpt".format(
        build_name))

    # Optimize
    tcl.append("\n# Optimize design\n")
    tcl.append("opt_design -directive {}".format(self.opt_directive))

    # Incremental implementation
    if self.incremental_implementation:
        # Reuse the previous run's routed checkpoint as placement seed.
        tcl.append("\n# Read design checkpoint\n")
        tcl.append(
            "read_checkpoint -incremental {}_route.dcp".format(build_name))

    # Add pre-placement commands
    tcl.append("\n# Add pre-placement commands\n")
    tcl.extend(
        c.format(build_name=build_name) for c in self.pre_placement_commands)

    # Placement
    tcl.append("\n# Placement\n")
    tcl.append("place_design -directive {}".format(
        self.vivado_place_directive))
    if self.vivado_post_place_phys_opt_directive:
        tcl.append("phys_opt_design -directive {}".format(
            self.vivado_post_place_phys_opt_directive))
    tcl.append("\n# Placement report\n")
    tcl.append(
        "report_utilization -hierarchical -file {}_utilization_hierarchical_place.rpt"
        .format(build_name))
    tcl.append("report_utilization -file {}_utilization_place.rpt".format(
        build_name))
    tcl.append("report_io -file {}_io.rpt".format(build_name))
    tcl.append(
        "report_control_sets -verbose -file {}_control_sets.rpt".format(
            build_name))
    tcl.append(
        "report_clock_utilization -file {}_clock_utilization.rpt".format(
            build_name))

    # Add pre-routing commands
    tcl.append("\n# Add pre-routing commands\n")
    tcl.extend(
        c.format(build_name=build_name) for c in self.pre_routing_commands)

    # Routing
    tcl.append("\n# Routing\n")
    tcl.append("route_design -directive {}".format(
        self.vivado_route_directive))
    tcl.append("phys_opt_design -directive {}".format(
        self.vivado_post_route_phys_opt_directive))
    # Checkpoint enables later incremental implementation runs.
    tcl.append("write_checkpoint -force {}_route.dcp".format(build_name))
    tcl.append("\n# Routing report\n")
    # NOTE(review): this summary has no -file, so it goes to the log only.
    tcl.append("report_timing_summary -no_header -no_detailed_paths")
    tcl.append(
        "report_route_status -file {}_route_status.rpt".format(build_name))
    tcl.append("report_drc -file {}_drc.rpt".format(build_name))
    tcl.append(
        "report_timing_summary -datasheet -max_paths 10 -file {}_timing.rpt"
        .format(build_name))
    tcl.append("report_power -file {}_power.rpt".format(build_name))
    for bitstream_command in self.bitstream_commands:
        tcl.append(bitstream_command.format(build_name=build_name))

    # Bitstream generation
    tcl.append("\n# Bitstream generation\n")
    tcl.append("write_bitstream -force {}.bit ".format(build_name))
    for additional_command in self.additional_commands:
        tcl.append(additional_command.format(build_name=build_name))

    # Quit
    tcl.append("\n# End\n")
    tcl.append("quit")
    tools.write_to_file(build_name + ".tcl", "\n".join(tcl))
def _build_batch(self, platform, sources, edifs, build_name):
    """Generate the Vivado batch Tcl script (<build_name>.tcl).

    Imports HDL sources and EDIF netlists, then runs synthesis, opt,
    place, route, the various reports and bitstream generation.
    pre_synthesis/bitstream/additional command hooks are formatted with
    build_name and spliced in at the matching stage.
    """
    tcl = []
    tcl.append("create_project -force -name {} -part {}".format(
        build_name, platform.device))
    for filename, language, library in sources:
        filename_tcl = "{" + filename + "}"
        tcl.append("add_files " + filename_tcl)
        tcl.append("set_property library {} [get_files {}]".format(
            library, filename_tcl))
    for filename in edifs:
        filename_tcl = "{" + filename + "}"
        tcl.append("read_edif " + filename_tcl)

    tcl.append("read_xdc {}.xdc".format(build_name))
    tcl.extend(
        c.format(build_name=build_name) for c in self.pre_synthesis_commands)

    # "-include_dirs {}" crashes Vivado 2016.4
    if platform.verilog_include_paths:
        tcl.append(
            "synth_design -top {} -part {} -include_dirs {{{}}}".format(
                build_name, platform.device,
                " ".join(platform.verilog_include_paths)))
    else:
        tcl.append("synth_design -top {} -part {}".format(
            build_name, platform.device))

    # Post-synthesis reports.
    tcl.append("report_timing_summary -file {}_timing_synth.rpt".format(
        build_name))
    tcl.append(
        "report_utilization -hierarchical -file {}_utilization_hierarchical_synth.rpt"
        .format(build_name))
    tcl.append("report_utilization -file {}_utilization_synth.rpt".format(
        build_name))
    tcl.append("opt_design")
    tcl.append("place_design")
    if self.with_phys_opt:
        tcl.append("phys_opt_design -directive AddRetime")
    # Post-placement reports.
    tcl.append(
        "report_utilization -hierarchical -file {}_utilization_hierarchical_place.rpt"
        .format(build_name))
    tcl.append("report_utilization -file {}_utilization_place.rpt".format(
        build_name))
    tcl.append("report_io -file {}_io.rpt".format(build_name))
    tcl.append(
        "report_control_sets -verbose -file {}_control_sets.rpt".format(
            build_name))
    tcl.append(
        "report_clock_utilization -file {}_clock_utilization.rpt".format(
            build_name))
    tcl.append("route_design")
    # Checkpoint of the routed design for later reuse/inspection.
    tcl.append("write_checkpoint -force {}_route.dcp".format(build_name))
    # Post-routing reports.
    tcl.append(
        "report_route_status -file {}_route_status.rpt".format(build_name))
    tcl.append("report_drc -file {}_drc.rpt".format(build_name))
    tcl.append(
        "report_timing_summary -datasheet -max_paths 10 -file {}_timing.rpt"
        .format(build_name))
    tcl.append("report_power -file {}_power.rpt".format(build_name))
    for bitstream_command in self.bitstream_commands:
        tcl.append(bitstream_command.format(build_name=build_name))
    tcl.append("write_bitstream -force {}.bit ".format(build_name))
    for additional_command in self.additional_commands:
        tcl.append(additional_command.format(build_name=build_name))
    tcl.append("quit")
    tools.write_to_file(build_name + ".tcl", "\n".join(tcl))
analyzer_groups = {} # Analyzer group analyzer_groups[0] = [ self.serdes.d0.signals, self.serdes.d1.signals, self.serdes.d2.signals, self.serdes.d3.signals, ] # analyzer self.submodules.analyzer = LiteScopeAnalyzer(analyzer_groups, 512) def do_exit(self, vns): self.analyzer.export_csv(vns, "test/analyzer.csv") platform = arty.Platform() soc = LiteScopeSoC(platform) vns = platform.build(soc, run="no-compile" not in sys.argv[1:]) # # Create csr and analyzer files # soc.finalize() csr_csv = export.get_csr_csv(soc.csr_regions, soc.constants) write_to_file("test/csr.csv", csr_csv) soc.do_exit(vns)
def _generate_includes(self): cpu_type = self.soc.cpu_type memory_regions = self.soc.get_memory_regions() flash_boot_address = getattr(self.soc, "flash_boot_address", None) csr_regions = self.soc.get_csr_regions() constants = self.soc.get_constants() if isinstance(self.soc, soc_sdram.SoCSDRAM) and self.soc._sdram_phy: sdram_phy_settings = self.soc._sdram_phy[0].settings else: sdram_phy_settings = None buildinc_dir = os.path.join(self.output_dir, "software", "include") generated_dir = os.path.join(buildinc_dir, "generated") os.makedirs(generated_dir, exist_ok=True) variables_contents = [] def define(k, v): variables_contents.append("{}={}\n".format(k, _makefile_escape(v))) for k, v in cpu_interface.get_cpu_mak(cpu_type): define(k, v) define("SOC_DIRECTORY", soc_directory) variables_contents.append("export BUILDINC_DIRECTORY\n") define("BUILDINC_DIRECTORY", buildinc_dir) for name, src_dir in self.software_packages: define(name.upper() + "_DIRECTORY", src_dir) write_to_file(os.path.join(generated_dir, "variables.mak"), "".join(variables_contents)) write_to_file(os.path.join(generated_dir, "output_format.ld"), cpu_interface.get_linker_output_format(cpu_type)) write_to_file(os.path.join(generated_dir, "regions.ld"), cpu_interface.get_linker_regions(memory_regions)) write_to_file( os.path.join(generated_dir, "mem.h"), cpu_interface.get_mem_header(memory_regions, flash_boot_address)) write_to_file(os.path.join(generated_dir, "csr.h"), cpu_interface.get_csr_header(csr_regions, constants)) if sdram_phy_settings is not None: write_to_file(os.path.join(generated_dir, "sdram_phy.h"), sdram_init.get_sdram_phy_header(sdram_phy_settings))
def add_sources(platform, use_ghdl_yosys_plugin=False): sources = [ # Common / Types / Helpers "decode_types.vhdl", "wishbone_types.vhdl", "utils.vhdl", "common.vhdl", "helpers.vhdl", # XICS controller "xics.vhdl", ] sdir = get_data_mod("cpu", "microwatt").data_location cdir = os.path.dirname(__file__) if use_ghdl_yosys_plugin: from litex.build import tools import subprocess # ICP ys = [] ys.append( "ghdl --ieee=synopsys -fexplicit -frelaxed-rules --std=08 \\") for source in sources: ys.append(os.path.join(sdir, source) + " \\") ys.append( os.path.join(os.path.dirname(__file__), "xics_wrapper.vhdl") + " \\") ys.append("-e xics_icp_wrapper") ys.append("chformal -assert -remove") ys.append("write_verilog {}".format( os.path.join(cdir, "xics_icp.v"))) tools.write_to_file(os.path.join(cdir, "xics_icp.ys"), "\n".join(ys)) if subprocess.call([ "yosys", "-q", "-m", "ghdl", os.path.join(cdir, "xics_icp.ys") ]): raise OSError( "Unable to convert Microwatt XICS ICP controller to verilog, please check your GHDL-Yosys-plugin install" ) platform.add_source(os.path.join(cdir, "xics_icp.v")) # ICS ys = [] ys.append( "ghdl --ieee=synopsys -fexplicit -frelaxed-rules --std=08 \\") for source in sources: ys.append(os.path.join(sdir, source) + " \\") ys.append( os.path.join(os.path.dirname(__file__), "xics_wrapper.vhdl") + " \\") ys.append("-e xics_ics_wrapper") ys.append("chformal -assert -remove") ys.append("write_verilog {}".format( os.path.join(cdir, "xics_ics.v"))) tools.write_to_file(os.path.join(cdir, "xics_ics.ys"), "\n".join(ys)) if subprocess.call([ "yosys", "-q", "-m", "ghdl", os.path.join(cdir, "xics_ics.ys") ]): raise OSError( "Unable to convert Microwatt XICS ICP controller to verilog, please check your GHDL-Yosys-plugin install" ) platform.add_source(os.path.join(cdir, "xics_ics.v")) else: platform.add_sources(sdir, *sources) platform.add_source( os.path.join(os.path.dirname(__file__), "xics_wrapper.vhdl"))
def build_io_contraints(self): # Generate design constraints tools.write_to_file(self._build_name + ".xdc", _build_xdc(self.named_sc, self.named_pc)) return (self._build_name + ".xdc", "XDC")
def compare_with_reference(content, filename): write_to_file(filename, content) r = filecmp.cmp(filename, os.path.join("test", "reference", filename)) os.remove(filename) return r
def stream(self, port=20000):
    """
    Create a Telnet server to stream data to/from the internal JTAG TAP of the FPGA

    Wire format: 10 bits LSB first
    Host to Target:
    - TX ready : bit 0
    - RX data: : bit 1 to 8
    - RX valid : bit 9
    Target to Host:
    - RX ready : bit 0
    - TX data : bit 1 to 8
    - TX valid : bit 9
    """
    # OpenOCD Tcl helpers implementing the framing above on top of drscan.
    # NOTE(review): the internal whitespace of this runtime string was
    # reconstructed from a whitespace-mangled source — confirm against the
    # original file before relying on exact bytes.
    cfg = """
proc jtagstream_poll {tap tx n} {
    set m [string length $tx]
    set n [expr ($m>$n)?$m:$n]
    set txi [lrepeat $n {10 0x001}]
    set i 0
    foreach txj [split $tx ""] {
        lset txi $i 1 [format 0x%4.4X [expr 0x201 | ([scan $txj %c] << 1)]]
        incr i
    }
    set txi [concat {*}$txi]
    set rxi [split [drscan $tap {*}$txi -endstate DRPAUSE] " "]
    #echo $txi:$rxi
    set rx ""
    set writable 1
    foreach {rxj} $rxi {
        set readable [expr 0x$rxj & 0x200]
        set writable [expr 0x$rxj & $writable]
        if {$readable} {
            append rx [format %c [expr (0x$rxj >> 1) & 0xff]]
        }
    }
    return [list $rx $readable $writable]
}
proc jtagstream_drain {tap tx chunk_rx max_rx} {
    lassign [jtagstream_poll $tap $tx $chunk_rx] rx readable writable
    while {[expr $writable && ($readable > 0) && ([string length $rx] < $max_rx)]} {
        lassign [jtagstream_poll $tap "" $chunk_rx] rxi readable writable
        append rx $rxi
    }
    #if {!$writable} {
    #    echo "write overflow"
    #}
    return $rx
}
proc jtagstream_rxtx {tap client is_poll} {
    if {![$client eof]} {
        if {!$is_poll} {
            set tx [$client gets]
        } else {
            set tx ""
        }
        set rx [jtagstream_drain $tap $tx 64 4096]
        if {[string length $rx]} {
            $client puts -nonewline $rx
        }
        if {$is_poll} {
            after 1 [list jtagstream_rxtx $tap $client 1]
        }
    } else {
        $client readable {}
        $client onexception {}
        $client close
    }
}
proc jtagstream_client {tap sock} {
    set client [$sock accept]
    fconfigure $client -buffering none
    $client readable [list jtagstream_rxtx $tap $client 0]
    $client onexception [list $client close]
    after 1 [list jtagstream_rxtx $tap $client 1]
}
proc jtagstream_exit {sock} {
    stdin readable {}
    $sock readable {}
}
proc jtagstream_serve {tap port} {
    set sock [socket stream.server $port]
    $sock readable [list jtagstream_client $tap $sock]
    stdin readable [list jtagstream_exit $sock]
    vwait forever
    $sock close
}
"""
    # Dump the helpers to a cfg file that openocd loads with -f.
    write_to_file("stream.cfg", cfg)
    # Select the USER1 DR (the streaming TAP) and start serving on `port`.
    script = "; ".join([
        "init",
        "irscan $_CHIPNAME.tap $_USER1",
        "jtagstream_serve $_CHIPNAME.tap {:d}".format(port),
        "exit",
    ])
    config = self.find_config()
    self.call(["openocd", "-f", config, "-f", "stream.cfg", "-c", script])
def add_sources(platform, use_ghdl_yosys_plugin=False): sources = [ # Common / Types / Helpers "decode_types.vhdl", "wishbone_types.vhdl", "utils.vhdl", "common.vhdl", "helpers.vhdl", # Fetch "fetch1.vhdl", "fetch2.vhdl", # Instruction/Data Cache "cache_ram.vhdl", "plru.vhdl", "dcache.vhdl", "icache.vhdl", # Decode "insn_helpers.vhdl", "decode1.vhdl", "gpr_hazard.vhdl", "cr_hazard.vhdl", "control.vhdl", "decode2.vhdl", # Register/CR File "register_file.vhdl", "crhelpers.vhdl", "cr_file.vhdl", # Execute "ppc_fx_insns.vhdl", "logical.vhdl", "rotator.vhdl", "countzero.vhdl", "execute1.vhdl", # Load/Store "loadstore1.vhdl", # Multiply/Divide "multiply.vhdl", "divider.vhdl", # Writeback "writeback.vhdl", # MMU "mmu.vhdl", # Core "core_debug.vhdl", "core.vhdl", ] sdir = get_data_mod("cpu", "microwatt").data_location cdir = os.path.dirname(__file__) if use_ghdl_yosys_plugin: from litex.build import tools import subprocess ys = [] ys.append( "ghdl --ieee=synopsys -fexplicit -frelaxed-rules --std=08 \\") for source in sources: ys.append(os.path.join(sdir, source) + " \\") ys.append( os.path.join(os.path.dirname(__file__), "microwatt_wrapper.vhdl") + " \\") ys.append("-e microwatt_wrapper") ys.append("chformal -assert -remove") ys.append("write_verilog {}".format( os.path.join(cdir, "microwatt.v"))) tools.write_to_file(os.path.join(cdir, "microwatt.ys"), "\n".join(ys)) if subprocess.call([ "yosys", "-q", "-m", "ghdl", os.path.join(cdir, "microwatt.ys") ]): raise OSError( "Unable to convert Microwatt CPU to verilog, please check your GHDL-Yosys-plugin install" ) platform.add_source(os.path.join(cdir, "microwatt.v")) else: platform.add_sources(sdir, *sources) platform.add_source( os.path.join(os.path.dirname(__file__), "microwatt_wrapper.vhdl"))
def _generate_mem_region_map(self): if self.memory_x is not None: memory_x_dir = os.path.dirname(os.path.realpath(self.memory_x)) os.makedirs(memory_x_dir, exist_ok=True) write_to_file(self.memory_x, export.get_memory_x(self.soc))
def _generate_includes(self):
    """Generate the software support files under
    <output_dir>/software/include/generated: variables.mak, the linker
    fragments (output_format.ld, regions.ld) and the C headers (mem.h,
    csr.h, git.h and, for SDRAM SoCs with a registered controller,
    sdram_phy.h).

    Cleanup: the original assigned ``cpu_type = self.soc.cpu_type`` but
    never used it (every call here takes ``self.soc.cpu``); the dead
    local has been removed.
    """
    memory_regions = self.soc.get_memory_regions()
    flash_boot_address = getattr(self.soc, "flash_boot_address", None)
    shadow_base = getattr(self.soc, "shadow_base", None)
    csr_regions = self.soc.get_csr_regions()
    constants = self.soc.get_constants()

    buildinc_dir = os.path.join(self.output_dir, "software", "include")
    generated_dir = os.path.join(buildinc_dir, "generated")
    os.makedirs(generated_dir, exist_ok=True)

    # variables.mak is assembled as one KEY=VALUE line per entry.
    variables_contents = []

    def define(k, v):
        variables_contents.append("{}={}\n".format(k, _makefile_escape(v)))

    for k, v in cpu_interface.get_cpu_mak(self.soc.cpu):
        define(k, v)
    # Distinguish between LiteX and MiSoC.
    define("LITEX", "1")

    # Distinguish between applications running from main RAM and
    # flash for user-provided software packages.
    exec_profiles = {
        "COPY_TO_MAIN_RAM": "0",
        "EXECUTE_IN_PLACE": "0",
    }
    if "main_ram" in (m[0] for m in memory_regions):
        exec_profiles["COPY_TO_MAIN_RAM"] = "1"
    else:
        exec_profiles["EXECUTE_IN_PLACE"] = "1"
    for k, v in exec_profiles.items():
        define(k, v)

    define("SOC_DIRECTORY", soc_directory)
    variables_contents.append("export BUILDINC_DIRECTORY\n")
    define("BUILDINC_DIRECTORY", buildinc_dir)
    for name, src_dir in self.software_packages:
        define(name.upper() + "_DIRECTORY", src_dir)

    write_to_file(
        os.path.join(generated_dir, "variables.mak"),
        "".join(variables_contents))
    write_to_file(
        os.path.join(generated_dir, "output_format.ld"),
        cpu_interface.get_linker_output_format(self.soc.cpu))
    write_to_file(
        os.path.join(generated_dir, "regions.ld"),
        cpu_interface.get_linker_regions(memory_regions))
    write_to_file(
        os.path.join(generated_dir, "mem.h"),
        cpu_interface.get_mem_header(memory_regions, flash_boot_address,
                                     shadow_base))
    write_to_file(
        os.path.join(generated_dir, "csr.h"),
        cpu_interface.get_csr_header(csr_regions, constants))
    write_to_file(
        os.path.join(generated_dir, "git.h"),
        cpu_interface.get_git_header())
    # Only SDRAM SoCs that actually instantiated an "sdram" submodule get a
    # PHY initialization header.
    if isinstance(self.soc, soc_sdram.SoCSDRAM):
        if hasattr(self.soc, "sdram"):
            write_to_file(
                os.path.join(generated_dir, "sdram_phy.h"),
                sdram_init.get_sdram_phy_c_header(
                    self.soc.sdram.controller.settings.phy,
                    self.soc.sdram.controller.settings.timing))