def test_fpgadataflow_ipstitch_pynq_deployment_folder():
    model = ModelWrapper(
        ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_pynq_driver.onnx"
    )
    try:
        ip = os.environ["PYNQ_IP"]  # no default for this one; skip if not defined
        if ip == "":
            pytest.skip("PYNQ board IP address not specified")
        username = os.getenv("PYNQ_USERNAME", "xilinx")
        password = os.getenv("PYNQ_PASSWORD", "xilinx")
        target_dir = os.getenv("PYNQ_TARGET_DIR", "/home/xilinx/finn")
        model = model.transform(DeployToPYNQ(ip, username, password, target_dir))
        pynq_ip = model.get_metadata_prop("pynq_ip")
        pynq_username = model.get_metadata_prop("pynq_username")
        pynq_password = model.get_metadata_prop("pynq_password")
        pynq_target_dir = model.get_metadata_prop("pynq_target_dir")
        assert pynq_ip == ip
        assert pynq_username == username
        assert pynq_password == password
        assert pynq_target_dir == target_dir
        deployment_dir = model.get_metadata_prop("pynq_deploy_dir")
        assert deployment_dir is not None
        assert os.path.isdir(deployment_dir)
        model.save(
            ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_pynq_deployment.onnx"
        )
    except KeyError:
        pytest.skip("PYNQ board IP address not specified")

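# The deployment test above is driven entirely by environment variables; a
# minimal sketch of how they could be set before invoking pytest. The IP
# address is a made-up example, and the other values mirror the test's own
# fallback defaults.
import os

os.environ["PYNQ_IP"] = "192.168.2.99"  # example address, replace with your board
os.environ["PYNQ_USERNAME"] = "xilinx"
os.environ["PYNQ_PASSWORD"] = "xilinx"
os.environ["PYNQ_TARGET_DIR"] = "/home/xilinx/finn"
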
def test_fpgadataflow_ipstitch_do_stitch():
    model = ModelWrapper(
        ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_gen_model.onnx"
    )
    model = model.transform(CodeGen_ipstitch(test_fpga_part))
    vivado_stitch_proj_dir = model.get_metadata_prop("vivado_stitch_proj")
    assert vivado_stitch_proj_dir is not None
    assert os.path.isdir(vivado_stitch_proj_dir)
    assert os.path.isfile(vivado_stitch_proj_dir + "/ip/component.xml")
    vivado_stitch_vlnv = model.get_metadata_prop("vivado_stitch_vlnv")
    assert vivado_stitch_vlnv is not None
    assert vivado_stitch_vlnv == "xilinx_finn:finn:finn_design:1.0"
    model.save(ip_stitch_model_dir + "/test_fpgadataflow_ip_stitch.onnx")

def step_create_stitched_ip(model: ModelWrapper, cfg: DataflowBuildConfig):
    """Create stitched IP for a graph after all HLS IP blocks have been generated.
    Depends on the DataflowOutputType.STITCHED_IP output product."""

    if DataflowOutputType.STITCHED_IP in cfg.generate_outputs:
        stitched_ip_dir = cfg.output_dir + "/stitched_ip"
        model = model.transform(
            CreateStitchedIP(cfg._resolve_fpga_part(), cfg.synth_clk_period_ns)
        )
        # TODO copy all ip sources into output dir? as zip?
        copytree(model.get_metadata_prop("vivado_stitch_proj"), stitched_ip_dir)
        print("Vivado stitched IP written into " + stitched_ip_dir)
        if VerificationStepType.STITCHED_IP_RTLSIM in cfg._resolve_verification_steps():
            # prepare ip-stitched rtlsim
            verify_model = deepcopy(model)
            # rtlsim only supports impl_style=rtl for StreamingFIFO, ensure that
            for fifo_layer in verify_model.get_nodes_by_op_type("StreamingFIFO"):
                getCustomOp(fifo_layer).set_nodeattr("impl_style", "rtl")
            # similarly for StreamingDataWidthConverter with impl_style=hls
            for dwc_layer in verify_model.get_nodes_by_op_type(
                "StreamingDataWidthConverter_Batch"
            ):
                getCustomOp(dwc_layer).set_nodeattr("impl_style", "hls")
            verify_model = verify_model.transform(PrepareRTLSim())
            verify_model.set_metadata_prop("exec_mode", "rtlsim")
            verify_step(verify_model, cfg, "stitched_ip_rtlsim", need_parent=True)
    return model

def test_fpgadataflow_ipstitch_pynq_driver():
    model = ModelWrapper(ip_stitch_model_dir + "/test_fpgadataflow_pynq_projgen.onnx")
    model = model.transform(MakePYNQDriver())
    driver_dir = model.get_metadata_prop("pynq_driver_dir")
    assert driver_dir is not None
    assert os.path.isdir(driver_dir)
    model.save(ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_pynq_driver.onnx")

def test_fpgadataflow_ipstitch_pynq_synth():
    model = ModelWrapper(ip_stitch_model_dir + "/test_fpgadataflow_pynq_projgen.onnx")
    model = model.transform(SynthPYNQProject())
    bitfile = model.get_metadata_prop("vivado_pynq_bitfile")
    assert bitfile is not None
    assert os.path.isfile(bitfile)
    model.save(ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_pynq_synth.onnx")

def test_fpgadataflow_ipstitch_pynq_projgen():
    model = ModelWrapper(ip_stitch_model_dir + "/test_fpgadataflow_ip_stitch.onnx")
    model = model.transform(MakePYNQProject(test_pynq_board))
    vivado_pynq_proj_dir = model.get_metadata_prop("vivado_pynq_proj")
    assert vivado_pynq_proj_dir is not None
    assert os.path.isdir(vivado_pynq_proj_dir)
    model.save(ip_stitch_model_dir + "/test_fpgadataflow_pynq_projgen.onnx")

def step_out_of_context_synthesis(model: ModelWrapper, cfg: DataflowBuildConfig):
    """Run out-of-context synthesis and generate reports.
    Depends on the DataflowOutputType.STITCHED_IP output product."""

    if DataflowOutputType.OOC_SYNTH in cfg.generate_outputs:
        assert (
            DataflowOutputType.STITCHED_IP in cfg.generate_outputs
        ), "OOC needs stitched IP"
        model = model.transform(
            SynthOutOfContext(
                part=cfg._resolve_fpga_part(), clk_period_ns=cfg.synth_clk_period_ns
            )
        )
        report_dir = cfg.output_dir + "/report"
        os.makedirs(report_dir, exist_ok=True)
        ooc_res_dict = model.get_metadata_prop("res_total_ooc_synth")
        ooc_res_dict = eval(ooc_res_dict)

        estimate_network_performance = model.analysis(dataflow_performance)
        # add some more metrics to estimated performance
        n_clock_cycles_per_sec = float(ooc_res_dict["fmax_mhz"]) * (10 ** 6)
        est_fps = n_clock_cycles_per_sec / estimate_network_performance["max_cycles"]
        ooc_res_dict["estimated_throughput_fps"] = est_fps
        with open(report_dir + "/ooc_synth_and_timing.json", "w") as f:
            json.dump(ooc_res_dict, f, indent=2)
    return model

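# A minimal numeric sketch of the throughput estimate computed above; the
# fmax_mhz and max_cycles values are hypothetical stand-ins for a real OOC
# synthesis report and dataflow_performance analysis result.
ooc_res_dict = {"fmax_mhz": 250.0}  # example post-synthesis Fmax
estimate_network_performance = {"max_cycles": 4096}  # example slowest-layer cycles
n_clock_cycles_per_sec = float(ooc_res_dict["fmax_mhz"]) * (10 ** 6)
est_fps = n_clock_cycles_per_sec / estimate_network_performance["max_cycles"]
print(est_fps)  # 250e6 / 4096 = 61035.15625 frames/s
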
def apply(self, model):
    graph = model.graph
    if self.mode == "estimate":
        res_fxn = res_estimation
    elif self.mode == "hls":
        res_fxn = hls_synth_res_estimation
    elif self.mode == "synth":
        res_fxn = post_synth_res
    else:
        raise Exception("Unrecognized mode for AnnotateResources")
    if self.res_dict is None:
        self.res_dict = model.analysis(res_fxn)
    children_dict = {}
    # annotate node resources
    for node in graph.node:
        if _is_fpgadataflow_node(node) and node.name in self.res_dict.keys():
            op_inst = registry.getCustomOp(node)
            op_inst.set_nodeattr("res_" + self.mode, str(self.res_dict[node.name]))
            children_dict[node.name] = self.res_dict[node.name]
        elif node.op_type == "StreamingDataflowPartition":
            # recurse into model to manually annotate per-layer resources
            sdp_model_filename = getCustomOp(node).get_nodeattr("model")
            sdp_model = ModelWrapper(sdp_model_filename)
            sdp_model = sdp_model.transform(
                AnnotateResources(self.mode, self.res_dict)
            )
            sdp_dict = sdp_model.get_metadata_prop("res_total_" + self.mode)
            sdp_dict = eval(sdp_dict)
            # save transformed model
            sdp_model.save(sdp_model_filename)
            # set res attribute for sdp node
            getCustomOp(node).set_nodeattr("res_" + self.mode, str(sdp_dict))
            children_dict[node.name] = sdp_dict
    self.res_dict.update(children_dict)
    total_dict = {}
    for lname in children_dict.keys():
        layer_res_dict = self.res_dict[lname]
        for r_type in layer_res_dict.keys():
            r_amount = layer_res_dict[r_type]
            r_amount = float(r_amount)
            if r_type in total_dict.keys():
                total_dict[r_type] += r_amount
            else:
                total_dict[r_type] = r_amount
    for k in total_dict.keys():
        if "efficiency" in k:
            total_dict[k] = total_dict[k] / len(graph.node)
    model.set_metadata_prop("res_total_" + self.mode, str(total_dict))
    if "(top)" in self.res_dict.keys():
        top_dict = self.res_dict["(top)"]
        model.set_metadata_prop("res_total_top_" + self.mode, str(top_dict))
    return (model, False)

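# Toy illustration of the totaling loop in AnnotateResources.apply, using a
# hypothetical two-layer children_dict; per-resource counts are summed into
# the dict that ends up stored as the "res_total_<mode>" metadata string.
children_dict = {
    "layer_0": {"LUT": 1200.0, "BRAM_18K": 4.0},
    "layer_1": {"LUT": 800.0, "BRAM_18K": 2.0},
}
total_dict = {}
for layer_res_dict in children_dict.values():
    for r_type, r_amount in layer_res_dict.items():
        total_dict[r_type] = total_dict.get(r_type, 0.0) + float(r_amount)
print(total_dict)  # {'LUT': 2000.0, 'BRAM_18K': 6.0}
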
def step_make_pynq_driver(model: ModelWrapper, cfg: DataflowBuildConfig):
    """Create a PYNQ Python driver that can be used to interface the generated
    accelerator."""

    if DataflowOutputType.PYNQ_DRIVER in cfg.generate_outputs:
        driver_dir = cfg.output_dir + "/driver"
        model = model.transform(MakePYNQDriver(cfg._resolve_driver_platform()))
        copytree(model.get_metadata_prop("pynq_driver_dir"), driver_dir)
        print("PYNQ Python driver written into " + driver_dir)
    return model

def step_synthesize_bitfile(model: ModelWrapper, cfg: DataflowBuildConfig):
    """Synthesize a bitfile for the accelerator, using the specified shell flow
    (Vivado or Vitis) to target the specified board."""

    if DataflowOutputType.BITFILE in cfg.generate_outputs:
        bitfile_dir = cfg.output_dir + "/bitfile"
        os.makedirs(bitfile_dir, exist_ok=True)
        report_dir = cfg.output_dir + "/report"
        os.makedirs(report_dir, exist_ok=True)
        partition_model_dir = cfg.output_dir + "/intermediate_models/kernel_partitions"
        if cfg.shell_flow_type == ShellFlowType.VIVADO_ZYNQ:
            model = model.transform(
                ZynqBuild(
                    cfg.board,
                    cfg.synth_clk_period_ns,
                    cfg.enable_hw_debug,
                    partition_model_dir=partition_model_dir,
                )
            )
            copy(model.get_metadata_prop("bitfile"), bitfile_dir + "/finn-accel.bit")
            copy(model.get_metadata_prop("hw_handoff"), bitfile_dir + "/finn-accel.hwh")
            copy(
                model.get_metadata_prop("vivado_synth_rpt"),
                report_dir + "/post_synth_resources.xml",
            )
            vivado_pynq_proj_dir = model.get_metadata_prop("vivado_pynq_proj")
            timing_rpt = (
                "%s/finn_zynq_link.runs/impl_1/top_wrapper_timing_summary_routed.rpt"
                % vivado_pynq_proj_dir
            )
            copy(timing_rpt, report_dir + "/post_route_timing.rpt")
        elif cfg.shell_flow_type == ShellFlowType.VITIS_ALVEO:
            model = model.transform(
                VitisBuild(
                    cfg._resolve_fpga_part(),
                    cfg.synth_clk_period_ns,
                    cfg.vitis_platform,
                    strategy=cfg._resolve_vitis_opt_strategy(),
                    enable_debug=cfg.enable_hw_debug,
                    floorplan_file=cfg.vitis_floorplan_file,
                    partition_model_dir=partition_model_dir,
                )
            )
            copy(model.get_metadata_prop("bitfile"), bitfile_dir + "/finn-accel.xclbin")
            copy(
                model.get_metadata_prop("vivado_synth_rpt"),
                report_dir + "/post_synth_resources.xml",
            )
        else:
            raise Exception("Unrecognized shell_flow_type: " + str(cfg.shell_flow_type))
        print("Bitfile written into " + bitfile_dir)
    return model

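# Hedged usage sketch: how the build steps above are typically driven through
# FINN's dataflow builder. The module paths, config fields and board name
# follow the FINN build examples, but treat them as assumptions and check
# against the FINN version in use.
import finn.builder.build_dataflow as build
import finn.builder.build_dataflow_config as build_cfg

cfg = build_cfg.DataflowBuildConfig(
    output_dir="build_example",
    synth_clk_period_ns=10.0,
    board="Pynq-Z1",
    shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ,
    generate_outputs=[
        build_cfg.DataflowOutputType.STITCHED_IP,
        build_cfg.DataflowOutputType.PYNQ_DRIVER,
        build_cfg.DataflowOutputType.BITFILE,
    ],
)
build.build_dataflow_cfg("model.onnx", cfg)
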
def test_ipstitch_rtlsim(self, topology, wbits, abits, kind):
    prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "fifodepth_" + kind)
    model = load_test_checkpoint_or_skip(prev_chkpt_name)
    test_fpga_part = get_build_env(kind, target_clk_ns)["part"]
    model = model.transform(InsertDWC())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(AnnotateCycles())
    perf = model.analysis(dataflow_performance)
    latency = perf["critical_path_cycles"]
    # rtlsim only supports impl_style=rtl for StreamingFIFO, ensure that
    for fifo_layer in model.get_nodes_by_op_type("StreamingFIFO"):
        getCustomOp(fifo_layer).set_nodeattr("impl_style", "rtl")
    model = model.transform(PrepareIP(test_fpga_part, target_clk_ns))
    model = model.transform(HLSSynthIP())
    model = model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns))
    model = model.transform(PrepareRTLSim())
    model.set_metadata_prop("exec_mode", "rtlsim")
    os.environ["LIVENESS_THRESHOLD"] = str(int(latency * 1.1))
    if rtlsim_trace:
        model.set_metadata_prop(
            "rtlsim_trace", "%s_w%da%d.vcd" % (topology, wbits, abits)
        )
        os.environ["RTLSIM_TRACE_DEPTH"] = "3"
    rtlsim_chkpt = get_checkpoint_name(
        topology, wbits, abits, "ipstitch_rtlsim_" + kind
    )
    model.save(rtlsim_chkpt)
    parent_chkpt = get_checkpoint_name(topology, wbits, abits, "dataflow_parent")
    (input_tensor_npy, output_tensor_npy) = get_golden_io_pair(
        topology, wbits, abits, return_topk=1
    )
    y = execute_parent(parent_chkpt, rtlsim_chkpt, input_tensor_npy)
    model = ModelWrapper(rtlsim_chkpt)
    perf["cycles_rtlsim"] = model.get_metadata_prop("cycles_rtlsim")
    # warnings.warn("Estimated & rtlsim performance: " + str(perf))
    # for (k, v) in perf.items():
    #     update_dashboard_data(topology, wbits, abits, k, v)
    update_dashboard_data(
        topology, wbits, abits, "cycles_rtlsim", perf["cycles_rtlsim"]
    )
    assert np.isclose(y, output_tensor_npy).all()

def step_create_stitched_ip(model: ModelWrapper, cfg: DataflowBuildConfig):
    """Create stitched IP for a graph after all HLS IP blocks have been generated.
    Depends on the DataflowOutputType.STITCHED_IP output product."""

    if DataflowOutputType.STITCHED_IP in cfg.generate_outputs:
        stitched_ip_dir = cfg.output_dir + "/stitched_ip"
        model = model.transform(
            CreateStitchedIP(
                cfg._resolve_fpga_part(),
                cfg.synth_clk_period_ns,
                vitis=cfg.stitched_ip_gen_dcp,
            )
        )
        # TODO copy all ip sources into output dir? as zip?
        copy_tree(model.get_metadata_prop("vivado_stitch_proj"), stitched_ip_dir)
        print("Vivado stitched IP written into " + stitched_ip_dir)
        if VerificationStepType.STITCHED_IP_RTLSIM in cfg._resolve_verification_steps():
            # prepare ip-stitched rtlsim
            verify_model = deepcopy(model)
            verify_model = prepare_for_stitched_ip_rtlsim(verify_model, cfg)
            # use critical path estimate to set rtlsim liveness threshold
            # (very conservative)
            verify_model = verify_model.transform(AnnotateCycles())
            estimate_network_performance = verify_model.analysis(dataflow_performance)
            prev_liveness = pyverilate_get_liveness_threshold_cycles()
            os.environ["LIVENESS_THRESHOLD"] = str(
                int(estimate_network_performance["critical_path_cycles"])
            )
            if cfg.verify_save_rtlsim_waveforms:
                report_dir = cfg.output_dir + "/report"
                os.makedirs(report_dir, exist_ok=True)
                verify_model.set_metadata_prop(
                    "rtlsim_trace", "%s/verify_rtlsim.vcd" % (report_dir)
                )
            verify_step(verify_model, cfg, "stitched_ip_rtlsim", need_parent=True)
            os.environ["LIVENESS_THRESHOLD"] = str(prev_liveness)
    return model

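# prepare_for_stitched_ip_rtlsim is called above but not shown here; a
# plausible sketch of what it does, reconstructed from the inline logic of
# the earlier step_create_stitched_ip variant. This is an assumption about
# the helper, not its actual definition.
def prepare_for_stitched_ip_rtlsim(verify_model, cfg):
    # rtlsim only supports impl_style=rtl for StreamingFIFO
    for fifo_layer in verify_model.get_nodes_by_op_type("StreamingFIFO"):
        getCustomOp(fifo_layer).set_nodeattr("impl_style", "rtl")
    # similarly, force impl_style=hls for StreamingDataWidthConverter_Batch
    for dwc_layer in verify_model.get_nodes_by_op_type(
        "StreamingDataWidthConverter_Batch"
    ):
        getCustomOp(dwc_layer).set_nodeattr("impl_style", "hls")
    verify_model = verify_model.transform(PrepareRTLSim())
    verify_model.set_metadata_prop("exec_mode", "rtlsim")
    return verify_model
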
def execute_node(node, context, graph, return_full_exec_context=False):
    """Executes a single node using onnxruntime or a custom function; dataflow
    partitions are executed via remote execution or rtlsim. Input/output is
    provided via the context dict."""

    if node.op_type == "GenericPartition":
        partition_node = getCustomOp(node)
        model = ModelWrapper(partition_node.get_nodeattr("model"))
        inp_ctx = dict(filter(lambda x: x[0] in node.input, context.items()))
        # inputs may have been renamed in partition
        for i, old_iname in enumerate(node.input):
            new_iname = model.graph.input[i].name
            if old_iname != new_iname:
                inp_ctx[new_iname] = inp_ctx[old_iname]
                del inp_ctx[old_iname]
        ret = execute_onnx(model, inp_ctx, return_full_exec_context)
        # outputs may have been renamed in partition
        for i, node_oname in enumerate(node.output):
            model_oname = model.graph.output[i].name
            context[node_oname] = ret[model_oname]
        # prefix and insert exec context entries
        if return_full_exec_context:
            for tname in ret.keys():
                if tname not in [x.name for x in model.graph.output]:
                    context[node.name + "_" + tname] = ret[tname]
    elif node.op_type == "StreamingDataflowPartition":
        sdp_node = getCustomOp(node)
        model = ModelWrapper(sdp_node.get_nodeattr("model"))
        inp_ctx = dict(filter(lambda x: x[0] in node.input, context.items()))
        # input may have been renamed in partition
        assert len(inp_ctx) == 1
        old_iname = node.input[0]
        new_iname = model.graph.input[0].name
        if old_iname != new_iname:
            inp_ctx[new_iname] = inp_ctx[old_iname]
            del inp_ctx[old_iname]
        ret = execute_onnx(model, inp_ctx, return_full_exec_context)
        # if the model was in ip-stitched rtlsim mode, may get annotation
        # for number of elapsed cycles, save again
        if model.get_metadata_prop("exec_mode") == "rtlsim":
            model.save(sdp_node.get_nodeattr("model"))
        # output may have been renamed in partition
        assert len(model.graph.output) == 1
        node_oname = node.output[0]
        model_oname = model.graph.output[0].name
        context[node_oname] = ret[model_oname]
        # prefix and insert exec context entries
        if return_full_exec_context:
            for tname in ret.keys():
                if tname != model_oname:
                    context[node.name + "_" + tname] = ret[tname]
    else:
        if is_finn_op(node.domain):
            ex_cu_node.execute_custom_node(node, context, graph)
        else:
            # onnxruntime unfortunately does not implement run_node as defined
            # by ONNX, it can only execute entire models -- so we create a
            # model which solely consists of our current node.
            # note: ensure that the same ValueInfo does not appear both in
            # graph.value_info as well as graph.output or graph.input
            # nodes with multiple outputs that are a mix of value_info and
            # input/outputs may get them reordered below
            node_inputs = list(filter(lambda x: x.name in node.input, graph.input))
            node_inputs += list(
                filter(lambda x: x.name in node.input, graph.value_info)
            )
            node_outputs = list(filter(lambda x: x.name in node.output, graph.output))
            node_outputs += list(
                filter(lambda x: x.name in node.output, graph.value_info)
            )
            node_graph = helper.make_graph(
                nodes=[node],
                name="single-node-exec",
                inputs=node_inputs,
                outputs=node_outputs,
            )
            node_model = helper.make_model(node_graph)
            input_dict = dict()
            for inp in node.input:
                input_dict[inp] = context[inp]
            sess = rt.InferenceSession(node_model.SerializeToString())
            output_list = sess.run(None, input_dict)
            for output_ind in range(len(node.output)):
                # get the name of the target buffer from node.output
                outp = node.output[output_ind]
                # retrieve the index of that name in node_outputs
                for i in range(len(node_outputs)):
                    if outp == node_outputs[i].name:
                        list_ind = i
                # use that index to index output_list
                if output_list[list_ind].shape != context[outp].shape:
                    raise Exception(
                        """Output shapes disagree after node execution:
                        found %s vs expected %s"""
                        % (str(output_list[list_ind].shape), str(context[outp].shape))
                    )
                context[outp] = output_list[list_ind]

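# Minimal self-contained sketch of the single-node execution fallback above:
# wrap one ONNX node in a throwaway model and run it with onnxruntime.
# Assumes onnx, onnxruntime and numpy are installed; the node and tensor
# names are made up for the example.
import numpy as np
import onnxruntime as rt
from onnx import TensorProto, helper

add_node = helper.make_node("Add", ["a", "b"], ["c"])
graph = helper.make_graph(
    nodes=[add_node],
    name="single-node-exec",
    inputs=[
        helper.make_tensor_value_info("a", TensorProto.FLOAT, [2, 2]),
        helper.make_tensor_value_info("b", TensorProto.FLOAT, [2, 2]),
    ],
    outputs=[helper.make_tensor_value_info("c", TensorProto.FLOAT, [2, 2])],
)
node_model = helper.make_model(graph)
sess = rt.InferenceSession(node_model.SerializeToString())
(result,) = sess.run(
    None, {"a": np.ones((2, 2), np.float32), "b": np.ones((2, 2), np.float32)}
)
print(result)  # [[2. 2.] [2. 2.]]
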
def apply(self, model):
    _check_vitis_envvars()
    # create a config file and empty list of xo files
    config = ["[connectivity]"]
    object_files = []
    idma_idx = 0
    odma_idx = 0
    instance_names = {}
    for node in model.graph.node:
        assert node.op_type == "StreamingDataflowPartition", "Invalid link graph"
        sdp_node = getCustomOp(node)
        dataflow_model_filename = sdp_node.get_nodeattr("model")
        kernel_model = ModelWrapper(dataflow_model_filename)
        kernel_xo = kernel_model.get_metadata_prop("vitis_xo")
        object_files.append(kernel_xo)
        # gather info on connectivity
        # assume each node connected to outputs/inputs is DMA:
        # has axis, aximm and axilite
        # everything else is axis-only
        # assume only one connection from each ip to the next
        # all aximm allocated to DDR[0]
        # all kernels allocated to SLR0
        producer = model.find_producer(node.input[0])
        consumer = model.find_consumers(node.output[0])
        # define kernel instances
        # name kernels connected to graph inputs as idmaxx
        # name kernels connected to graph outputs as odmaxx
        if producer is None:
            instance_names[node.name] = "idma" + str(idma_idx)
            config.append("nk=%s:1:%s" % (node.name, instance_names[node.name]))
            idma_idx += 1
        elif consumer is None:
            instance_names[node.name] = "odma" + str(odma_idx)
            config.append("nk=%s:1:%s" % (node.name, instance_names[node.name]))
            odma_idx += 1
        else:
            instance_names[node.name] = node.name
            config.append("nk=%s:1:%s" % (node.name, instance_names[node.name]))
        # assign SLRs
        config.append("slr=%s:SLR0" % instance_names[node.name])
        # assign memory banks
        if producer is None or consumer is None:
            config.append(
                "sp=%s.m_axi_gmem0:DDR[%d]" % (instance_names[node.name], 0)
            )
        # connect streams
        if producer is not None:
            for i in range(len(node.input)):
                producer = model.find_producer(node.input[i])
                if producer is not None:
                    j = list(producer.output).index(node.input[i])
                    config.append(
                        "stream_connect=%s.m_axis_%d:%s.s_axis_%d"
                        % (
                            instance_names[producer.name],
                            j,
                            instance_names[node.name],
                            i,
                        )
                    )
    # create a temporary folder for the project
    link_dir = make_build_dir(prefix="vitis_link_proj_")
    model.set_metadata_prop("vitis_link_proj", link_dir)
    # add Vivado physopt directives if desired
    if self.strategy == VitisOptStrategy.PERFORMANCE_BEST:
        config.append("[vivado]")
        config.append(
            "prop=run.impl_1.STEPS.OPT_DESIGN.ARGS.DIRECTIVE=ExploreWithRemap"
        )
        config.append("prop=run.impl_1.STEPS.PLACE_DESIGN.ARGS.DIRECTIVE=Explore")
        config.append("prop=run.impl_1.STEPS.PHYS_OPT_DESIGN.IS_ENABLED=true")
        config.append("prop=run.impl_1.STEPS.PHYS_OPT_DESIGN.ARGS.DIRECTIVE=Explore")
        config.append("prop=run.impl_1.STEPS.ROUTE_DESIGN.ARGS.DIRECTIVE=Explore")
    config = "\n".join(config) + "\n"
    with open(link_dir + "/config.txt", "w") as f:
        f.write(config)
    # create tcl script to generate resource report in XML format
    gen_rep_xml = templates.vitis_gen_xml_report_tcl_template
    gen_rep_xml = gen_rep_xml.replace("$VITIS_PROJ_PATH$", link_dir)
    with open(link_dir + "/gen_report_xml.tcl", "w") as f:
        f.write(gen_rep_xml)
    debug_commands = []
    if self.enable_debug:
        for inst in list(instance_names.values()):
            debug_commands.append("--dk chipscope:%s" % inst)
    # create a shell script and call Vitis
    script = link_dir + "/run_vitis_link.sh"
    working_dir = os.environ["PWD"]
    with open(script, "w") as f:
        f.write("#!/bin/bash \n")
        f.write("cd {}\n".format(link_dir))
        f.write(
            "v++ -t hw --platform %s --link %s"
            " --kernel_frequency %d --config config.txt --optimize %s"
            " --save-temps -R2 %s\n"
            % (
                self.platform,
                " ".join(object_files),
                self.f_mhz,
                self.strategy.value,
                " ".join(debug_commands),
            )
        )
        f.write("cd {}\n".format(working_dir))
    bash_command = ["bash", script]
    process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
    process_compile.communicate()
    # TODO rename xclbin appropriately here?
    xclbin = link_dir + "/a.xclbin"
    assert os.path.isfile(xclbin), (
        "Vitis .xclbin file not created, check logs under %s" % link_dir
    )
    model.set_metadata_prop("bitfile", xclbin)
    # run Vivado to gen xml report
    gen_rep_xml_sh = link_dir + "/gen_report_xml.sh"
    working_dir = os.environ["PWD"]
    with open(gen_rep_xml_sh, "w") as f:
        f.write("#!/bin/bash \n")
        f.write("cd {}\n".format(link_dir))
        f.write("vivado -mode batch -source %s\n" % (link_dir + "/gen_report_xml.tcl"))
        f.write("cd {}\n".format(working_dir))
    bash_command = ["bash", gen_rep_xml_sh]
    process_genxml = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
    process_genxml.communicate()
    # filename for the synth utilization report
    synth_report_filename = link_dir + "/synth_report.xml"
    model.set_metadata_prop("vivado_synth_rpt", synth_report_filename)
    return (model, False)

def apply(self, model):
    # create a config file and empty list of xo files
    config = []
    idma_idx = 0
    odma_idx = 0
    aximm_idx = 0
    axilite_idx = 0
    global_clk_ns = 0
    instance_names = {}
    for node in model.graph.node:
        assert node.op_type == "StreamingDataflowPartition", "Invalid link graph"
        sdp_node = getCustomOp(node)
        dataflow_model_filename = sdp_node.get_nodeattr("model")
        kernel_model = ModelWrapper(dataflow_model_filename)

        ipstitch_path = kernel_model.get_metadata_prop("vivado_stitch_proj")
        if ipstitch_path is None or (not os.path.isdir(ipstitch_path)):
            raise Exception(
                "No stitched IPI design found for %s, apply CreateStitchedIP first."
                % node.name
            )

        vivado_stitch_vlnv = kernel_model.get_metadata_prop("vivado_stitch_vlnv")
        if vivado_stitch_vlnv is None:
            raise Exception(
                "No vlnv found for %s, apply CreateStitchedIP first." % node.name
            )

        ip_dirs = ["list"]
        ip_dirs += collect_ip_dirs(kernel_model, ipstitch_path)
        ip_dirs_str = "[%s]" % (" ".join(ip_dirs))
        config.append(
            "set_property ip_repo_paths "
            "[concat [get_property ip_repo_paths [current_project]] %s] "
            "[current_project]" % ip_dirs_str
        )
        config.append("update_ip_catalog -rebuild -scan_changes")

        # get metadata property clk_ns to calculate clock frequency
        clk_ns = float(kernel_model.get_metadata_prop("clk_ns"))
        if clk_ns > global_clk_ns:
            global_clk_ns = clk_ns

        ifnames = eval(kernel_model.get_metadata_prop("vivado_stitch_ifnames"))

        # gather info on connectivity
        # assume each node connected to outputs/inputs is DMA:
        # has axis, aximm and axilite
        # everything else is axis-only
        # assume only one connection from each ip to the next
        # all aximm allocated to DDR[0]
        # all kernels allocated to SLR0
        producer = model.find_producer(node.input[0])
        consumer = model.find_consumers(node.output[0])
        # define kernel instances
        # name kernels connected to graph inputs as idmaxx
        # name kernels connected to graph outputs as odmaxx
        if producer is None or consumer is None:
            if producer is None:
                instance_names[node.name] = "idma" + str(idma_idx)
            elif consumer is None:
                instance_names[node.name] = "odma" + str(odma_idx)
            config.append(
                "create_bd_cell -type ip -vlnv %s %s"
                % (vivado_stitch_vlnv, instance_names[node.name])
            )
            config.append(
                "connect_bd_intf_net [get_bd_intf_pins %s/m_axi_gmem0] "
                "[get_bd_intf_pins smartconnect_0/S%02d_AXI]"
                % (instance_names[node.name], aximm_idx)
            )
            assert (
                len(ifnames["axilite"]) == 1
            ), "Must have 1 AXI lite interface on IODMA nodes"
            axilite_intf_name = ifnames["axilite"][0]
            assert axilite_intf_name is not None
            config.append(
                "connect_bd_intf_net [get_bd_intf_pins %s/%s] "
                "[get_bd_intf_pins axi_interconnect_0/M%02d_AXI]"
                % (instance_names[node.name], axilite_intf_name, axilite_idx)
            )
            idma_idx += 1
            aximm_idx += 1
            axilite_idx += 1
        else:
            instance_names[node.name] = node.name
            config.append(
                "create_bd_cell -type ip -vlnv %s %s"
                % (vivado_stitch_vlnv, instance_names[node.name])
            )
            for axilite_intf_name in ifnames["axilite"]:
                config.append(
                    "connect_bd_intf_net [get_bd_intf_pins %s/%s] "
                    "[get_bd_intf_pins axi_interconnect_0/M%02d_AXI]"
                    % (instance_names[node.name], axilite_intf_name, axilite_idx)
                )
                axilite_idx += 1

        config.append(
            "connect_bd_net [get_bd_pins %s/ap_clk] "
            "[get_bd_pins smartconnect_0/aclk]" % instance_names[node.name]
        )
        config.append(
            "connect_bd_net [get_bd_pins %s/ap_rst_n] "
            "[get_bd_pins smartconnect_0/aresetn]" % instance_names[node.name]
        )
        # connect streams
        if producer is not None:
            for i in range(len(node.input)):
                producer = model.find_producer(node.input[i])
                if producer is not None:
                    j = list(producer.output).index(node.input[i])
                    config.append(
                        "connect_bd_intf_net [get_bd_intf_pins %s/s_axis_%d] "
                        "[get_bd_intf_pins %s/m_axis_%d]"
                        % (
                            instance_names[node.name],
                            i,
                            instance_names[producer.name],
                            j,
                        )
                    )

    # create a temporary folder for the project
    vivado_pynq_proj_dir = make_build_dir(prefix="vivado_zynq_proj_")
    model.set_metadata_prop("vivado_pynq_proj", vivado_pynq_proj_dir)

    fclk_mhz = int(1 / (global_clk_ns * 0.001))

    # create a TCL recipe for the project
    ipcfg = vivado_pynq_proj_dir + "/ip_config.tcl"
    config = "\n".join(config) + "\n"
    with open(ipcfg, "w") as f:
        f.write(
            templates.custom_zynq_shell_template
            % (
                fclk_mhz,
                axilite_idx,
                aximm_idx,
                self.platform,
                pynq_part_map[self.platform],
                config,
                self.enable_debug,
            )
        )

    # create a shell script for project creation and synthesis
    synth_project_sh = vivado_pynq_proj_dir + "/synth_project.sh"
    working_dir = os.environ["PWD"]
    with open(synth_project_sh, "w") as f:
        f.write("#!/bin/bash \n")
        f.write("cd {}\n".format(vivado_pynq_proj_dir))
        f.write("vivado -mode batch -source %s\n" % ipcfg)
        f.write("cd {}\n".format(working_dir))

    # call the synthesis script
    bash_command = ["bash", synth_project_sh]
    process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
    process_compile.communicate()

    bitfile_name = (
        vivado_pynq_proj_dir + "/finn_zynq_link.runs/impl_1/top_wrapper.bit"
    )
    if not os.path.isfile(bitfile_name):
        raise Exception("Synthesis failed, no bitfile found")
    deploy_bitfile_name = vivado_pynq_proj_dir + "/resizer.bit"
    copy(bitfile_name, deploy_bitfile_name)
    # set bitfile attribute
    model.set_metadata_prop("bitfile", deploy_bitfile_name)

    hwh_name = (
        vivado_pynq_proj_dir
        + "/finn_zynq_link.srcs/sources_1/bd/top/hw_handoff/top.hwh"
    )
    if not os.path.isfile(hwh_name):
        raise Exception("Synthesis failed, no hardware handoff file found")
    deploy_hwh_name = vivado_pynq_proj_dir + "/resizer.hwh"
    copy(hwh_name, deploy_hwh_name)
    model.set_metadata_prop("hw_handoff", deploy_hwh_name)

    # filename for the synth utilization report
    synth_report_filename = vivado_pynq_proj_dir + "/synth_report.xml"
    model.set_metadata_prop("vivado_synth_rpt", synth_report_filename)
    return (model, False)

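# Quick numeric check of the clock selection above: the slowest requested
# clock across all partitions sets the PL clock frequency, e.g. with
# hypothetical per-kernel clk_ns values of 3.33, 5.0 and 4.0:
clk_ns_values = [3.33, 5.0, 4.0]
global_clk_ns = max(clk_ns_values)
fclk_mhz = int(1 / (global_clk_ns * 0.001))
print(fclk_mhz)  # 1 / 0.005 -> 200 MHz
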