def test_fpgadataflow_ipstitch_pynq_driver():
    """Run MakePYNQDriver on the stitched-IP checkpoint and verify that a
    driver directory was generated, then save the resulting checkpoint."""
    chkpt = ip_stitch_model_dir + "/test_fpgadataflow_pynq_projgen.onnx"
    wrapped = ModelWrapper(chkpt).transform(MakePYNQDriver())
    generated_dir = wrapped.get_metadata_prop("pynq_driver_dir")
    # the transform must record where the driver was written, and that
    # location must actually exist on disk
    assert generated_dir is not None
    assert os.path.isdir(generated_dir)
    wrapped.save(ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_pynq_driver.onnx")
def step_make_pynq_driver(model: ModelWrapper, cfg: DataflowBuildConfig):
    """Generate a PYNQ Python driver for interfacing the accelerator.

    Only runs when PYNQ_DRIVER is among the configured outputs; the generated
    driver is copied into ``<output_dir>/driver``. Returns the (possibly
    transformed) model in either case.
    """
    if DataflowOutputType.PYNQ_DRIVER not in cfg.generate_outputs:
        return model
    target_dir = cfg.output_dir + "/driver"
    model = model.transform(MakePYNQDriver(cfg._resolve_driver_platform()))
    # copy the generated driver files into the build output tree
    copytree(model.get_metadata_prop("pynq_driver_dir"), target_dir)
    print("PYNQ Python driver written into " + target_dir)
    return model
def apply(self, model):
    """Run the Vitis/Alveo build flow on the given model.

    Prepares the model globally, splits it into dataflow partitions, builds
    each partition into a Vitis kernel (.xo), then links all kernels into
    the final Alveo design. Returns ``(model, False)``.
    """
    _check_vitis_envvars()
    # first infer layouts
    model = model.transform(InferDataLayouts())
    # prepare at global level, then break up into kernels
    for tfm in [
        MakePYNQDriver(platform="alveo"),
        InsertIODMA(512),
        InsertDWC(),
    ]:
        model = model.transform(tfm)
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(Floorplan(floorplan=self.floorplan_file))
    model = model.transform(CreateDataflowPartition())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    # build each dataflow partition into its own Vitis kernel
    for partition in model.get_nodes_by_op_type("StreamingDataflowPartition"):
        sdp_inst = getCustomOp(partition)
        kmodel_file = sdp_inst.get_nodeattr("model")
        kmodel = ModelWrapper(kmodel_file)
        kmodel = kmodel.transform(InsertFIFO())
        kmodel = kmodel.transform(
            InsertTLastMarker(both=True, external=False, dynamic=False)
        )
        kmodel = kmodel.transform(GiveUniqueNodeNames())
        kmodel.save(kmodel_file)
        kmodel = kmodel.transform(PrepareIP(self.fpga_part, self.period_ns))
        kmodel = kmodel.transform(HLSSynthIP())
        kmodel = kmodel.transform(
            CreateStitchedIP(
                self.fpga_part, self.period_ns, sdp_inst.onnx_node.name, True
            )
        )
        kmodel = kmodel.transform(CreateVitisXO(sdp_inst.onnx_node.name))
        kmodel.set_metadata_prop("platform", "alveo")
        kmodel.save(kmodel_file)
    # assemble the final design from the per-partition kernels
    model = model.transform(
        VitisLink(
            self.platform,
            round(1000 / self.period_ns),
            strategy=self.strategy,
            enable_debug=self.enable_debug,
        )
    )
    # set platform attribute for correct remote execution
    model.set_metadata_prop("platform", "alveo")
    return (model, False)
def test_make_pynq_driver(self, topology, wbits, abits, QONNX_export, kind):
    """Generate the PYNQ driver for a previously built checkpoint and save
    the result as the next checkpoint in the end-to-end flow."""
    # Alveo driver generation requires a Vitis installation
    if kind == "alveo" and "VITIS_PATH" not in os.environ:
        pytest.skip("VITIS_PATH not set")
    build_chkpt = get_checkpoint_name(
        topology, wbits, abits, QONNX_export, "build_" + kind
    )
    model = load_test_checkpoint_or_skip(build_chkpt)
    # map the board kind onto the driver platform name
    driver_platform = {"zynq": "zynq-iodma", "alveo": "alveo"}[kind]
    model = model.transform(MakePYNQDriver(driver_platform))
    model.save(
        get_checkpoint_name(topology, wbits, abits, QONNX_export, "driver_" + kind)
    )
def test_fpgadataflow_fifo_rtlsim(Shape, folded_shape, depth, finn_dtype):
    """End-to-end FIFO test: verify pass-through behavior in rtlsim, then
    synthesize, deploy to a PYNQ board and run a remote throughput test.

    Requires PYNQ_IP to be set; PYNQ_USERNAME/PYNQ_PASSWORD/PYNQ_PORT/
    PYNQ_TARGET_DIR are optional overrides.
    """
    # generate input data
    x = gen_finn_dt_tensor(finn_dtype, Shape)
    input_dict = prepare_inputs(x, finn_dtype)

    model = make_single_fifo_modelwrapper(Shape, depth, folded_shape, finn_dtype)

    model = model.transform(SetExecMode("rtlsim"))
    model = model.transform(InsertTLastMarker())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(PrepareIP(test_fpga_part, target_clk_ns))
    model = model.transform(HLSSynthIP())
    model = model.transform(PrepareRTLSim())
    y = oxe.execute_onnx(model, input_dict)["outp"]

    # a FIFO must pass its input through unchanged
    assert (
        y == x
    ).all(), """The output values are not the same as the input values anymore."""
    assert y.shape == tuple(Shape), """The output shape is incorrect."""

    model = model.transform(ReplaceVerilogRelPaths())
    model = model.transform(CreateStitchedIP(test_fpga_part))
    model = model.transform(MakePYNQProject(test_pynq_board))
    model = model.transform(SynthPYNQProject())
    model = model.transform(MakePYNQDriver())
    ip = os.environ["PYNQ_IP"]
    username = os.getenv("PYNQ_USERNAME", "xilinx")
    password = os.getenv("PYNQ_PASSWORD", "xilinx")
    # FIX: use a string default so port has a consistent type regardless of
    # whether the env var is set (os.getenv returns str when the var exists,
    # but the original default 22 was an int)
    port = os.getenv("PYNQ_PORT", "22")
    target_dir = os.getenv("PYNQ_TARGET_DIR", "/home/xilinx/finn")
    model = model.transform(DeployToPYNQ(ip, port, username, password, target_dir))
    res = throughput_test(model)
    # the throughput test must report every one of these metrics
    expected_keys = [
        "runtime[ms]",
        "throughput[images/s]",
        "DRAM_in_bandwidth[Mb/s]",
        "DRAM_out_bandwidth[Mb/s]",
    ]
    for key in expected_keys:
        assert (
            key in res
        ), """Throughput test not successful, no value for {} in result dictionary""".format(
            key
        )
def apply(self, model):
    """Run the ZYNQ (iodma) build flow on the given model.

    Prepares the model globally, splits it into dataflow partitions, stitches
    each partition into an IP, assembles the ZYNQ project and finally creates
    the PYNQ driver. Returns ``(model, False)``.
    """
    # first infer layouts
    model = model.transform(InferDataLayouts())
    # prepare at global level, then break up into kernels
    for tfm in [
        InsertIODMA(64),
        InsertDWC(),
        Floorplan(),
        CreateDataflowPartition(),
    ]:
        model = model.transform(tfm)
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    # build each dataflow partition into its own stitched IP
    for partition in model.get_nodes_by_op_type("StreamingDataflowPartition"):
        node_prefix = partition.name + "_"
        sdp_inst = getCustomOp(partition)
        kmodel_file = sdp_inst.get_nodeattr("model")
        kmodel = ModelWrapper(kmodel_file)
        kmodel = kmodel.transform(InsertFIFO())
        kmodel = kmodel.transform(GiveUniqueNodeNames(node_prefix))
        kmodel.save(kmodel_file)
        kmodel = kmodel.transform(PrepareIP(self.fpga_part, self.period_ns))
        kmodel = kmodel.transform(HLSSynthIP())
        kmodel = kmodel.transform(
            CreateStitchedIP(
                self.fpga_part, self.period_ns, sdp_inst.onnx_node.name, True
            )
        )
        kmodel.set_metadata_prop("platform", "zynq-iodma")
        kmodel.save(kmodel_file)
    # assemble the overall design from the per-partition IPs
    model = model.transform(
        MakeZYNQProject(self.platform, enable_debug=self.enable_debug)
    )
    # set platform attribute for correct remote execution
    model.set_metadata_prop("platform", "zynq-iodma")
    # create driver
    model = model.transform(MakePYNQDriver(platform="zynq-iodma"))
    return (model, False)
def test_end2end_tfc_w1a2_make_driver():
    """Generate the PYNQ driver for the synthesized tfc-w1a2 model and save
    the resulting checkpoint."""
    synth_chkpt = build_dir + "/end2end_tfc_w1a2_synth.onnx"
    driven = ModelWrapper(synth_chkpt).transform(MakePYNQDriver())
    driven.save(build_dir + "/end2end_tfc_w1a2_pynq_driver.onnx")