def xgraph_dpu_build_func(xgraph, work_dir=None, data_layout='NCHW', **kwargs) -> XGraph:
    """
    Build/schedule an XGraph for execution on the DPUCADX8G target

    Arguments:
    ----------
    xgraph: XGraph
        the xgraph to be built for execution
    work_dir: str
        the path to the work directory to be used; defaults to the
        current working directory at call time
    data_layout: str
        the layout to be used for the DPU partitions, is NCHW by default
        but can be overridden for certain runtimes, for example the
        decentq simulation runtime makes use of this because quantization
        simulation is done in NHWC data layout instead of the NCHW data
        layout of the DPU

    Returns:
    --------
    An XGraph built/scheduled for execution on DPU
    """
    # Resolve the default lazily: a `work_dir=os.getcwd()` default is
    # evaluated once at import time and would silently ignore any later
    # working-directory change.
    if work_dir is None:
        work_dir = os.getcwd()
    # NOTE DPU V1 layers are in NHWC format because of the tensorflow
    # intermediate structure we use to communicate with dpu v1 compiler
    return subgraph.xgraph_build_func(
        xgraph=xgraph,
        target='DPUCADX8G',
        xtype='DPU',
        layout=data_layout,
        work_dir=work_dir,
    )
def xgraph_build_func_simple(xgraph):
    """Build/schedule the given XGraph for the 'test_simple' target.

    Delegates to the generic subgraph build function with the
    TEST_SIMPLE xtype and NCHW data layout.
    """
    build_args = {
        'xgraph': xgraph,
        'target': 'test_simple',
        'xtype': 'TEST_SIMPLE',
        'layout': 'NCHW',
    }
    return subgraph.xgraph_build_func(**build_args)
def xgraph_build_func(xgraph):
    """Build/schedule the given XGraph for the 'test' target.

    Delegates to the generic subgraph build function with the
    TEST xtype and NHWC data layout.
    """
    build_args = {
        'xgraph': xgraph,
        'target': 'test',
        'xtype': 'TEST',
        'layout': 'NHWC',
    }
    return subgraph.xgraph_build_func(**build_args)
def xgraph_dpuv1_build_func(xgraph, work_dir=None, **kwargs):
    """Build/schedule an XGraph for execution on the dpuv1 target.

    Arguments:
    ----------
    xgraph: XGraph
        the xgraph to be built for execution
    work_dir: str
        the path to the work directory to be used; defaults to the
        current working directory at call time

    Returns:
    --------
    An XGraph built/scheduled for execution on DPU
    """
    # Resolve the default lazily: a `work_dir=os.getcwd()` default is
    # evaluated once at import time and would silently ignore any later
    # working-directory change.
    if work_dir is None:
        work_dir = os.getcwd()
    # NOTE DPU V1 layers are in NHWC format because of the tensorflow
    # intermediate structure we use to communicate with dpu v1 compiler
    # NOTE(review): the comment above says NHWC but the layout passed
    # below is NCHW — confirm which one the dpuv1 flow actually expects.
    return subgraph.xgraph_build_func(
        xgraph=xgraph,
        target='dpuv1',
        xtype='DPU',
        layout='NCHW',
        work_dir=work_dir,
    )
def xgraph_dpu_zcu104_build_func(xgraph, work_dir=None, **kwargs):
    """Build/schedule an XGraph for execution on the DPUCZDX8G-zcu104 target.

    Arguments:
    ----------
    xgraph: XGraph
        the xgraph to be built for execution
    work_dir: str
        the path to the work directory to be used; defaults to the
        current working directory at call time

    Returns:
    --------
    An XGraph built/scheduled for execution on DPU
    """
    # Resolve the default lazily: a `work_dir=os.getcwd()` default is
    # evaluated once at import time and would silently ignore any later
    # working-directory change.
    if work_dir is None:
        work_dir = os.getcwd()
    # TODO here or in optimizer, both?
    # DPU layers are in NHWC format because of the tensorflow
    # intermediate structure we use to communicate with
    # DECENT/DNNC
    return subgraph.xgraph_build_func(
        xgraph=xgraph,
        target='DPUCZDX8G-zcu104',
        xtype='DPU',
        layout='NHWC',
        work_dir=work_dir,
    )