Example #1
def test_add_pre_and_postproc(self, topology, wbits, abits):
    prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "import_and_tidy")
    model = load_test_checkpoint_or_skip(prev_chkpt_name)
    global_inp_name = model.graph.input[0].name
    ishape = model.get_tensor_shape(global_inp_name)
    # preprocessing: torchvision's ToTensor divides uint8 inputs by 255
    totensor_pyt = ToTensor()
    chkpt_preproc_name = get_checkpoint_name(topology, wbits, abits, "preproc")
    bo.export_finn_onnx(totensor_pyt, ishape, chkpt_preproc_name)
    assert os.path.isfile(chkpt_preproc_name)
    # join preprocessing and core model
    pre_model = ModelWrapper(chkpt_preproc_name)
    model = model.transform(MergeONNXModels(pre_model))
    # add input quantization annotation: UINT8 for all BNN-PYNQ models
    global_inp_name = model.graph.input[0].name
    model.set_tensor_datatype(global_inp_name, DataType.UINT8)
    # postprocessing: insert Top-1 node at the end
    model = model.transform(InsertTopK(k=1))
    chkpt_name = get_checkpoint_name(topology, wbits, abits, "pre_post")
    # tidy-up again
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(InferDataTypes())
    model = model.transform(RemoveStaticGraphInputs())
    model.save(chkpt_name)
    assert os.path.isfile(chkpt_name)
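
The snippet above is a test method from FINN's end-to-end BNN-PYNQ suite and is shown without its imports. The block below lists the ones it relies on, assuming the pre-qonnx FINN module layout (newer FINN releases moved ModelWrapper, DataType and several transformations into the qonnx package); note that ToTensor here is FINN's exportable nn.Module from finn.util.pytorch, not torchvision's transform. get_checkpoint_name and load_test_checkpoint_or_skip are helpers from the same test suite.

# imports assumed by the snippet above (pre-qonnx FINN module layout)
import os
import brevitas.onnx as bo
from finn.core.modelwrapper import ModelWrapper
from finn.core.datatype import DataType
from finn.util.pytorch import ToTensor  # divides uint8 inputs by 255, like torchvision's ToTensor
from finn.transformation.merge_onnx_models import MergeONNXModels
from finn.transformation.insert_topk import InsertTopK
from finn.transformation.infer_shapes import InferShapes
from finn.transformation.fold_constants import FoldConstants
from finn.transformation.infer_datatypes import InferDataTypes
from finn.transformation.general import (
    GiveReadableTensorNames,
    GiveUniqueNodeNames,
    RemoveStaticGraphInputs,
)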
Example #2
def get_golden_io_pair(topology, wbits, abits, preproc=ToTensor(), return_topk=None):
    # run the trained Brevitas network on an example input to produce a
    # golden (reference) input/output pair for later verification steps
    (model, ishape) = get_trained_network_and_ishape(topology, wbits, abits)
    input_tensor_npy = get_example_input(topology)
    input_tensor_torch = torch.from_numpy(input_tensor_npy).float()
    if preproc is not None:
        # apply the same preprocessing the exported model will contain
        input_tensor_torch = preproc.forward(input_tensor_torch).detach()
    output_tensor_npy = model.forward(input_tensor_torch).detach().numpy()
    if return_topk is not None:
        # optionally reduce the raw logits to the top-k class indices
        output_tensor_npy = get_topk(output_tensor_npy, k=return_topk)
    return (input_tensor_npy, output_tensor_npy)
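
This helper additionally assumes import torch plus get_trained_network_and_ishape, get_example_input and get_topk from FINN's test utilities. A minimal usage sketch, assuming a "tfc" W1A1 checkpoint produced as in Example #1 and FINN's execute_onnx for running the ONNX graph (the topology name and checkpoint stage here are illustrative):

# hypothetical usage: check the exported model against the golden top-1 pair
import numpy as np
from finn.core.onnx_exec import execute_onnx

(input_npy, golden_npy) = get_golden_io_pair("tfc", 1, 1, return_topk=1)
model = load_test_checkpoint_or_skip(get_checkpoint_name("tfc", 1, 1, "pre_post"))
iname = model.graph.input[0].name
oname = model.graph.output[0].name
# execute_onnx returns a dict of output tensors keyed by tensor name
produced = execute_onnx(model, {iname: input_npy})[oname]
assert np.isclose(produced, golden_npy).all()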
Example #3
def pre_processing(model):
    log("Starting Pre Processing")
    global_inp_name = model.graph.input[0].name
    ishape = model.get_tensor_shape(global_inp_name)
    # preprocessing: torchvision's ToTensor divides uint8 inputs by 255
    totensor_pyt = ToTensor()
    chkpt_preproc_name = build_dir + "tfc_preproc.onnx"  # note: build_dir must end with a path separator
    bo.export_finn_onnx(totensor_pyt, ishape, chkpt_preproc_name)
    # join preprocessing and core model
    pre_model = ModelWrapper(chkpt_preproc_name)
    model = model.transform(MergeONNXModels(pre_model))
    # add input quantization annotation: UINT8 for all BNN-PYNQ models
    global_inp_name = model.graph.input[0].name
    model.set_tensor_datatype(global_inp_name, DataType.UINT8)
    log("Finished Pre Processing!")
    save(model, "1_with_preproc")
    return model
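
log, save and build_dir are helpers from the surrounding build script and are not part of FINN; below are minimal stand-ins, assuming the behavior implied above (these exact definitions are guesses, not the original code):

# assumed stand-ins for the script-level helpers used above
build_dir = "/tmp/finn_build/"  # hypothetical output directory; note the trailing slash

def log(msg):
    # print a progress message for the current build step
    print("[build] " + msg)

def save(model, suffix):
    # write an intermediate ONNX checkpoint of the model into build_dir
    model.save(build_dir + "tfc_" + suffix + ".onnx")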