Code Example #1
File: test_basic_onnx_exec.py Project: umav1511/finn
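This test runs FINN's ONNX executor over the bundled MNIST conv model: the full graph is executed with return_full_exec_context=True and checked against the reference output, then a subgraph spanning nodes 1 to 3 is re-executed from the recorded context and compared against the same intermediate tensors.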
def test_mnist_onnx_download_extract_run():
    # load the onnx model
    raw_m = get_data("finn", "data/onnx/mnist-conv/model.onnx")
    model = ModelWrapper(raw_m)
    model = model.transform(InferShapes())
    # load one of the test vectors
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    raw_o = get_data("finn",
                     "data/onnx/mnist-conv/test_data_set_0/output_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    output_tensor = onnx.load_tensor_from_string(raw_o)
    # run using FINN-based execution (full graph)
    input_dict = {"Input3": np_helper.to_array(input_tensor)}
    output_dict = oxe.execute_onnx(model,
                                   input_dict,
                                   return_full_exec_context=True)
    assert np.isclose(np_helper.to_array(output_tensor),
                      output_dict["Plus214_Output_0"],
                      atol=1e-3).all()
    # test subgraph execution
    start_node = model.graph.node[1]
    end_node = model.graph.node[3]
    subgraph_i_dict = {start_node.input[0]: output_dict[start_node.input[0]]}
    subgraph_o_dict = oxe.execute_onnx(
        model,
        subgraph_i_dict,
        return_full_exec_context=True,
        start_node=start_node,
        end_node=end_node,
    )
    assert np.isclose(subgraph_o_dict[end_node.output[0]],
                      output_dict[end_node.output[0]],
                      atol=1e-3).all()
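The snippets on this page omit their import boilerplate. As a point of reference, a minimal self-contained version of the shared load-and-execute pattern might look like the sketch below; the module paths follow the pre-qonnx FINN/finn-base layout used in these examples and may differ in newer releases.

import onnx
import onnx.numpy_helper as np_helper
from pkgutil import get_data

import finn.core.onnx_exec as oxe
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.infer_shapes import InferShapes

# load the bundled MNIST conv model (ModelWrapper accepts serialized bytes)
raw_m = get_data("finn.data", "onnx/mnist-conv/model.onnx")
model = ModelWrapper(raw_m)
model = model.transform(InferShapes())
# load one bundled test vector and execute the graph
raw_i = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
input_tensor = onnx.load_tensor_from_string(raw_i)
input_dict = {model.graph.input[0].name: np_helper.to_array(input_tensor)}
output_dict = oxe.execute_onnx(model, input_dict)
print(output_dict[model.graph.output[0].name])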
Code Example #2
File: test_renaming.py Project: fpjentzsch/finn-base
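This test checks the renaming transforms: GiveUniqueNodeNames assigns node names like Conv_0 and Add_1, and GiveReadableTensorNames derives tensor names such as Conv_0_param0 and global_in/global_out from them. Applying both transforms a second time must leave all names unchanged, and the renamed model must still reproduce the reference output.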
def test_renaming():
    # load the onnx model
    raw_m = get_data("finn.data", "onnx/mnist-conv/model.onnx")
    model = ModelWrapper(raw_m)
    model = model.transform(InferShapes())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    # do some basic checks
    assert model.graph.input[0].name == "global_in"
    assert model.graph.output[0].name == "global_out"
    assert model.graph.node[1].op_type == "Conv"
    assert model.graph.node[1].name == "Conv_0"
    assert model.graph.node[1].input[1] == "Conv_0_param0"
    assert model.graph.node[6].op_type == "Add"
    assert model.graph.node[6].name == "Add_1"
    assert model.graph.node[6].input[1] == "Add_1_param0"
    # ensure running renaming twice still yields the same names
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    assert model.graph.node[1].op_type == "Conv"
    assert model.graph.node[1].name == "Conv_0"
    assert model.graph.node[1].input[1] == "Conv_0_param0"
    assert model.graph.node[6].op_type == "Add"
    assert model.graph.node[6].name == "Add_1"
    assert model.graph.node[6].input[1] == "Add_1_param0"
    # run renamed model to make sure we did not mess up the topology
    raw_i = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
    raw_o = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/output_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    output_tensor = onnx.load_tensor_from_string(raw_o)
    input_dict = {"global_in": np_helper.to_array(input_tensor)}
    output_dict = oxe.execute_onnx(model, input_dict)
    assert np.isclose(
        np_helper.to_array(output_tensor), output_dict["global_out"], atol=1e-3
    ).all()
Code Example #3
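This test verifies that FoldConstants, which pre-computes constant subexpressions of the graph into initializers, leaves the output of the MNIST conv model unchanged.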
def test_const_folding():
    raw_m = get_data("finn.data", "onnx/mnist-conv/model.onnx")
    model = ModelWrapper(raw_m)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    raw_i = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
    raw_o = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/output_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    output_tensor = onnx.load_tensor_from_string(raw_o)
    input_dict = {"Input3": np_helper.to_array(input_tensor)}
    output_dict = oxe.execute_onnx(model, input_dict)
    assert np.isclose(
        np_helper.to_array(output_tensor), output_dict["Plus214_Output_0"], atol=1e-3
    ).all()
Code Example #4
File: test_streamline_fc.py Project: umav1511/finn
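This test streamlines an exported Brevitas FC model and pins down the exact counts of initializers, value_info entries and quantization annotations remaining after RemoveUnusedTensors, in addition to checking numerical equivalence with the unstreamlined model.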
def test_streamline_fc(size, wbits, abits):
    if size == "LFC" and wbits == 2 and abits == 2:
        pytest.skip("No LFC-w2a2 present at the moment")
    if wbits > abits:
        pytest.skip("No wbits > abits cases at the moment")
    nname = "%s_%dW%dA" % (size, wbits, abits)
    finn_onnx = export_onnx_path + "/%s.onnx" % nname
    fc = get_test_model_trained(size, wbits, abits)
    bo.export_finn_onnx(fc, (1, 1, 28, 28), finn_onnx)
    model = ModelWrapper(finn_onnx)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(RemoveStaticGraphInputs())
    # load one of the test vectors
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    # run using FINN-based execution
    input_dict = {"global_in": nph.to_array(input_tensor)}
    expected_ctx = oxe.execute_onnx(model, input_dict, True)
    expected = expected_ctx[model.graph.output[0].name]
    model = model.transform(Streamline())
    model = model.transform(RemoveUnusedTensors())
    assert len(model.graph.initializer) == 11
    assert len(model.graph.value_info) == 21
    assert len(model.graph.quantization_annotation) == 20
    produced_ctx = oxe.execute_onnx(model, input_dict, True)
    produced = produced_ctx[model.graph.output[0].name]
    assert np.isclose(expected, produced, atol=1e-3).all()
Code Example #5
File: basic_test.py Project: harshit98/onnx
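From the ONNX test suite: a TensorProto is round-tripped three ways, as a serialized string, through a file-like object, and via a named temporary file, and compared for equality after each load.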
    def test_save_and_load_tensor(self):  # type: () -> None
        proto = self._simple_tensor()
        cls = TensorProto
        proto_string = onnx._serialize(proto)

        # Test if input is string
        loaded_proto = onnx.load_tensor_from_string(proto_string)
        self.assertTrue(proto == loaded_proto)

        # Test if input has a read function
        f = io.BytesIO()
        onnx.save_tensor(loaded_proto, f)
        f = io.BytesIO(f.getvalue())
        loaded_proto = onnx.load_tensor(f, cls)
        self.assertTrue(proto == loaded_proto)

        # Test if input is a file name
        try:
            tfile = tempfile.NamedTemporaryFile(delete=False)
            onnx.save_tensor(proto, tfile)
            tfile.close()

            loaded_proto = onnx.load_tensor(tfile.name, cls)
            self.assertTrue(proto == loaded_proto)
        finally:
            os.remove(tfile.name)
Code Example #6
File: test_brevitas_fc.py Project: zxh0717/finn
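This test exports a trained Brevitas FC network to FINN-ONNX, executes it with FINN, and compares the result against a direct PyTorch/Brevitas forward pass on the same MNIST test vector.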
def test_brevitas_fc_onnx_export_and_exec(size, wbits, abits):
    if size == "LFC" and wbits == 2 and abits == 2:
        pytest.skip("No LFC-w2a2 present at the moment")
    if wbits > abits:
        pytest.skip("No wbits > abits cases at the moment")
    nname = "%s_%dW%dA" % (size, wbits, abits)
    finn_onnx = export_onnx_path + "/%s.onnx" % nname
    fc = get_test_model_trained(size, wbits, abits)
    bo.export_finn_onnx(fc, (1, 1, 28, 28), finn_onnx)
    model = ModelWrapper(finn_onnx)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    # load one of the test vectors
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    # run using FINN-based execution
    input_dict = {"0": nph.to_array(input_tensor)}
    output_dict = oxe.execute_onnx(model, input_dict)
    produced = output_dict[list(output_dict.keys())[0]]
    # run using PyTorch/Brevitas
    input_tensor = torch.from_numpy(nph.to_array(input_tensor)).float()
    assert input_tensor.shape == (1, 1, 28, 28)
    # do forward pass in PyTorch/Brevitas
    expected = fc.forward(input_tensor).detach().numpy()
    assert np.isclose(produced, expected, atol=1e-3).all()
Code Example #7
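Here bo.enable_debug installs a hook that records intermediate Brevitas activations; after export, the same tensors appear in FINN's full execution context under the debug-marker names, and all 16 common tensors are compared at atol=1e-5.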
def test_brevitas_debug():
    finn_onnx = "test_brevitas_debug.onnx"
    fc = get_test_model_trained("TFC", 2, 2)
    dbg_hook = bo.enable_debug(fc)
    bo.export_finn_onnx(fc, (1, 1, 28, 28), finn_onnx)
    model = ModelWrapper(finn_onnx)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(RemoveStaticGraphInputs())
    assert len(model.graph.input) == 1
    assert len(model.graph.output) == 1
    # load one of the test vectors
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    # run using FINN-based execution
    input_dict = {"0": nph.to_array(input_tensor)}
    output_dict = oxe.execute_onnx(model, input_dict, return_full_exec_context=True)
    produced = output_dict[model.graph.output[0].name]
    # run using PyTorch/Brevitas
    input_tensor = torch.from_numpy(nph.to_array(input_tensor)).float()
    assert input_tensor.shape == (1, 1, 28, 28)
    # do forward pass in PyTorch/Brevitas
    expected = fc.forward(input_tensor).detach().numpy()
    assert np.isclose(produced, expected, atol=1e-3).all()
    # check all tensors at debug markers
    names_brevitas = set(dbg_hook.values.keys())
    names_finn = set(output_dict.keys())
    names_common = names_brevitas.intersection(names_finn)
    assert len(names_common) == 16
    for dbg_name in names_common:
        tensor_pytorch = dbg_hook.values[dbg_name].detach().numpy()
        tensor_finn = output_dict[dbg_name]
        assert np.isclose(tensor_finn, tensor_pytorch, atol=1e-5).all()
    os.remove(finn_onnx)
Code Example #8
File: test_end2end_tfc_w1a2.py Project: zxh0717/finn
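An end-to-end hardware test: the streamlined model provides the golden answer, a parent graph delegates to the deployed child model through its StreamingDataflowPartition node, and the test is skipped unless the PYNQ_IP environment variable is set.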
def test_end2end_tfc_w1a2_run_on_pynq():
    # use the streamlined model as the "golden" model for right answers
    golden = ModelWrapper(build_dir + "/end2end_tfc_w1a2_streamlined.onnx")
    iname = golden.graph.input[0].name
    oname = golden.graph.output[0].name
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    x = nph.to_array(input_tensor)
    # x = np.zeros(ishape, dtype=np.float32)
    # run using FINN-based execution
    ret_golden = execute_onnx(golden, {iname: x}, True)
    y_golden = ret_golden[oname]
    # set up parent+child graph to test
    # we'll use models from the previous step as the child model
    parent_model = ModelWrapper(build_dir +
                                "/end2end_tfc_w1a2_dataflow_parent.onnx")
    iname = parent_model.graph.input[0].name
    oname = parent_model.graph.output[0].name
    try:
        ip = os.environ["PYNQ_IP"]  # NOQA
        if ip == "":
            pytest.skip("PYNQ board IP address not specified")
        # produce results by remote execution of the deployed model on the PYNQ board
        sdp_node = parent_model.get_nodes_by_op_type(
            "StreamingDataflowPartition")[0]
        sdp_node = getCustomOp(sdp_node)
        sdp_node.set_nodeattr("model",
                              build_dir + "/end2end_tfc_w1a2_pynq_deploy.onnx")
        ret = execute_onnx(parent_model, {iname: x}, True)
        y = ret[oname]
        assert np.isclose(y, y_golden).all()

    except KeyError:
        pytest.skip("PYNQ board IP address not specified")
Code Example #9
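This test computes a golden top-k by argsort on the unmodified model's output, then inserts a TopK node with InsertTopK(k) and checks that executing the extended graph yields the same class indices.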
def test_topk_insert(k):
    raw_m = get_data("finn.data", "onnx/mnist-conv/model.onnx")
    model = ModelWrapper(raw_m)
    model.model.opset_import[0].version = 11

    # do transformations (no topk)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(InferDataTypes())

    # verification: load a test vector, run it through the net,
    # then insert top-k and check that the new output gives the top-k indices
    raw_i = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    input_tensor = nph.to_array(input_tensor)
    input_dict = {"global_in": input_tensor}
    output_golden = oxe.execute_onnx(model, input_dict)["global_out"]
    output_golden_topk = np.flip(output_golden.flatten().argsort())[:k]
    output_golden_topk = output_golden_topk.flatten()

    # insert top-k
    model = model.transform(InsertTopK(k))
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(InferShapes())

    # verify output of top-k
    output_dict_topk = oxe.execute_onnx(model, input_dict)
    output_pysim_topk = output_dict_topk[list(output_dict_topk.keys())[0]]
    output_pysim_topk = output_pysim_topk.astype(int).flatten()  # np.int was removed in NumPy 1.24

    assert np.array_equal(output_golden_topk, output_pysim_topk)
Code Example #10
def test_topk_insert(k):
    tfc = get_test_model_trained("TFC", 1, 1)
    bo.export_finn_onnx(tfc, (1, 1, 28, 28), export_onnx_path)
    model = ModelWrapper(export_onnx_path)

    # do transformations (no topk)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(InferDataTypes())

    # verification: load a test vector, get the golden top-k from the
    # Brevitas net, then check the exported model after top-k insertion
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    input_brevitas = torch.from_numpy(nph.to_array(input_tensor)).float()
    output_golden = tfc.forward(input_brevitas).detach().numpy()
    output_golden_topk = np.flip(output_golden.flatten().argsort())[:k]
    output_golden_topk = output_golden_topk.flatten()

    input_dict = {"global_in": nph.to_array(input_tensor)}

    # insert top-k
    model = model.transform(InsertTopK(k))
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(InferShapes())

    # verify output of top-k
    output_dict_topk = oxe.execute_onnx(model, input_dict)
    output_pysim_topk = output_dict_topk[list(output_dict_topk.keys())[0]]
    output_pysim_topk = output_pysim_topk.astype(int).flatten()  # np.int was removed in NumPy 1.24

    assert np.array_equal(output_golden_topk, output_pysim_topk)
Code Example #11
def create_test_data(model="TFC"):
    brevitas_model = get_test_model_trained(model, 1, 1)  # 1 of: TFC, SFC, LFC
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb"
                     )  # Image of a Number 2 from the MNIST Dataset
    input_tensor = onnx.load_tensor_from_string(raw_i)
    input_tensor_npy = nph.to_array(input_tensor)
    input_tensor_pyt = torch.from_numpy(input_tensor_npy).float()
    golden = brevitas_model.forward(input_tensor_pyt).detach()
    input_dict = {"global_in": input_tensor_npy}
    return (input_dict, golden)
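A typical call, assuming the trained-model helpers above are in scope; note that golden is returned as a detached torch tensor rather than a NumPy array:

input_dict, golden = create_test_data("TFC")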
Code Example #12
def test_batchnorm_to_affine_lfc_w1a1():
    lfc = get_test_model_trained("LFC", 1, 1)
    bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    new_model = model.transform(BatchNormToAffine())
    # load one of the test vectors
    raw_i = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    input_dict = {"0": nph.to_array(input_tensor)}
    assert oxe.compare_execution(model, new_model, input_dict)
    os.remove(export_onnx_path)
Code Example #13
File: test_sign_to_thres.py Project: zxh0717/finn
def test_sign_to_thres():
    lfc = get_test_model_trained("LFC", 1, 1)
    bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    new_model = model.transform(ConvertSignToThres())
    assert new_model.graph.node[3].op_type == "MultiThreshold"
    # load one of the test vectors
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    input_dict = {"0": nph.to_array(input_tensor)}
    assert oxe.compare_execution(model, new_model, input_dict)
    os.remove(export_onnx_path)
Code Example #14
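A helper that returns an example input: the bundled MNIST test vector for the fully connected (fc) topologies, or a CIFAR-10 sample of class 3 for the cnv topology.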
def get_example_input(topology):
    "Get example numpy input tensor for given topology."

    if "fc" in topology:
        raw_i = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
        onnx_tensor = onnx.load_tensor_from_string(raw_i)
        return nph.to_array(onnx_tensor)
    elif topology == "cnv":
        fn = pk.resource_filename(
            "finn.qnn-data", "cifar10/cifar10-test-data-class3.npz"
        )
        input_tensor = np.load(fn)["arr_0"].astype(np.float32)
        return input_tensor
    else:
        raise Exception("Unknown topology, can't return example input")
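A hedged usage sketch; the topology strings follow the FINN test-suite convention, where any name containing "fc" selects the MNIST vector:

x = get_example_input("tfc")      # hypothetical call
assert x.shape == (1, 1, 28, 28)  # shape of the bundled MNIST test vector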
Code Example #15
def test_streamline_fc(size, wbits, abits):
    nname = "%s_%dW%dA" % (size, wbits, abits)
    finn_onnx = export_onnx_path + "/%s.onnx" % nname
    fc = get_test_model_trained(size, wbits, abits)
    bo.export_finn_onnx(fc, (1, 1, 28, 28), finn_onnx)
    model = ModelWrapper(finn_onnx)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    # load one of the test vectors
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    # run using FINN-based execution
    input_dict = {"global_in": nph.to_array(input_tensor)}
    expected_ctx = oxe.execute_onnx(model, input_dict, True)
    expected = expected_ctx[model.graph.output[0].name]
    model = model.transform(Streamline())
    produced_ctx = oxe.execute_onnx(model, input_dict, True)
    produced = produced_ctx[model.graph.output[0].name]
    assert np.isclose(expected, produced, atol=1e-3).all()
Code Example #16
def test_convert_bipolar_matmul_to_xnorpopcountmatmul():
    lfc = get_test_model_trained("LFC", 1, 1)
    bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(ConvertSignToThres())
    # load one of the test vectors
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    # run using FINN-based execution
    input_dict = {"global_in": nph.to_array(input_tensor)}
    expected_ctx = oxe.execute_onnx(model, input_dict, True)
    expected = expected_ctx[model.graph.output[0].name]
    model = model.transform(ConvertBipolarMatMulToXnorPopcount())
    produced_ctx = oxe.execute_onnx(model, input_dict, True)
    produced = produced_ctx[model.graph.output[0].name]
    assert np.isclose(expected, produced, atol=1e-3).all()
    os.remove(export_onnx_path)
Code Example #17
File: test_end2end_tfc_w1a2.py Project: zxh0717/finn
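This test runs the same parent/child dataflow graph three times, repointing the model attribute of the StreamingDataflowPartition node at the cppsim, node-by-node rtlsim and whole-network (stitched-IP) rtlsim results in turn, and compares each against the golden streamlined model.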
def test_end2end_tfc_w1a2_verify_all():
    # use the streamlined model as the "golden" model for right answers
    golden = ModelWrapper(build_dir + "/end2end_tfc_w1a2_streamlined.onnx")
    iname = golden.graph.input[0].name
    oname = golden.graph.output[0].name
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    x = nph.to_array(input_tensor)
    # x = np.zeros(ishape, dtype=np.float32)
    ret_golden = execute_onnx(golden, {iname: x}, True)
    y_golden = ret_golden[oname]
    # set up parent+child graph to test
    # we'll use models from the previous step as the child model
    parent_model = ModelWrapper(build_dir +
                                "/end2end_tfc_w1a2_dataflow_parent.onnx")
    iname = parent_model.graph.input[0].name
    oname = parent_model.graph.output[0].name
    # produce results with cppsim
    sdp_node = parent_model.get_nodes_by_op_type(
        "StreamingDataflowPartition")[0]
    sdp_node = getCustomOp(sdp_node)
    sdp_node.set_nodeattr("model",
                          build_dir + "/end2end_tfc_w1a2_ipstitch_cppsim.onnx")
    ret_cppsim = execute_onnx(parent_model, {iname: x}, True)
    y_cppsim = ret_cppsim[oname]
    # produce results with node-by-node rtlsim
    sdp_node.set_nodeattr(
        "model",
        build_dir + "/end2end_tfc_w1a2_ipstitch_nodebynode_rtlsim.onnx")
    ret_nodebynode_rtlsim = execute_onnx(parent_model, {iname: x}, True)
    y_nodebynode_rtlsim = ret_nodebynode_rtlsim[oname]
    # produce results with whole-network (stitched ip) rtlsim
    sdp_node.set_nodeattr(
        "model", build_dir + "/end2end_tfc_w1a2_ipstitch_whole_rtlsim.onnx")
    ret_whole_rtlsim = execute_onnx(parent_model, {iname: x}, True)
    y_whole_rtlsim = ret_whole_rtlsim[oname]
    assert np.isclose(y_golden, y_cppsim).all()
    assert np.isclose(y_golden, y_nodebynode_rtlsim).all()
    assert np.isclose(y_golden, y_whole_rtlsim).all()
Code Example #18
def test_conv_lowering_convmnist():

    # load the onnx model
    raw_m = get_data("finn.data", "onnx/mnist-conv/model.onnx")
    model = ModelWrapper(raw_m)
    # model = model.transform(InferShapes())
    # model = model.transform(FoldConstants())
    raw_i = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    input_tensor = np_helper.to_array(input_tensor)
    # execute imported model to get expected answer
    input_name = model.graph.input[0].name
    output_name = model.graph.output[0].name
    input_dict = {input_name: input_tensor}
    output_dict_e = oxe.execute_onnx(model, input_dict)
    expected = output_dict_e[output_name]
    # execute transformed model and compare
    model = model.transform(LowerConvsToMatMul())
    model = model.transform(InferShapes())
    output_dict_p = oxe.execute_onnx(model, input_dict)
    produced = output_dict_p[output_name]
    assert np.isclose(produced, expected).all()
Code Example #19
def get_brev_model_and_sample_inputs(model_name, wbits, abits):
    if "FC" in model_name:
        in_shape = (1, 1, 28, 28)
        raw_i = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
        input_tensor = onnx.load_tensor_from_string(raw_i)
        input_tensor = nph.to_array(input_tensor)
        brev_model = get_test_model_trained(model_name, wbits, abits)
    elif model_name == "CNV":
        in_shape = (1, 3, 32, 32)
        fn = pk.resource_filename(
            "finn.qnn-data", "cifar10/cifar10-test-data-class3.npz"
        )
        input_tensor = np.load(fn)["arr_0"].astype(np.float32)
        input_tensor = input_tensor / 255
        brev_model = get_test_model_trained(model_name, wbits, abits)
    elif model_name == "mobilenet":
        in_shape = (1, 3, 224, 224)
        np.random.seed(42)
        input_tensor = np.random.normal(size=in_shape).astype(dtype=np.float32)
        brev_model = get_test_model_trained(model_name, 4, 4)
    else:
        raise RuntimeError(f"The model with the name {model_name} is not supported.")

    return brev_model, in_shape, input_tensor
Code Example #20
File: test_brevitas_fc.py Project: Xilinx/finn
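The same export-and-execute check as Code Example #6, additionally parameterized over a QONNX export path: BrevitasONNXManager.export followed by qonnx_cleanup and ConvertQONNXtoFINN, instead of the direct FINN-ONNX exporter.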
def test_brevitas_fc_onnx_export_and_exec(size, wbits, abits, QONNX_export):
    if size == "LFC" and wbits == 2 and abits == 2:
        pytest.skip("No LFC-w2a2 present at the moment")
    if wbits > abits:
        pytest.skip("No wbits > abits cases at the moment")
    nname = "%s_%dW%dA_QONNX-%d" % (size, wbits, abits, QONNX_export)
    finn_onnx = export_onnx_path + "/%s.onnx" % nname
    fc = get_test_model_trained(size, wbits, abits)
    ishape = (1, 1, 28, 28)
    if QONNX_export:
        BrevitasONNXManager.export(fc, ishape, finn_onnx)
        qonnx_cleanup(finn_onnx, out_file=finn_onnx)
        model = ModelWrapper(finn_onnx)
        model = model.transform(ConvertQONNXtoFINN())
        model.save(finn_onnx)
    else:
        bo.export_finn_onnx(fc, ishape, finn_onnx)
    model = ModelWrapper(finn_onnx)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(RemoveStaticGraphInputs())
    assert len(model.graph.input) == 1
    assert len(model.graph.output) == 1
    # load one of the test vectors
    raw_i = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    # run using FINN-based execution
    input_dict = {model.graph.input[0].name: nph.to_array(input_tensor)}
    output_dict = oxe.execute_onnx(model, input_dict)
    produced = output_dict[list(output_dict.keys())[0]]
    # run using PyTorch/Brevitas
    input_tensor = torch.from_numpy(nph.to_array(input_tensor)).float()
    assert input_tensor.shape == (1, 1, 28, 28)
    # do forward pass in PyTorch/Brevitas
    expected = fc.forward(input_tensor).detach().numpy()
    assert np.isclose(produced, expected, atol=1e-3).all()
Code Example #21
def test_convert_to_hls_layers_tfc_w1a2():
    tfc = get_test_model_trained("TFC", 1, 2)
    bo.export_finn_onnx(tfc, (1, 1, 28, 28), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(Streamline())
    from finn.transformation.fpgadataflow.convert_to_hls_layers import (
        InferQuantizedStreamingFCLayer,
    )

    model = model.transform(InferQuantizedStreamingFCLayer())

    fc0 = model.graph.node[2]
    assert fc0.op_type == "StreamingFCLayer_Batch"
    assert model.get_tensor_shape(fc0.input[0]) == [1, 784]
    assert model.get_tensor_shape(fc0.input[1]) == [784, 64]
    assert model.get_tensor_shape(fc0.input[2]) == [64, 2]
    fc1 = model.graph.node[3]
    assert fc1.op_type == "StreamingFCLayer_Batch"
    assert model.get_tensor_shape(fc1.input[0]) == [1, 64]
    assert model.get_tensor_shape(fc1.input[1]) == [64, 64]
    assert model.get_tensor_shape(fc1.input[2]) == [64, 2]
    fc2 = model.graph.node[4]
    assert fc2.op_type == "StreamingFCLayer_Batch"
    assert model.get_tensor_shape(fc2.input[0]) == [1, 64]
    assert model.get_tensor_shape(fc2.input[1]) == [64, 64]
    assert model.get_tensor_shape(fc2.input[2]) == [64, 2]
    fc3 = model.graph.node[5]
    assert fc3.op_type == "StreamingFCLayer_Batch"
    assert model.get_tensor_shape(fc3.input[0]) == [1, 64]
    assert model.get_tensor_shape(fc3.input[1]) == [64, 10]
    fc0w = getCustomOp(fc0)
    fc0w.set_nodeattr("SIMD", 784)
    fc0w.set_nodeattr("PE", 16)
    fc1w = getCustomOp(fc1)
    fc1w.set_nodeattr("SIMD", 16)
    fc1w.set_nodeattr("PE", 16)
    fc2w = getCustomOp(fc2)
    fc2w.set_nodeattr("SIMD", 16)
    fc2w.set_nodeattr("PE", 16)
    fc3w = getCustomOp(fc3)
    fc3w.set_nodeattr("SIMD", 16)
    fc3w.set_nodeattr("PE", 10)
    model = model.transform(PrepareCppSim())
    model = model.transform(CompileCppSim())
    model = model.transform(SetExecMode("cppsim"))
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    # run using FINN-based execution
    input_dict = {"global_in": nph.to_array(input_tensor)}
    output_dict = oxe.execute_onnx(model, input_dict, True)
    produced = output_dict[model.graph.output[0].name]
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(Streamline())
    golden_output_dict = oxe.execute_onnx(model, input_dict, True)
    expected = golden_output_dict[model.graph.output[0].name]
    assert np.isclose(produced, expected, atol=1e-3).all()
    os.remove(export_onnx_path)
Code Example #22
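MergeONNXModels(model1) prepends model1 to the model it is applied to. The test checks that the merged graph reproduces the chained outputs, that its node count is the sum of both graphs, and that a dummy sparsity annotation and the UINT8 input datatype survive the merge.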
def test_merge_onnx_models():
    # load pre model
    raw_m = get_data("finn", "data/onnx/mnist-conv/model.onnx")
    model1 = ModelWrapper(raw_m)
    # the input for model1 comes from a uint8 vector so we set the finn datatype
    # of the input tensor to DataType.UINT8 to verify that the datatypes are correctly
    # preserved in the transformed model
    model1.set_tensor_datatype(model1.graph.input[0].name, DataType.UINT8)
    model1 = model1.transform(InferShapes())
    model1 = model1.transform(GiveUniqueNodeNames())
    model1 = model1.transform(GiveReadableTensorNames())

    # set up post model
    shape = [1, 10]
    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, shape)
    a0 = helper.make_tensor_value_info("a0", TensorProto.FLOAT, [])
    a1 = helper.make_tensor_value_info("a1", TensorProto.FLOAT, [])
    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, shape)

    mul_node = helper.make_node("Mul", ["inp", "a0"], ["mul_out"])
    div_node = helper.make_node("Div", ["mul_out", "a1"], ["outp"])

    graph = helper.make_graph(
        nodes=[mul_node, div_node],
        name="model2-graph",
        inputs=[inp],
        outputs=[outp],
        value_info=[a0, a1],
    )

    model2 = helper.make_model(graph, producer_name="model2")
    model2 = ModelWrapper(model2)
    # initialize model2
    a0_value = np.random.uniform(low=0, high=1, size=(1)).astype(np.float32)
    model2.set_initializer("a0", a0_value)
    a1_value = np.random.uniform(low=0.1, high=1, size=(1)).astype(np.float32)
    model2.set_initializer("a1", a1_value)
    # set a dummy sparsity annotation to check if it gets correctly transferred
    # to the merged model
    sparsity = {"dw": {"kernel_shape": 0}}
    model2.set_tensor_sparsity("a1", sparsity)
    model2 = model2.transform(InferShapes())
    model2 = model2.transform(InferDataTypes())
    model2 = model2.transform(InferDataLayouts())
    model2 = model2.transform(GiveUniqueNodeNames())
    model2 = model2.transform(GiveReadableTensorNames())

    # simulate the models before the merging and pass the output of model1 to model2
    # load one of the test vectors
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    inp_values = onnx.load_tensor_from_string(raw_i)
    inp_values = np_helper.to_array(inp_values)
    idict = {model1.graph.input[0].name: inp_values}
    odict = oxe.execute_onnx(model1, idict)
    temp = odict[model1.graph.output[0].name]

    idict = {model2.graph.input[0].name: temp}
    odict = oxe.execute_onnx(model2, idict)
    outp = odict[model2.graph.output[0].name]
    # merge models
    model_transformed = model2.transform(MergeONNXModels(model1))

    idict = {model_transformed.graph.input[0].name: inp_values}
    odict = oxe.execute_onnx(model_transformed, idict)
    outp_transformed = odict[model_transformed.graph.output[0].name]

    assert (outp == outp_transformed).all()
    assert len(model_transformed.graph.node) == len(model1.graph.node) + len(
        model2.graph.node
    )
    # to test if the value is preserved we set the sparsity annotation of input[1]
    # of the division block to a dummy value, we can now look for the division block
    # and check if the sparsity annotation is still the same
    for n in model_transformed.graph.node:
        if n.op_type == "Div":
            tensor_name = n.input[1]
            set_sparsity = model_transformed.get_tensor_sparsity(tensor_name)
            assert sparsity == set_sparsity

    # check if finn datatype of graph.input[0] is still set to UINT8
    assert model_transformed.get_tensor_datatype("global_in") == DataType.UINT8
Code Example #23
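This test converts a streamlined binarized TFC network into StreamingFCLayer_Batch HLS layers, sets SIMD/PE folding factors per layer, compiles and executes the result with cppsim, and compares it against the PyTorch/Brevitas forward pass.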
def test_convert_to_hls_layers_tfc_w1a1():
    tfc = get_test_model_trained("TFC", 1, 1)
    bo.export_finn_onnx(tfc, (1, 1, 28, 28), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(Streamline())
    model = model.transform(ConvertBipolarMatMulToXnorPopcount())
    model = model.transform(absorb.AbsorbAddIntoMultiThreshold())
    model = model.transform(absorb.AbsorbMulIntoMultiThreshold())
    model = model.transform(RoundAndClipThresholds())
    model = model.transform(to_hls.InferBinaryStreamingFCLayer())
    fc0 = model.graph.node[2]
    assert fc0.op_type == "StreamingFCLayer_Batch"
    assert model.get_tensor_shape(fc0.input[0]) == [1, 784]
    assert model.get_tensor_shape(fc0.input[1]) == [784, 64]
    assert model.get_tensor_shape(fc0.input[2]) == [64, 1]
    fc1 = model.graph.node[3]
    assert fc1.op_type == "StreamingFCLayer_Batch"
    assert model.get_tensor_shape(fc1.input[0]) == [1, 64]
    assert model.get_tensor_shape(fc1.input[1]) == [64, 64]
    assert model.get_tensor_shape(fc1.input[2]) == [64, 1]
    fc2 = model.graph.node[4]
    assert fc2.op_type == "StreamingFCLayer_Batch"
    assert model.get_tensor_shape(fc2.input[0]) == [1, 64]
    assert model.get_tensor_shape(fc2.input[1]) == [64, 64]
    assert model.get_tensor_shape(fc2.input[2]) == [64, 1]
    fc3 = model.graph.node[5]
    assert fc3.op_type == "StreamingFCLayer_Batch"
    assert model.get_tensor_shape(fc3.input[0]) == [1, 64]
    assert model.get_tensor_shape(fc3.input[1]) == [64, 10]

    fc0w = getCustomOp(fc0)
    fc0w.set_nodeattr("SIMD", 784)
    fc0w.set_nodeattr("PE", 16)

    fc1w = getCustomOp(fc1)
    fc1w.set_nodeattr("SIMD", 16)
    fc1w.set_nodeattr("PE", 16)

    fc2w = getCustomOp(fc2)
    fc2w.set_nodeattr("SIMD", 16)
    fc2w.set_nodeattr("PE", 16)

    fc3w = getCustomOp(fc3)
    fc3w.set_nodeattr("SIMD", 16)
    fc3w.set_nodeattr("PE", 10)

    model = model.transform(PrepareCppSim())
    model = model.transform(CompileCppSim())
    model = model.transform(SetExecMode("cppsim"))

    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    # run using FINN-based execution
    input_dict = {"global_in": nph.to_array(input_tensor)}
    output_dict = oxe.execute_onnx(model, input_dict)
    produced = output_dict[list(output_dict.keys())[0]]
    # run using PyTorch/Brevitas
    input_tensor = torch.from_numpy(nph.to_array(input_tensor)).float()
    assert input_tensor.shape == (1, 1, 28, 28)
    # do forward pass in PyTorch/Brevitas
    expected = tfc.forward(input_tensor).detach().numpy()
    assert np.isclose(produced, expected, atol=1e-3).all()
    os.remove(export_onnx_path)
Code Example #24
File: test_brevitas_debug.py Project: Xilinx/finn
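The debug-marker test again, parameterized over QONNX export and an optional QONNX-to-FINN conversion; the number of debug tensors common to Brevitas and FINN differs per path (12, 8 or 16), and DebugMarker nodes must have their domain rewritten to finn.custom_op.general before execution.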
def test_brevitas_debug(QONNX_export, QONNX_FINN_conversion):
    if (not QONNX_export) and QONNX_FINN_conversion:
        pytest.skip(
            "This test configuration is not valid and is thus skipped.")
    finn_onnx = "test_brevitas_debug.onnx"
    fc = get_test_model_trained("TFC", 2, 2)
    ishape = (1, 1, 28, 28)
    if QONNX_export:
        dbg_hook = bo.enable_debug(fc, proxy_level=True)
        BrevitasONNXManager.export(fc, ishape, finn_onnx)
        # DebugMarkers have the brevitas.onnx domain, so that needs adjusting
        model = ModelWrapper(finn_onnx)
        dbg_nodes = model.get_nodes_by_op_type("DebugMarker")
        for dbg_node in dbg_nodes:
            dbg_node.domain = "finn.custom_op.general"
        model.save(finn_onnx)
        qonnx_cleanup(finn_onnx, out_file=finn_onnx)
        if QONNX_FINN_conversion:
            model = ModelWrapper(finn_onnx)
            model = model.transform(ConvertQONNXtoFINN())
            model.save(finn_onnx)
    else:
        dbg_hook = bo.enable_debug(fc)
        bo.export_finn_onnx(fc, ishape, finn_onnx)
        model = ModelWrapper(finn_onnx)
        # DebugMarkers have the brevitas.onnx domain, so that needs adjusting
        # ToDo: We should probably have transformation pass, which does this
        #  domain conversion for us?
        dbg_nodes = model.get_nodes_by_op_type("DebugMarker")
        for dbg_node in dbg_nodes:
            dbg_node.domain = "finn.custom_op.general"
        model = model.transform(InferShapes())
        model = model.transform(FoldConstants())
        model = model.transform(RemoveStaticGraphInputs())
        model.save(finn_onnx)
    model = ModelWrapper(finn_onnx)
    assert len(model.graph.input) == 1
    assert len(model.graph.output) == 1
    # load one of the test vectors
    raw_i = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    # run using FINN-based execution
    input_dict = {model.graph.input[0].name: nph.to_array(input_tensor)}
    output_dict = oxe.execute_onnx(model,
                                   input_dict,
                                   return_full_exec_context=True)
    produced = output_dict[model.graph.output[0].name]
    # run using PyTorch/Brevitas
    input_tensor = torch.from_numpy(nph.to_array(input_tensor)).float()
    assert input_tensor.shape == (1, 1, 28, 28)
    # do forward pass in PyTorch/Brevitas
    expected = fc.forward(input_tensor).detach().numpy()
    assert np.isclose(produced, expected, atol=1e-3).all()
    # check all tensors at debug markers
    names_brevitas = set(dbg_hook.values.keys())
    names_finn = set(output_dict.keys())
    names_common = names_brevitas.intersection(names_finn)
    # The different exports return debug markers in different numbers and places
    print(len(names_common))
    if QONNX_export and not QONNX_FINN_conversion:
        assert len(names_common) == 12
    elif QONNX_export and QONNX_FINN_conversion:
        assert len(names_common) == 8
    else:
        assert len(names_common) == 16
    for dbg_name in names_common:
        if QONNX_export:
            tensor_pytorch = dbg_hook.values[dbg_name].value.detach().numpy()
        else:
            tensor_pytorch = dbg_hook.values[dbg_name].detach().numpy()
        tensor_finn = output_dict[dbg_name]
        assert np.isclose(tensor_finn, tensor_pytorch, atol=1e-5).all()
    os.remove(finn_onnx)