# Imports assumed for these test snippets: module paths follow older FINN
# releases (v0.x); newer versions moved several of these into the qonnx
# package, so adjust the paths if targeting a current release.
import os
from collections import Counter
from pkgutil import get_data

import brevitas.onnx as bo
import numpy as np

import finn.core.data_layout as DataLayout
import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls
import finn.transformation.streamline.absorb as absorb
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount
from finn.transformation.fold_constants import FoldConstants
from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames
from finn.transformation.infer_data_layouts import InferDataLayouts
from finn.transformation.infer_shapes import InferShapes
from finn.transformation.lower_convs_to_matmul import LowerConvsToMatMul
from finn.transformation.make_input_chanlast import MakeInputChannelsLast
from finn.transformation.streamline import Streamline
from finn.transformation.streamline.reorder import MakeMaxPoolNHWC
from finn.util.test import get_test_model_trained

# placeholder export paths (the exact filenames are arbitrary)
export_onnx_path = "test_modelwrapper.onnx"
export_onnx_path_cnv = "test_infer_data_layouts_cnv.onnx"


def test_make_input_chanlast():
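    # Checks that MakeInputChannelsLast converts the NCHW model input to
    # NHWC by prepending a Transpose node and annotating the new layout.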
    # load the onnx model
    raw_m = get_data("finn.data", "onnx/mnist-conv/model.onnx")
    model = ModelWrapper(raw_m)
    iname = model.graph.input[0].name
    assert tuple(model.get_tensor_shape(iname)) == (1, 1, 28, 28)
    model = model.transform(MakeInputChannelsLast())
    assert model.graph.node[0].op_type == "Transpose"
    assert tuple(model.get_tensor_shape(iname)) == (1, 28, 28, 1)
    assert model.get_tensor_layout(iname) == DataLayout.NHWC


def test_modelwrapper():
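    # Exercises the ModelWrapper convenience API (shapes, initializers,
    # producer/consumer lookup, layout and sparsity annotations) on a
    # brevitas-exported LFC-w1a1 network.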
    lfc = get_test_model_trained("LFC", 1, 1)
    bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    assert model.check_all_tensor_shapes_specified() is False
    inp_name = model.graph.input[0].name
    inp_shape = model.get_tensor_shape(inp_name)
    assert inp_shape == [1, 1, 28, 28]
    # find first matmul node
    l0_mat_tensor_name = ""
    l0_inp_tensor_name = ""
    for node in model.graph.node:
        if node.op_type == "MatMul":
            l0_inp_tensor_name = node.input[0]
            l0_mat_tensor_name = node.input[1]
            break
    assert l0_mat_tensor_name != ""
    l0_weights = model.get_initializer(l0_mat_tensor_name)
    assert l0_weights.shape == (784, 1024)
    l0_weights_hist = Counter(l0_weights.flatten())
    assert (l0_weights_hist[1.0] + l0_weights_hist[-1.0]) == 784 * 1024
    l0_weights_rand = np.random.randn(784, 1024)
    model.set_initializer(l0_mat_tensor_name, l0_weights_rand)
    assert (model.get_initializer(l0_mat_tensor_name) == l0_weights_rand).all()
    assert l0_inp_tensor_name != ""
    inp_cons = model.find_consumer(l0_inp_tensor_name)
    assert inp_cons.op_type == "MatMul"
    out_prod = model.find_producer(l0_inp_tensor_name)
    assert out_prod.op_type == "MultiThreshold"
    inp_layout = model.get_tensor_layout(inp_name)
    assert inp_layout is None
    inp_layout = DataLayout.NCHW
    model.set_tensor_layout(inp_name, inp_layout)
    assert model.get_tensor_layout(inp_name) == inp_layout
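    # sparsity annotations are free-form dicts; FINN uses the
    # {"dw": {"kernel_shape": ...}} convention to mark depthwise weights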
    inp_sparsity = model.get_tensor_sparsity(inp_name)
    assert inp_sparsity is None
    inp_sparsity = {"dw": {"kernel_shape": 3}}
    model.set_tensor_sparsity(inp_name, inp_sparsity)
    assert model.get_tensor_sparsity(inp_name) == inp_sparsity
    os.remove(export_onnx_path)


def test_modelwrapper_mnist_conv():
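    # Same ModelWrapper walk-through as above, this time on the pretrained
    # mnist-conv model shipped with finn.data.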
    raw_m = get_data("finn.data", "onnx/mnist-conv/model.onnx")
    model = ModelWrapper(raw_m)
    assert model.check_all_tensor_shapes_specified() is True
    inp_name = model.graph.input[0].name
    inp_shape = model.get_tensor_shape(inp_name)
    assert inp_shape == [1, 1, 28, 28]
    conv_nodes = model.get_nodes_by_op_type("Conv")
    matmul_nodes = model.get_nodes_by_op_type("MatMul")
    assert len(conv_nodes) == 2
    assert len(matmul_nodes) == 1
    first_conv = conv_nodes[0]
    first_conv_iname = first_conv.input[0]
    first_conv_wname = first_conv.input[1]
    first_conv_oname = first_conv.output[0]
    assert first_conv_iname != "" and (first_conv_iname is not None)
    assert first_conv_wname != "" and (first_conv_wname is not None)
    assert first_conv_oname != "" and (first_conv_oname is not None)
    first_conv_weights = model.get_initializer(first_conv_wname)
    assert first_conv_weights.shape == (8, 1, 5, 5)
    first_conv_weights_rand = np.random.randn(8, 1, 5, 5)
    model.set_initializer(first_conv_wname, first_conv_weights_rand)
    assert (
        model.get_initializer(first_conv_wname) == first_conv_weights_rand
    ).all()
    inp_cons = model.find_consumer(first_conv_iname)
    assert inp_cons == first_conv
    out_prod = model.find_producer(first_conv_oname)
    assert out_prod == first_conv
    inp_layout = model.get_tensor_layout(first_conv_iname)
    assert inp_layout is None
    inp_layout = DataLayout.NCHW
    model.set_tensor_layout(first_conv_iname, inp_layout)
    assert model.get_tensor_layout(first_conv_iname) == inp_layout
    inp_sparsity = model.get_tensor_sparsity(first_conv_iname)
    assert inp_sparsity is None
    inp_sparsity = {"dw": {"kernel_shape": [3, 3]}}
    model.set_tensor_sparsity(first_conv_iname, inp_sparsity)
    assert model.get_tensor_sparsity(first_conv_iname) == inp_sparsity


def test_infer_data_layouts():
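    # Verifies that InferDataLayouts annotates the expected NCHW/NC layouts
    # on the mnist-conv model, before and after lowering convs to matmuls.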

    raw_m = get_data("finn.data", "onnx/mnist-conv/model.onnx")
    model = ModelWrapper(raw_m)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(InferDataLayouts())

    assert model.get_tensor_layout("global_in") == DataLayout.NCHW
    assert model.get_tensor_layout("Conv_0_out0") == DataLayout.NCHW
    assert model.get_tensor_layout("MaxPool_0_out0") == DataLayout.NCHW
    assert model.get_tensor_layout("Reshape_0_out0") == DataLayout.NC
    assert model.get_tensor_layout("MatMul_0_out0") == DataLayout.NC
    assert model.get_tensor_layout("global_out") == DataLayout.NC

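    # lowering replaces each Conv with a Transpose -> Im2Col -> MatMul ->
    # Transpose chain, so the intermediate tensors switch to NHWC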
    model = model.transform(LowerConvsToMatMul())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(InferDataLayouts())

    assert model.get_tensor_layout("global_in") == DataLayout.NCHW
    assert model.get_tensor_layout("Transpose_0_out0") == DataLayout.NHWC
    assert model.get_tensor_layout("Im2Col_0_out0") == DataLayout.NHWC
    assert model.get_tensor_layout("MatMul_0_out0") == DataLayout.NHWC
    assert model.get_tensor_layout("MaxPool_0_out0") == DataLayout.NCHW
    assert model.get_tensor_layout("Reshape_0_out0") == DataLayout.NC
    assert model.get_tensor_layout("MatMul_2_out0") == DataLayout.NC
    assert model.get_tensor_layout("global_out") == DataLayout.NC


def test_infer_data_layouts_cnv():
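    # Tracks layout inference on the CNV-w1a1 network through streamlining,
    # conv lowering and conversion to HLS-backend layers.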
    cnv = get_test_model_trained("CNV", 1, 1)
    bo.export_finn_onnx(cnv, (1, 3, 32, 32), export_onnx_path_cnv)
    model = ModelWrapper(export_onnx_path_cnv)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(Streamline())
    model = model.transform(InferDataLayouts())

    assert model.get_tensor_layout("global_in") == DataLayout.NCHW
    assert model.get_tensor_layout("Conv_0_out0") == DataLayout.NCHW
    assert model.get_tensor_layout("MaxPool_0_out0") == DataLayout.NCHW
    assert model.get_tensor_layout("MultiThreshold_6_out0") == DataLayout.NCHW
    assert model.get_tensor_layout("Reshape_0_out0") == DataLayout.NC
    assert model.get_tensor_layout("MatMul_0_out0") == DataLayout.NC
    assert model.get_tensor_layout("global_out") == DataLayout.NC

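    # after lowering and MakeMaxPoolNHWC, conv and pooling tensors move to
    # NHWC, with Transpose nodes marking the layout boundaries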
    model = model.transform(LowerConvsToMatMul())
    model = model.transform(MakeMaxPoolNHWC())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(InferDataLayouts())

    assert model.get_tensor_layout("global_in") == DataLayout.NCHW
    assert model.get_tensor_layout("Transpose_0_out0") == DataLayout.NHWC
    assert model.get_tensor_layout("Im2Col_0_out0") == DataLayout.NHWC
    # note: im2col output isn't really NHWC or any other common layout
    # since the concept of channels changes with lowering... but it is
    # conceptually close to NHWC since the innermost dim gets multiplied
    assert model.get_tensor_layout("MatMul_0_out0") == DataLayout.NHWC
    assert model.get_tensor_layout("Transpose_1_out0") == DataLayout.NCHW
    assert model.get_tensor_layout("Transpose_2_out0") == DataLayout.NHWC
    assert model.get_tensor_layout("MaxPoolNHWC_0_out0") == DataLayout.NHWC
    assert model.get_tensor_layout("Reshape_0_out0") == DataLayout.NC
    assert model.get_tensor_layout("global_out") == DataLayout.NC

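    # convert the streamlined graph to FINN HLS-backend layers; the layout
    # annotations should survive the conversion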
    model = model.transform(absorb.AbsorbTransposeIntoMultiThreshold())
    model = model.transform(ConvertBipolarMatMulToXnorPopcount())
    model = model.transform(Streamline())
    model = model.transform(to_hls.InferBinaryStreamingFCLayer())
    model = model.transform(to_hls.InferQuantizedStreamingFCLayer())
    model = model.transform(to_hls.InferConvInpGen())
    model = model.transform(to_hls.InferStreamingMaxPool())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model = model.transform(InferDataLayouts())

    assert model.get_tensor_layout("global_in") == DataLayout.NCHW
    assert model.get_tensor_layout("Transpose_0_out0") == DataLayout.NHWC
    # note: same caveat as above, the ConvolutionInputGenerator output is
    # only conceptually NHWC since lowering changes what "channels" means
    assert (model.get_tensor_layout("ConvolutionInputGenerator_0_out0") ==
            DataLayout.NHWC)
    assert model.get_tensor_layout(
        "StreamingFCLayer_Batch_3_out0") == DataLayout.NHWC
    assert model.get_tensor_layout("Reshape_0_out0") == DataLayout.NC
    assert model.get_tensor_layout(
        "StreamingFCLayer_Batch_6_out0") == DataLayout.NC
    assert model.get_tensor_layout("global_out") == DataLayout.NC

    os.remove(export_onnx_path_cnv)