Code Example #1
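A pytest case for FINN's MoveFlattenPastAffine streamlining transformation: a Flatten feeding a MatMul/Mul/Add chain should be moved past the affine ops when the input layout is NHWC, and the graph should stay unchanged for NCHW. The data_layout and batch_size arguments are presumably supplied via pytest.mark.parametrize; the decorators are not shown here.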
def test_move_flatten_past_affine(data_layout, batch_size):
    if data_layout == DataLayout.NHWC:
        ishape = [batch_size, 1, 1, 1024]
        oshape = [batch_size, 1000]
    else:
        ishape = [batch_size, 1024, 1, 1]
        oshape = [batch_size, 1000]

    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, ishape)
    a0 = helper.make_tensor_value_info("a0", TensorProto.FLOAT, [1024, 1000])
    a1 = helper.make_tensor_value_info("a1", TensorProto.FLOAT, [])
    a2 = helper.make_tensor_value_info("a2", TensorProto.FLOAT, [1000])
    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, oshape)

    flatten_node = helper.make_node("Flatten", ["inp"], ["flatten_out"])
    matmul_node = helper.make_node("MatMul", ["flatten_out", "a0"], ["matmul_out"])
    mul_node = helper.make_node("Mul", ["matmul_out", "a1"], ["mul_out"])
    add_node = helper.make_node("Add", ["mul_out", "a2"], ["outp"])

    graph = helper.make_graph(
        nodes=[flatten_node, matmul_node, mul_node, add_node],
        name="move-reshape-graph",
        inputs=[inp],
        outputs=[outp],
        value_info=[a0, a1, a2],
    )

    model = helper.make_model(graph, producer_name="move_reshape_model")
    model = ModelWrapper(model)

    # initialize values
    a0_values = gen_finn_dt_tensor(DataType["TERNARY"], [1024, 1000])
    model.set_initializer("a0", a0_values)
    a1_values = np.random.uniform(low=0.1, high=0.99, size=(1)).astype(np.float32)
    model.set_initializer("a1", a1_values)
    a2_values = np.random.uniform(low=-1, high=1, size=(1000)).astype(np.float32)
    model.set_initializer("a2", a2_values)

    model.set_tensor_datatype("inp", DataType["INT2"])
    model.set_tensor_layout("inp", data_layout)
    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())
    model = model.transform(InferDataLayouts())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())

    # compare execution before and after transformation
    inp_values = gen_finn_dt_tensor(DataType["INT2"], ishape)
    idict = {model.graph.input[0].name: inp_values}
    model_transformed = model.transform(MoveFlattenPastAffine())
    assert oxe.compare_execution(model, model_transformed, idict)

    # depending on data layout check if graph is transformed or not
    if data_layout == DataLayout.NHWC:
        # check if nodes have new order in transformed graph
        assert model.graph != model_transformed.graph
        assert model_transformed.graph.node[-1].op_type == "Flatten"
    else:
        assert model.graph == model_transformed.graph
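
For reference, a plausible import preamble for this snippet (module paths as in older FINN releases; this is an assumption, since newer releases moved several of these into the qonnx package):

import numpy as np
from onnx import TensorProto, helper

import finn.core.data_layout as DataLayout
import finn.core.onnx_exec as oxe
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames
from finn.transformation.infer_data_layouts import InferDataLayouts
from finn.transformation.infer_datatypes import InferDataTypes
from finn.transformation.infer_shapes import InferShapes
from finn.transformation.streamline.reorder import MoveFlattenPastAffine
from finn.util.basic import gen_finn_dt_tensor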
Code Example #2
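A pytest case for MoveFlattenPastTopK: InsertTopK first appends a TopK node at the graph output, and the transformation should then move the Flatten past it for NHWC inputs only. As in the previous example, data_layout and batch_size are presumably parametrized.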
def test_move_flatten_past_topk(data_layout, batch_size):
    if data_layout == DataLayout.NHWC:
        ishape = [batch_size, 1, 1, 1024]
        oshape = [batch_size, 1024]
    else:
        ishape = [batch_size, 1024, 1, 1]
        oshape = [batch_size, 1024]

    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, ishape)
    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, oshape)

    flatten_node = helper.make_node("Flatten", ["inp"], ["outp"])

    graph = helper.make_graph(
        nodes=[flatten_node],
        name="move-flatten-graph",
        inputs=[inp],
        outputs=[outp],
    )

    model = helper.make_model(graph, producer_name="move_flatten_model")
    model = ModelWrapper(model)

    model.set_tensor_datatype("inp", DataType["INT2"])
    model.set_tensor_layout("inp", data_layout)
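    # InsertTopK (applied below) appends a TopK node at the graph output, so
    # the model under test becomes Flatten -> TopK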
    model = model.transform(InsertTopK())
    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())
    model = model.transform(InferDataLayouts())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())

    # compare execution before and after transformation
    inp_values = gen_finn_dt_tensor(DataType["INT2"], ishape)
    idict = {model.graph.input[0].name: inp_values}
    model_transformed = model.transform(MoveFlattenPastTopK())
    assert oxe.compare_execution(model, model_transformed, idict)

    # depending on data layout check if graph is transformed or not
    if data_layout == DataLayout.NHWC:
        # check if nodes have new order in transformed graph
        assert model.graph != model_transformed.graph
        assert model_transformed.graph.node[-1].op_type == "Flatten"
    else:
        assert model.graph == model_transformed.graph
Code Example #3
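A test of ModelWrapper's tensor-level API (shape queries, initializers, producer/consumer lookup, layout and sparsity annotations) on a trained Brevitas LFC model exported to FINN-ONNX. get_test_model_trained, bo (brevitas.onnx), and a module-level export_onnx_path (e.g. a temporary .onnx file name) are assumed to be defined by the surrounding test module.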
def test_modelwrapper():
    lfc = get_test_model_trained("LFC", 1, 1)
    bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    assert model.check_all_tensor_shapes_specified() is False
    inp_name = model.graph.input[0].name
    inp_shape = model.get_tensor_shape(inp_name)
    assert inp_shape == [1, 1, 28, 28]
    # find first matmul node
    l0_mat_tensor_name = ""
    l0_inp_tensor_name = ""
    for node in model.graph.node:
        if node.op_type == "MatMul":
            l0_inp_tensor_name = node.input[0]
            l0_mat_tensor_name = node.input[1]
            break
    assert l0_mat_tensor_name != ""
    l0_weights = model.get_initializer(l0_mat_tensor_name)
    assert l0_weights.shape == (784, 1024)
    l0_weights_hist = Counter(l0_weights.flatten())
    assert (l0_weights_hist[1.0] + l0_weights_hist[-1.0]) == 784 * 1024
    l0_weights_rand = np.random.randn(784, 1024)
    model.set_initializer(l0_mat_tensor_name, l0_weights_rand)
    assert (model.get_initializer(l0_mat_tensor_name) == l0_weights_rand).all()
    assert l0_inp_tensor_name != ""
    inp_cons = model.find_consumer(l0_inp_tensor_name)
    assert inp_cons.op_type == "MatMul"
    out_prod = model.find_producer(l0_inp_tensor_name)
    assert out_prod.op_type == "MultiThreshold"
    inp_layout = model.get_tensor_layout(inp_name)
    assert inp_layout is None
    inp_layout = DataLayout.NCHW
    model.set_tensor_layout(inp_name, inp_layout)
    assert model.get_tensor_layout(inp_name) == inp_layout
    inp_sparsity = model.get_tensor_sparsity(inp_name)
    assert inp_sparsity is None
    inp_sparsity = {"dw": {"kernel_shape": 3}}
    model.set_tensor_sparsity(inp_name, inp_sparsity)
    assert model.get_tensor_sparsity(inp_name) == inp_sparsity
    os.remove(export_onnx_path)
Code Example #4
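The same ModelWrapper API exercised on the MNIST conv model bundled with the finn.data package, so no Brevitas export step is needed and all tensor shapes are already specified.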
def test_modelwrapper():
    raw_m = get_data("finn.data", "onnx/mnist-conv/model.onnx")
    model = ModelWrapper(raw_m)
    assert model.check_all_tensor_shapes_specified() is True
    inp_name = model.graph.input[0].name
    inp_shape = model.get_tensor_shape(inp_name)
    assert inp_shape == [1, 1, 28, 28]
    conv_nodes = model.get_nodes_by_op_type("Conv")
    matmul_nodes = model.get_nodes_by_op_type("MatMul")
    assert len(conv_nodes) == 2
    assert len(matmul_nodes) == 1
    first_conv = conv_nodes[0]
    first_conv_iname = first_conv.input[0]
    first_conv_wname = first_conv.input[1]
    first_conv_oname = first_conv.output[0]
    assert first_conv_iname != "" and (first_conv_iname is not None)
    assert first_conv_wname != "" and (first_conv_wname is not None)
    assert first_conv_oname != "" and (first_conv_oname is not None)
    first_conv_weights = model.get_initializer(first_conv_wname)
    assert first_conv_weights.shape == (8, 1, 5, 5)
    first_conv_weights_rand = np.random.randn(8, 1, 5, 5)
    model.set_initializer(first_conv_wname, first_conv_weights_rand)
    assert (model.get_initializer(first_conv_wname) == first_conv_weights_rand
            ).all()
    inp_cons = model.find_consumer(first_conv_iname)
    assert inp_cons == first_conv
    out_prod = model.find_producer(first_conv_oname)
    assert out_prod == first_conv
    inp_layout = model.get_tensor_layout(first_conv_iname)
    assert inp_layout is None
    inp_layout = DataLayout.NCHW
    model.set_tensor_layout(first_conv_iname, inp_layout)
    assert model.get_tensor_layout(first_conv_iname) == inp_layout
    inp_sparsity = model.get_tensor_sparsity(first_conv_iname)
    assert inp_sparsity is None
    inp_sparsity = {"dw": {"kernel_shape": [3, 3]}}
    model.set_tensor_sparsity(first_conv_iname, inp_sparsity)
    assert model.get_tensor_sparsity(first_conv_iname) == inp_sparsity
Code Example #5
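A pytest case for AbsorbTransposeIntoFlatten: a Transpose followed by a Flatten (or an equivalent Reshape) should collapse into a single Flatten, but only for parameter combinations where the two graphs are actually equivalent. perm, shape, ishape, and data_layout are presumably parametrized via pytest.mark.parametrize.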
def test_absorb_transp_into_flatten(perm, shape, ishape, data_layout):
    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, ishape)
    transp_node = helper.make_node("Transpose", ["inp"], ["transp_out"], perm=perm)
    dummy_in = np.random.uniform(low=0, high=1, size=tuple(ishape)).astype(np.float32)
    if shape is None:
        shape_node = helper.make_node("Flatten", ["transp_out"], ["outp"])
        dummy_in = dummy_in.transpose(tuple(perm))
        oshape = dummy_in.reshape(dummy_in.shape[0], -1).shape
        outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, oshape)
        shape0 = None
    else:
        shape0 = helper.make_tensor_value_info("shape0", TensorProto.INT64, shape)
        shape_node = helper.make_node("Reshape", ["transp_out", "shape0"], ["outp"])
        oshape = dummy_in.transpose(tuple(perm)).reshape(tuple(shape)).shape
        outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, oshape)

    graph = helper.make_graph(
        nodes=[transp_node, shape_node],
        name="absorb-transpose-graph",
        inputs=[inp],
        outputs=[outp],
    )

    model = helper.make_model(graph, producer_name="absorb_transpose_model")
    model = ModelWrapper(model)
    if shape is not None:
        model.graph.value_info.append(shape0)
        model.set_initializer("shape0", np.asarray(shape))
    if data_layout == "NCHW":
        model.set_tensor_layout("inp", DataLayout.NCHW)
    else:
        model.set_tensor_layout("inp", DataLayout.NHWC)
    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())
    model = model.transform(InferDataLayouts())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    # model.save("test.onnx")
    model_transformed = model.transform(AbsorbTransposeIntoFlatten())
    # model_transformed.save("test2.onnx")

    # verify transformation
    inp_values = np.random.uniform(low=-1, high=1, size=tuple(ishape)).astype(
        np.float32
    )
    idict = {model.graph.input[0].name: inp_values}
    assert oxe.compare_execution(model, model_transformed, idict)

    # only some of the parameter combinations lead to a graph that will be changed when
    # AbsorbTransposeIntoFlatten is applied

    if shape == [-1, 1]:  # not a flatten operation, so the graph will not be changed
        assert model.graph == model_transformed.graph

    elif perm == [3, 2, 0, 1]:
        # the first dimension is also part of the transpose operation,
        # so the graph will not be changed
        assert model.graph == model_transformed.graph

    # the following cases are the ones in which the model is transformed;
    # because shape and perm were already checked above, only ishape and
    # data_layout need to be considered (the transformed model should
    # contain only a "Flatten" node)
    elif ishape == [1, 1, 1, 4] and data_layout == "NHWC":
        assert model_transformed.graph.node[0].op_type == "Flatten"

    elif ishape == [2, 4, 1, 1] and data_layout == "NCHW" and shape is None:
        # if the first dimension of the input tensor is not 1, Flatten and
        # Reshape (with shape = [1, -1]) would lead to different results
        assert model_transformed.graph.node[0].op_type == "Flatten"

    # all other cases lead to an unchanged model
    else:
        assert model.graph == model_transformed.graph
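
The distinction drawn in the comments above is easy to check directly in numpy: with a batch dimension larger than 1, Flatten (axis=1) and Reshape with shape [1, -1] give different shapes. A minimal sketch:

import numpy as np

x = np.arange(8, dtype=np.float32).reshape(2, 4, 1, 1)
flattened = x.reshape(x.shape[0], -1)  # what Flatten with axis=1 computes
reshaped = x.reshape(1, -1)            # what Reshape with shape [1, -1] computes
assert flattened.shape == (2, 4)
assert reshaped.shape == (1, 8)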
Code Example #6
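A pytest case for MoveTransposePastScalarMul: the Transpose/Mul pair is reordered only when the Mul is by a scalar and a data layout annotation is available; otherwise the graph must stay unchanged.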
def test_move_transpose_past_scalar_mul(perm, scalar, data_layout):
    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, 2, 3, 4])
    # to determine out_size we need to calculate with "perm" for this test case
    dummy_in = np.random.uniform(low=0, high=1,
                                 size=(1, 2, 3, 4)).astype(np.float32)
    out_size = dummy_in.transpose(tuple(perm)).shape

    if scalar is True:
        a0_size = []
    else:
        a0_size = out_size
    a0 = helper.make_tensor_value_info("a0", TensorProto.FLOAT, a0_size)
    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, out_size)
    transp_node = helper.make_node("Transpose", ["inp"], ["transp_out"],
                                   perm=perm)
    mul_node = helper.make_node("Mul", ["transp_out", "a0"], ["outp"])

    graph = helper.make_graph(
        nodes=[transp_node, mul_node],
        name="mv-transpose-graph",
        inputs=[inp],
        outputs=[outp],
        value_info=[a0],
    )

    model = helper.make_model(graph, producer_name="mv_transpose_model")
    model = ModelWrapper(model)

    # initialize values
    a0_values = np.random.uniform(low=0, high=1,
                                  size=tuple(a0_size)).astype(np.float32)
    model.set_initializer("a0", a0_values)
    if data_layout is not None:
        model.set_tensor_layout("inp", data_layout)
        model = model.transform(InferDataLayouts())

    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())

    # compare execution before and after transformation
    inp_values = np.random.uniform(low=0, high=1,
                                   size=(1, 2, 3, 4)).astype(np.float32)
    idict = {model.graph.input[0].name: inp_values}
    model_transformed = model.transform(MoveTransposePastScalarMul())
    assert oxe.compare_execution(model, model_transformed, idict)

    # check if order changed
    if scalar is True and data_layout is not None:
        assert model_transformed.graph.node[0] != model.graph.node[0]
        assert model_transformed.graph.node[1] != model.graph.node[1]
        assert model_transformed.graph.node[0].op_type == "Mul"
        assert model_transformed.graph.node[1].op_type == "Transpose"
        mul_input = model_transformed.graph.node[0].input[0]
        mul_output = model_transformed.graph.node[0].output[0]
        assert model_transformed.get_tensor_layout(mul_input) == data_layout
        assert model_transformed.get_tensor_layout(mul_output) == data_layout
    else:
        assert model_transformed.graph.node[0] == model.graph.node[0]
        assert model_transformed.graph.node[1] == model.graph.node[1]
        if data_layout is not None:
            mul_input = model_transformed.graph.node[1].input[0]
            mul_output = model_transformed.graph.node[1].output[0]
            assert model_transformed.get_tensor_layout(
                mul_input) != data_layout
            assert model_transformed.get_tensor_layout(
                mul_output) != data_layout
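
The reordering is sound because multiplication by a scalar commutes with any transposition, which a quick numpy check confirms:

import numpy as np

x = np.random.rand(1, 2, 3, 4).astype(np.float32)
perm = (0, 2, 3, 1)
s = np.float32(0.5)
# Transpose(x) * s equals Transpose(x * s), so Mul can move before Transpose
assert np.allclose(x.transpose(perm) * s, (x * s).transpose(perm))
# a full-size (non-scalar) multiplicand would have to be transposed as well,
# which is why the transformation leaves that case untouched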
Code Example #7
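An end-to-end test of the convolution-to-fully-connected transition in FINN's convert-to-HLS flow: a Conv -> MultiThreshold -> Flatten (or Reshape) -> MatMul -> MultiThreshold graph is streamlined, lowered to HLS custom ops, executed with cppsim, and compared against the original model; the final assertion checks that the Transpose/Flatten pair between the conv and FC parts was removed.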
def test_convert_to_hls_conv_fc_transition(conv_config, depthwise,
                                           use_reshape):
    np.random.seed(0)
    idt = DataType["UINT4"]
    odt = DataType["UINT4"]
    conv_weight_dt = DataType["INT4"]
    fc_weight_dt = DataType["INT4"]

    input_shape, kernel_shape, stride, pad = conv_config
    kernel_size_h, kernel_size_w = kernel_shape
    input_size_h, input_size_w = input_shape
    stride_h, stride_w = stride
    pad_h, pad_w = pad

    in_chn = 4
    fc_filters = 16

    if depthwise is True:
        group = out_chn = in_chn
        conv_param_shape = [out_chn, 1, kernel_size_h, kernel_size_w]
    else:
        group = 1
        out_chn = 8
        conv_param_shape = [out_chn, in_chn, kernel_size_h, kernel_size_w]

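    # compute_conv_output_dim presumably implements the standard sizing rule
    # out = (in + total_pad - k) // stride + 1 (with dilation 1), which is
    # why the total padding 2 * pad_h / 2 * pad_w is passed in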
    output_size_h = compute_conv_output_dim(input_size_h, kernel_size_h,
                                            stride_h, 2 * pad_h)
    output_size_w = compute_conv_output_dim(input_size_w, kernel_size_w,
                                            stride_w, 2 * pad_w)

    input_shape = [1, in_chn, input_size_h, input_size_w]
    fc_param_shape = [out_chn * output_size_h * output_size_w, fc_filters]
    output_shape = [1, fc_filters]

    conv_config = {}
    conv_config["dilations"] = [1, 1]
    conv_config["group"] = group
    conv_config["kernel_shape"] = [kernel_size_h, kernel_size_w]
    conv_config["pads"] = [pad_h, pad_w, pad_h, pad_w]
    conv_config["strides"] = [stride_h, stride_w]

    global_in = helper.make_tensor_value_info("global_in", TensorProto.FLOAT,
                                              input_shape)
    global_out = helper.make_tensor_value_info("global_out", TensorProto.FLOAT,
                                               output_shape)
    value_info = [
        helper.make_tensor_value_info("conv_param", TensorProto.FLOAT,
                                      conv_param_shape),
        helper.make_tensor_value_info("thres1_param", TensorProto.FLOAT,
                                      (out_chn, 15)),
        helper.make_tensor_value_info("matmul_param", TensorProto.FLOAT,
                                      fc_param_shape),
        helper.make_tensor_value_info("thres2_param", TensorProto.FLOAT,
                                      (fc_filters, 15)),
        helper.make_tensor_value_info("reshape_shape", TensorProto.INT64, []),
    ]

    if use_reshape:
        flatten_node = helper.make_node("Reshape",
                                        ["thres1_out", "reshape_shape"],
                                        ["flatten_out"])
    else:
        flatten_node = helper.make_node("Flatten", ["thres1_out"],
                                        ["flatten_out"],
                                        axis=1)

    modelproto = helper.make_model(
        helper.make_graph(
            name="test",
            inputs=[global_in],
            outputs=[global_out],
            value_info=value_info,
            nodes=[
                helper.make_node("Conv", ["global_in", "conv_param"],
                                 ["conv_out"], **conv_config),
                helper.make_node(
                    "MultiThreshold",
                    ["conv_out", "thres1_param"],
                    ["thres1_out"],
                    domain="finn.custom_op.general",
                    out_dtype="UINT4",
                ),
                flatten_node,
                helper.make_node("MatMul", ["flatten_out", "matmul_param"],
                                 ["matmul_out"]),
                helper.make_node(
                    "MultiThreshold",
                    ["matmul_out", "thres2_param"],
                    ["global_out"],
                    domain="finn.custom_op.general",
                    out_dtype="UINT4",
                ),
            ],
        ))

    model = ModelWrapper(modelproto)
    model.set_tensor_datatype("global_in", idt)
    model.set_tensor_layout("global_in", DataLayout.NCHW)
    model.set_tensor_datatype("global_out", odt)
    model.set_tensor_datatype("conv_param", conv_weight_dt)
    model.set_tensor_datatype("matmul_param", fc_weight_dt)
    model.set_tensor_datatype("thres1_param", DataType["INT32"])
    model.set_tensor_datatype("thres2_param", DataType["INT32"])

    model.set_initializer("conv_param",
                          gen_finn_dt_tensor(conv_weight_dt, conv_param_shape))
    model.set_initializer("thres1_param",
                          get_multithreshold_rand_params(out_chn, 15, seed=0))
    model.set_initializer(
        "thres2_param", get_multithreshold_rand_params(fc_filters, 15, seed=0))
    model.set_initializer("matmul_param",
                          gen_finn_dt_tensor(fc_weight_dt, fc_param_shape))
    model.set_initializer("reshape_shape", np.array([1, -1]))

    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())
    model = model.transform(InferDataLayouts())

    # streamlining
    new_model = model.transform(MoveScalarLinearPastInvariants())
    new_model = new_model.transform(Streamline())
    new_model = new_model.transform(LowerConvsToMatMul())
    new_model = new_model.transform(absorb.AbsorbTransposeIntoMultiThreshold())
    new_model = new_model.transform(Streamline())
    new_model = new_model.transform(InferDataLayouts())
    new_model = new_model.transform(RemoveUnusedTensors())

    # convert_to_hls
    if depthwise is True:
        new_model = new_model.transform(to_hls.InferVVAU())
    new_model = new_model.transform(to_hls.InferQuantizedStreamingFCLayer())
    new_model = new_model.transform(to_hls.InferThresholdingLayer())
    new_model = new_model.transform(to_hls.InferConvInpGen())
    new_model = new_model.transform(to_hls.InferStreamingMaxPool())
    new_model = new_model.transform(RemoveCNVtoFCFlatten())
    new_model = new_model.transform(absorb.AbsorbConsecutiveTransposes())
    new_model = new_model.transform(GiveUniqueNodeNames())
    new_model = new_model.transform(InferDataLayouts())

    # prepare cppsim
    new_model = new_model.transform(PrepareCppSim())
    new_model = new_model.transform(CompileCppSim())
    new_model = new_model.transform(SetExecMode("cppsim"))

    # check for correct execution
    x = gen_finn_dt_tensor(idt, input_shape)
    inp_dict = {model.graph.input[0].name: x}
    assert oxe.compare_execution(model, new_model, inp_dict)

    num_transpose = len(new_model.get_nodes_by_op_type("Transpose"))
    num_flatten = len(new_model.get_nodes_by_op_type("Flatten"))
    num_reshape = len(new_model.get_nodes_by_op_type("Reshape"))

    # check if transpose->flatten was removed
    assert num_transpose == 1 and num_flatten == 0 and num_reshape == 0