Example #1
def apply(self, model):
    """Apply each streamlining transformation in turn, re-running the
    cleanup passes after every step so node names, tensor names and
    datatypes stay consistent between transformations."""
    streamline_transformations = [
        ConvertSubToAdd(),
        ConvertDivToMul(),
        BatchNormToAffine(),
        ConvertSignToThres(),
        MoveMulPastMaxPool(),
        MoveScalarLinearPastInvariants(),
        AbsorbSignBiasIntoMultiThreshold(),
        MoveAddPastMul(),
        MoveScalarAddPastMatMul(),
        MoveAddPastConv(),
        MoveScalarMulPastMatMul(),
        MoveScalarMulPastConv(),
        MoveAddPastMul(),
        CollapseRepeatedAdd(),
        CollapseRepeatedMul(),
        MoveMulPastMaxPool(),
        AbsorbAddIntoMultiThreshold(),
        FactorOutMulSignMagnitude(),
        AbsorbMulIntoMultiThreshold(),
        Absorb1BitMulIntoMatMul(),
        Absorb1BitMulIntoConv(),
        RoundAndClipThresholds(),
    ]
    for trn in streamline_transformations:
        model = model.transform(trn)
        model = model.transform(RemoveIdentityOps())
        model = model.transform(GiveUniqueNodeNames())
        model = model.transform(GiveReadableTensorNames())
        model = model.transform(InferDataTypes())
    # (model, graph_modified): False tells the caller no re-application is needed
    return (model, False)
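The ordering above is not arbitrary: the Move* transformations each rest on a small algebraic identity that lets scalar constants drift through the graph until a MultiThreshold, MatMul, or Conv can absorb them. A minimal numpy sketch of the identity behind MoveAddPastMul (the values are made up for illustration):

import numpy as np

x = np.random.rand(4).astype(np.float32)  # stand-in for an activation tensor
a = np.float32(0.5)                       # scalar Add initializer
b = np.float32(3.0)                       # scalar Mul initializer

# MoveAddPastMul rewrites (x + a) * b into x * b + (a * b),
# so the Add ends up adjacent to whatever can absorb it next.
assert np.allclose((x + a) * b, x * b + (a * b))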
Example #2
def step_resnet50_streamline_linear(model: ModelWrapper,
                                    cfg: DataflowBuildConfig):
    streamline_transformations = [
        AbsorbScalarMulAddIntoTopK(),  # before MoveAddPastMul to avoid an int->float cast
        ConvertSubToAdd(),
        ConvertDivToMul(),
        RemoveIdentityOps(),
        CollapseRepeatedMul(),
        BatchNormToAffine(),
        ConvertSignToThres(),
        MoveAddPastMul(),
        MoveScalarAddPastMatMul(),
        MoveAddPastConv(),
        MoveScalarMulPastMatMul(),
        MoveScalarMulPastConv(),
        MoveScalarLinearPastInvariants(),
        MoveAddPastMul(),
        CollapseRepeatedAdd(),
        CollapseRepeatedMul(),
        AbsorbAddIntoMultiThreshold(),
        FactorOutMulSignMagnitude(),
        MoveMaxPoolPastMultiThreshold(),
        AbsorbMulIntoMultiThreshold(),
        Absorb1BitMulIntoMatMul(),
        Absorb1BitMulIntoConv(),
        RoundAndClipThresholds(),
    ]
    for trn in streamline_transformations:
        model = model.transform(trn)
        model = model.transform(GiveUniqueNodeNames())
    return model
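Since the function has the (model, cfg) -> model signature that FINN's builder expects of a step, it can be mixed with built-in step names in a DataflowBuildConfig. A hedged sketch, assuming a FINN version that ships the build_dataflow API; the model filename, output directory, and surrounding steps are placeholders:

from finn.builder.build_dataflow import build_dataflow_cfg
from finn.builder.build_dataflow_config import (
    DataflowBuildConfig,
    DataflowOutputType,
)

cfg = DataflowBuildConfig(
    output_dir="build_resnet50",  # placeholder
    synth_clk_period_ns=5.0,
    generate_outputs=[DataflowOutputType.ESTIMATE_REPORTS],
    # strings name built-in steps; callables are custom steps like the one above
    steps=["step_tidy_up", step_resnet50_streamline_linear],
)
build_dataflow_cfg("resnet50.onnx", cfg)  # placeholder exported model

Example #3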
@pytest.mark.parametrize("epsilon", [0.0, 0.00001, 0.001])  # representative values
def test_batchnorm_to_affine_epsilon(epsilon):
    """Dummy batchnorm node to test out the epsilon attribute."""

    batchnorm_node = onnx.helper.make_node(
        "BatchNormalization",
        inputs=["x", "s", "bias", "mean", "var"],
        outputs=["y"],
        epsilon=epsilon,
    )

    x = onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT,
                                           [1, 3, 5, 5])
    s = onnx.helper.make_tensor_value_info("s", onnx.TensorProto.FLOAT, [3])
    bias = onnx.helper.make_tensor_value_info("bias", onnx.TensorProto.FLOAT,
                                              [3])
    mean = onnx.helper.make_tensor_value_info("mean", onnx.TensorProto.FLOAT,
                                              [3])
    var = onnx.helper.make_tensor_value_info("var", onnx.TensorProto.FLOAT,
                                             [3])
    y = onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT,
                                           [1, 3, 5, 5])

    # Graph
    graph = onnx.helper.make_graph(
        nodes=[batchnorm_node],
        name="test_batchnorm_graph",
        inputs=[x],
        outputs=[y],
        value_info=[s, bias, mean, var],
    )

    onnx_model = onnx.helper.make_model(graph,
                                        producer_name="test_batchnorm-model")
    model = ModelWrapper(onnx_model)

    model.set_initializer("s", np.array([1, 2, 3]).astype(np.float32))
    model.set_initializer("bias", np.array([1, 2, 3]).astype(np.float32))
    model.set_initializer("mean", np.array([3, 4, 5]).astype(np.float32))
    model.set_initializer("var", np.array([0.5, 0.7, 0.3]).astype(np.float32))

    i_val = np.arange(0, 3 * 5 * 5, dtype=np.float32)
    i_val = np.reshape(i_val, [1, 3, 5, 5])
    input_dict = {"x": i_val}
    output_node_name = "y"

    output_dict = oxe.execute_onnx(model,
                                   input_dict,
                                   return_full_exec_context=True)
    output_original = output_dict[output_node_name]

    model_lowered = model.transform(BatchNormToAffine())
    output_dict = oxe.execute_onnx(model_lowered,
                                   input_dict,
                                   return_full_exec_context=True)
    output_lowered = output_dict[output_node_name]

    assert (output_original == output_lowered).all()
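What BatchNormToAffine computes can be checked by hand: it replaces y = s * (x - mean) / sqrt(var + eps) + bias with the affine map y = A * x + B, where A = s / sqrt(var + eps) and B = bias - mean * A. A standalone numpy check of that identity, reusing the test's initializer values:

import numpy as np

s = np.array([1, 2, 3], dtype=np.float32)
bias = np.array([1, 2, 3], dtype=np.float32)
mean = np.array([3, 4, 5], dtype=np.float32)
var = np.array([0.5, 0.7, 0.3], dtype=np.float32)
eps = 1e-5
x = np.arange(3 * 5 * 5, dtype=np.float32).reshape(1, 3, 5, 5)

def bc(v):
    # reshape per-channel params so they broadcast over NCHW
    return v.reshape(1, 3, 1, 1)

A = bc(s / np.sqrt(var + eps))
B = bc(bias) - bc(mean) * A

bn_out = bc(s) * (x - bc(mean)) / np.sqrt(bc(var) + eps) + bc(bias)
assert np.allclose(A * x + B, bn_out)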
Example #4
def test_batchnorm_to_affine_lfc_w1a1():
    lfc = get_test_model_trained("LFC", 1, 1)
    bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    new_model = model.transform(BatchNormToAffine())
    # load one of the test vectors
    raw_i = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    input_dict = {"0": nph.to_array(input_tensor)}
    assert oxe.compare_execution(model, new_model, input_dict)
    os.remove(export_onnx_path)
Example #5
def test_batchnorm_to_affine_shufflenet():
    try:
        ureq.urlretrieve(download_url, export_onnx_path)
    except Exception:
        pass  # a failed download falls through to the file check below
    if not os.path.isfile(export_onnx_path):
        pytest.skip("Couldn't download ONNX model, skipping")
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    iname = model.graph.input[0].name
    oname = model.graph.output[0].name
    ishape = model.get_tensor_shape(iname)
    rand_inp = gen_finn_dt_tensor(DataType.INT8, ishape)
    input_dict = {iname: rand_inp}
    expected = oxe.execute_onnx(model, input_dict)[oname]
    new_model = model.transform(BatchNormToAffine())
    # check that there are no BN nodes left
    op_types = list(map(lambda x: x.op_type, new_model.graph.node))
    assert "BatchNormalization" not in op_types
    produced = oxe.execute_onnx(new_model, input_dict)[oname]
    assert np.isclose(expected, produced).all()
    os.remove(export_onnx_path)
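gen_finn_dt_tensor draws random values from the INT8 range but returns them as float32, which is what the downloaded float ONNX graph expects at its input. A hypothetical numpy stand-in, just to make the intent concrete:

import numpy as np

def gen_int8_like_tensor(shape):
    # hypothetical stand-in for gen_finn_dt_tensor(DataType.INT8, shape):
    # integers in [-128, 127], stored as float32 for a float ONNX model
    return np.random.randint(-128, 128, size=shape).astype(np.float32)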
Example #6
def test_batchnorm_to_affine_cnv_w1a1():
    cnv = get_test_model_trained("CNV", 1, 1)
    bo.export_finn_onnx(cnv, (1, 3, 32, 32), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz")
    input_tensor = np.load(fn)["arr_0"].astype(np.float32)
    input_tensor = input_tensor / 255
    assert input_tensor.shape == (1, 3, 32, 32)
    input_dict = {"0": input_tensor}
    output_dict = oxe.execute_onnx(model, input_dict)
    expected = output_dict[list(output_dict.keys())[0]]
    new_model = model.transform(BatchNormToAffine())
    # check that there are no BN nodes left
    op_types = list(map(lambda x: x.op_type, new_model.graph.node))
    assert "BatchNormalization" not in op_types
    output_dict_p = oxe.execute_onnx(new_model, input_dict)
    produced = output_dict_p[list(output_dict_p.keys())[0]]
    assert np.isclose(expected, produced).all()
    assert np.argmax(produced) == 3
    os.remove(export_onnx_path)