def test_change_datalayout_quantavgpool(s, k, ibits, obits, signed, c, idim):
    n = 1
    odim = compute_pool_output_dim(idim, k, s)
    # determine input FINN datatype
    if signed is True:
        prefix = "INT"
    else:
        prefix = "UINT"
    dt_name = prefix + str(ibits)
    dtype = DataType[dt_name]

    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [n, c, idim, idim])
    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [n, c, odim, odim])

    node = helper.make_node(
        "QuantAvgPool2d",
        ["inp"],
        ["outp"],
        domain="finn",
        stride=s,
        kernel=k,
        ibits=ibits,
        obits=obits,
        signed=signed,
        data_layout="NCHW",
    )
    graph = helper.make_graph(
        nodes=[node], name="single-quantavgpool", inputs=[inp], outputs=[outp]
    )

    model = helper.make_model(graph)
    model = ModelWrapper(model)
    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())
    model = model.transform(InferDataLayouts())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model_transformed = model.transform(ChangeDataLayoutQuantAvgPool2d())
    model_transformed = model_transformed.transform(InferShapes())
    model_transformed = model_transformed.transform(InferDataTypes())
    model_transformed = model_transformed.transform(InferDataLayouts())
    model_transformed = model_transformed.transform(GiveUniqueNodeNames())
    model_transformed = model_transformed.transform(GiveReadableTensorNames())

    inp_values = gen_finn_dt_tensor(dtype, [n, c, idim, idim])
    idict = {"inp": inp_values}
    assert oxe.compare_execution(model, model_transformed, idict)
    assert len(model.graph.node) + 2 == len(model_transformed.graph.node)
    assert model_transformed.graph.node[-1].op_type == "Transpose"
    assert model_transformed.graph.node[0].op_type == "Transpose"
    # check if QuantAvgPool2d node has datalayout set correctly
    node = model_transformed.graph.node[1]
    d_layout = get_by_name(node.attribute, "data_layout").s.decode("UTF-8")
    assert d_layout == "NHWC"
    assert model_transformed.get_tensor_layout(node.input[0]) == DataLayout.NHWC
    assert model_transformed.get_tensor_layout(node.output[0]) == DataLayout.NHWC
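# The test above computes the expected pooled size via compute_pool_output_dim.
# The sketch below shows the standard pooling output-size arithmetic it is
# assumed to implement; the actual FINN helper may differ in signature (e.g.
# padding handling), so treat this as an illustration only.
def compute_pool_output_dim(idim, k, stride, pad=0):
    # a window of size k slides with the given stride over the (padded) input
    return int((idim + 2 * pad - k) / stride) + 1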
def test_collapse_repeated_op():
    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
    add_param_0 = oh.make_tensor_value_info("add_param_0", TensorProto.FLOAT, [2])
    mul_param_0 = oh.make_tensor_value_info("mul_param_0", TensorProto.FLOAT, [2])
    add_param_1 = oh.make_tensor_value_info("add_param_1", TensorProto.FLOAT, [2])
    mul_param_1 = oh.make_tensor_value_info("mul_param_1", TensorProto.FLOAT, [2])
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=[add_param_0, mul_param_0, add_param_1, mul_param_1],
            nodes=[
                oh.make_node("Add", ["top_in", "add_param_0"], ["middle_0"]),
                oh.make_node("Add", ["middle_0", "add_param_1"], ["middle_1"]),
                oh.make_node("Mul", ["middle_1", "mul_param_0"], ["middle_2"]),
                oh.make_node("Mul", ["middle_2", "mul_param_1"], ["top_out"]),
            ],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    model.set_initializer("add_param_0", np.asarray([1, 3], dtype=np.float32))
    model.set_initializer("add_param_1", np.asarray([-1, 3], dtype=np.float32))
    model.set_initializer("mul_param_0", np.asarray([2, 4], dtype=np.float32))
    model.set_initializer("mul_param_1", np.asarray([2, -4], dtype=np.float32))
    new_model = model.transform(CollapseRepeatedAdd())
    new_model = new_model.transform(CollapseRepeatedMul())
    inp_dict = {"top_in": np.asarray([-1.0, 1.0], dtype=np.float32)}
    assert ox.compare_execution(model, new_model, inp_dict)
def test_depthwise_conv_hls_rtlsim(act, pe, k, stride, padding):
    idt = wdt = DataType.INT4
    ifm_dim = 6
    ifm_ch = 4

    # set up reference model consisting of Im2Col + MatMul (+ MultiThreshold)
    model = set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding)

    input_tensor = gen_finn_dt_tensor(idt, [1, ifm_dim, ifm_dim, ifm_ch])
    input_dict = {"inp": input_tensor}

    new_model = model.transform(InferConvInpGen())
    new_model = new_model.transform(InferVVAU())

    # set SIMD in ConvInputGen node and PE in VVAU node
    for n in new_model.graph.node:
        if n.op_type == "ConvolutionInputGenerator":
            convinputgen_node = getCustomOp(n)
            convinputgen_node.set_nodeattr("SIMD", pe)
        elif n.op_type == "Vector_Vector_Activate_Batch":
            vvau_node = getCustomOp(n)
            vvau_node.set_nodeattr("PE", pe)

    new_model = new_model.transform(SetExecMode("rtlsim"))
    new_model = new_model.transform(GiveUniqueNodeNames())
    new_model = new_model.transform(PrepareIP("xc7z020clg400-1", 5))
    new_model = new_model.transform(HLSSynthIP())
    new_model = new_model.transform(PrepareRTLSim())

    assert oxe.compare_execution(model, new_model, input_dict)
def test_move_scalar_add_past_matmul():
    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [1, 2])
    add_param = oh.make_tensor_value_info("add_param", TensorProto.FLOAT, [1, 1])
    matmul_param = oh.make_tensor_value_info("matmul_param", TensorProto.FLOAT, [2, 2])
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [1, 2])
    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=[add_param, matmul_param],
            nodes=[
                oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
                oh.make_node("MatMul", ["middle", "matmul_param"], ["top_out"]),
            ],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    model.set_initializer("add_param", np.asarray([[3]], dtype=np.float32))
    model.set_initializer(
        "matmul_param", np.asarray([[2, 4], [-1, 1]], dtype=np.float32)
    )
    new_model = model.transform(MoveScalarAddPastMatMul())
    inp_dict = {"top_in": np.asarray([[-1.0, 1.0]], dtype=np.float32)}
    assert ox.compare_execution(model, new_model, inp_dict)
    assert new_model.graph.node[0].op_type == "MatMul"
    assert new_model.graph.node[1].op_type == "Add"
    assert new_model.graph.node[0].output[0] == new_model.graph.node[1].input[0]
def test_move_identical_op_past_join_op(perm):
    model = create_model(perm)

    # Create input data
    input0_tensor_name = model.graph.input[0].name
    input1_tensor_name = model.graph.input[1].name
    # Note: it is assumed that both tensors have the same shape and data type
    input_shape = model.get_tensor_shape(input0_tensor_name)
    input_dtype = model.get_tensor_datatype(input0_tensor_name)
    input_val = gen_finn_dt_tensor(input_dtype, input_shape)
    input_dict = {}
    input_dict[input0_tensor_name] = input_val
    input_dict[input1_tensor_name] = input_val

    model_transformed = model.transform(MoveTransposePastJoinAdd())

    assert oxe.compare_execution(model, model_transformed, input_dict)

    # Check if order changed
    node0_input0_model = model.find_consumers(model.graph.input[0].name)[0].op_type
    node1_input1_model = model.find_consumers(model.graph.input[1].name)[0].op_type
    node0_input0_model_transformed = model_transformed.find_consumers(
        model_transformed.graph.input[0].name
    )[0].op_type
    node1_input1_model_transformed = model_transformed.find_consumers(
        model_transformed.graph.input[1].name
    )[0].op_type
    assert node0_input0_model != node0_input0_model_transformed
    assert node1_input1_model != node1_input1_model_transformed
def test_move_flatten_past_affine(data_layout, batch_size):
    if data_layout == DataLayout.NHWC:
        ishape = [batch_size, 1, 1, 1024]
        oshape = [batch_size, 1000]
    else:
        ishape = [batch_size, 1024, 1, 1]
        oshape = [batch_size, 1000]

    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, ishape)
    # value_info names must match the tensor names used by the nodes below
    a0 = helper.make_tensor_value_info("a0", TensorProto.FLOAT, [1024, 1000])
    a1 = helper.make_tensor_value_info("a1", TensorProto.FLOAT, [])
    a2 = helper.make_tensor_value_info("a2", TensorProto.FLOAT, [1000])
    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, oshape)

    flatten_node = helper.make_node("Flatten", ["inp"], ["flatten_out"])
    matmul_node = helper.make_node("MatMul", ["flatten_out", "a0"], ["matmul_out"])
    mul_node = helper.make_node("Mul", ["matmul_out", "a1"], ["mul_out"])
    add_node = helper.make_node("Add", ["mul_out", "a2"], ["outp"])

    graph = helper.make_graph(
        nodes=[flatten_node, matmul_node, mul_node, add_node],
        name="move-reshape-graph",
        inputs=[inp],
        outputs=[outp],
        value_info=[a0, a1, a2],
    )

    model = helper.make_model(graph, producer_name="move_reshape_model")
    model = ModelWrapper(model)

    # initialize values
    a0_values = gen_finn_dt_tensor(DataType["TERNARY"], [1024, 1000])
    model.set_initializer("a0", a0_values)
    a1_values = np.random.uniform(low=0.1, high=0.99, size=(1)).astype(np.float32)
    model.set_initializer("a1", a1_values)
    a2_values = np.random.uniform(low=-1, high=1, size=(1000)).astype(np.float32)
    model.set_initializer("a2", a2_values)

    model.set_tensor_datatype("inp", DataType["INT2"])
    model.set_tensor_layout("inp", data_layout)
    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())
    model = model.transform(InferDataLayouts())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())

    # compare execution before and after transformation
    inp_values = gen_finn_dt_tensor(DataType["INT2"], ishape)
    idict = {model.graph.input[0].name: inp_values}
    model_transformed = model.transform(MoveFlattenPastAffine())
    assert oxe.compare_execution(model, model_transformed, idict)

    # depending on data layout, check if graph is transformed or not
    if data_layout == DataLayout.NHWC:
        # check if nodes have new order in transformed graph
        assert model.graph != model_transformed.graph
        assert model_transformed.graph.node[-1].op_type == "Flatten"
    else:
        assert model.graph == model_transformed.graph
def test_conv_lowering_conv_1x1():
    np.random.seed(0)

    in_feature_dim_h = 7
    in_feature_dim_w = 7
    in_chn = 3
    kernel_size = 1
    out_feature_dim_h = in_feature_dim_h
    out_feature_dim_w = in_feature_dim_w

    input_shape = [1, in_chn, in_feature_dim_h, in_feature_dim_w]
    output_shape = [1, in_chn, out_feature_dim_h, out_feature_dim_w]
    conv_param_shape = [in_chn, in_chn, kernel_size, kernel_size]

    conv_config = {}
    conv_config["dilations"] = [1, 1]
    conv_config["group"] = 1
    conv_config["kernel_shape"] = [kernel_size, kernel_size]
    conv_config["pads"] = [0, 0, 0, 0]
    conv_config["strides"] = [1, 1]

    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape)
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, output_shape)
    value_info = [oh.make_tensor_value_info("p1", TensorProto.FLOAT, conv_param_shape)]

    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=value_info,
            nodes=[oh.make_node("Conv", ["top_in", "p1"], ["top_out"], **conv_config)],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    model.set_initializer("p1", np.random.rand(*conv_param_shape).astype(np.float32))

    new_model = model.transform(LowerConvsToMatMul())
    inp_dict = {"top_in": np.random.rand(*input_shape).astype(np.float32)}

    assert oxe.compare_execution(model, new_model, inp_dict)
    assert new_model.graph.node[0].op_type == "Transpose"
    assert new_model.graph.node[1].op_type == "MatMul"
    assert new_model.graph.node[2].op_type == "Transpose"
    assert len(new_model.graph.node) == 3
def test_batchnorm_to_affine_lfc_w1a1():
    lfc = get_test_model_trained("LFC", 1, 1)
    bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    new_model = model.transform(BatchNormToAffine())
    # load one of the test vectors
    raw_i = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    input_dict = {"0": nph.to_array(input_tensor)}
    assert oxe.compare_execution(model, new_model, input_dict)
    os.remove(export_onnx_path)
def test_move_scalar_past_matmul_only_if_linear(test_args):
    scalar_op = test_args[0]
    transf_fxn = test_args[1]

    input_shape = [1, 2]
    matmul_shape = [2, 2]
    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape)
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, input_shape)

    p1 = oh.make_tensor_value_info("p1", TensorProto.FLOAT, [1, 1])
    p2 = oh.make_tensor_value_info("p2", TensorProto.FLOAT, matmul_shape)
    p3 = oh.make_tensor_value_info("p3", TensorProto.FLOAT, matmul_shape)
    p4 = oh.make_tensor_value_info("p4", TensorProto.FLOAT, matmul_shape)

    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=[p1, p2, p3, p4],
            nodes=[
                oh.make_node(scalar_op, ["top_in", "p1"], ["t1"]),
                oh.make_node("MatMul", ["t1", "p2"], ["fork"]),
                oh.make_node("MatMul", ["fork", "p3"], ["t3"]),
                oh.make_node(scalar_op, ["t3", "fork"], ["t4"]),
                oh.make_node("MatMul", ["t4", "p4"], ["top_out"]),
            ],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    np.random.seed(0)
    model.set_initializer("p1", np.random.rand(1, 1).astype(np.float32))
    model.set_initializer("p2", np.random.rand(*matmul_shape).astype(np.float32))
    model.set_initializer("p3", np.random.rand(*matmul_shape).astype(np.float32))
    model.set_initializer("p4", np.random.rand(*matmul_shape).astype(np.float32))

    # Transform
    new_model = model.transform(transf_fxn)

    # Test
    inp_dict = {"top_in": np.random.rand(*input_shape).astype(np.float32)}
    assert ox.compare_execution(model, new_model, inp_dict)
    assert new_model.graph.node[0].op_type == "MatMul"
    assert new_model.graph.node[1].op_type == scalar_op
    assert new_model.graph.node[2].op_type == "MatMul"
    assert new_model.graph.node[3].op_type == scalar_op
    assert new_model.graph.node[4].op_type == "MatMul"
def test_extend_partition(p, extend_id):
    if p == 0:
        if extend_id != [0]:
            pytest.skip("Only the first partition node can be extended")
    if p == 1:
        if extend_id != [1]:
            pytest.skip("Only the second partition node can be extended")
        else:
            extend_id = [6]  # The 6th node is the index of the GenericPartition
            # node, so we set the index to the right value

    model = create_model()

    # Partition the model first
    partitionings = [
        {0: range(0, 6)},
        {0: range(6, 12)},
        {0: range(0, 6), 1: range(6, 12)},
    ]
    partitioning = partitionings[p]

    model = model.transform(PartitionFromDict(partitioning))

    # Create input data
    input0_tensor_name = model.graph.input[0].name

    input_shape = model.get_tensor_shape(input0_tensor_name)
    input_dtype = model.get_tensor_datatype(input0_tensor_name)
    input_val = gen_finn_dt_tensor(input_dtype, input_shape)
    input_dict = {}
    input_dict[input0_tensor_name] = input_val

    # Extend the model
    model_extended = model.transform(ExtendPartition(extend_id))

    assert oxe.compare_execution(model, model_extended, input_dict)

    # Check if FINN datatypes are retained
    for n in model_extended.graph.node:
        if n.op_type == "Conv":
            assert model_extended.get_tensor_datatype(n.input[1]) == DataType.INT4
def test_sign_to_thres():
    lfc = get_test_model_trained("LFC", 1, 1)
    bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    new_model = model.transform(ConvertSignToThres())
    assert new_model.graph.node[3].op_type == "MultiThreshold"
    # load one of the test vectors
    raw_i = get_data("finn", "data/onnx/mnist-conv/test_data_set_0/input_0.pb")
    input_tensor = onnx.load_tensor_from_string(raw_i)
    input_dict = {"0": nph.to_array(input_tensor)}
    assert oxe.compare_execution(model, new_model, input_dict)
    os.remove(export_onnx_path)
def test_collapse_repeated_only_if_linear(test_args):
    scalar_op = test_args[0]
    transf_fxn = test_args[1]

    input_shape = [4, 4]
    output_shape = input_shape
    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape)
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, output_shape)

    value_info = [oh.make_tensor_value_info("p1", TensorProto.FLOAT, [1])]
    value_info += [oh.make_tensor_value_info("p2", TensorProto.FLOAT, [1])]
    value_info += [oh.make_tensor_value_info("p3", TensorProto.FLOAT, [1])]
    value_info += [oh.make_tensor_value_info("p4", TensorProto.FLOAT, [1])]
    value_info += [oh.make_tensor_value_info("p5", TensorProto.FLOAT, [1])]

    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=value_info,
            nodes=[
                oh.make_node(scalar_op, ["top_in", "p2"], ["t1"]),
                oh.make_node(scalar_op, ["t1", "p1"], ["t2"]),
                oh.make_node(scalar_op, ["t2", "p3"], ["t3"]),
                oh.make_node(scalar_op, ["t2", "p4"], ["t4"]),
                oh.make_node(scalar_op, ["t3", "t4"], ["t5"]),
                oh.make_node(scalar_op, ["t5", "p5"], ["top_out"]),
            ],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    np.random.seed(0)
    # pass shape-(1,) arrays so the initializers match the declared [1] shapes
    model.set_initializer("p1", np.random.rand(1).astype(np.float32))
    model.set_initializer("p2", np.random.rand(1).astype(np.float32))
    model.set_initializer("p3", np.random.rand(1).astype(np.float32))
    model.set_initializer("p4", np.random.rand(1).astype(np.float32))
    model.set_initializer("p5", np.random.rand(1).astype(np.float32))

    # Transform
    new_model = model.transform(transf_fxn)

    # Test
    inp_dict = {"top_in": np.random.rand(*input_shape).astype(np.float32)}
    assert ox.compare_execution(model, new_model, inp_dict)
    assert len(new_model.graph.node) == 5
def test_move_add_past_mul_only_if_linear():
    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])

    # per-element parameters of shape [2], matching the initializers below
    value_info = [oh.make_tensor_value_info("add1_param", TensorProto.FLOAT, [2])]
    value_info += [oh.make_tensor_value_info("mul1_param", TensorProto.FLOAT, [2])]
    value_info += [oh.make_tensor_value_info("mul2_param", TensorProto.FLOAT, [2])]
    value_info += [oh.make_tensor_value_info("mul3_param", TensorProto.FLOAT, [2])]

    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=value_info,
            nodes=[
                oh.make_node("Add", ["top_in", "add1_param"], ["t1"]),
                oh.make_node("Mul", ["t1", "mul1_param"], ["fork"]),
                oh.make_node("Mul", ["fork", "mul2_param"], ["t3"]),
                oh.make_node("Add", ["t3", "fork"], ["t4"]),
                oh.make_node("Mul", ["t4", "mul3_param"], ["top_out"]),
            ],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    np.random.seed(0)
    model.set_initializer("add1_param", np.random.rand(2).astype(np.float32))
    model.set_initializer("mul1_param", np.random.rand(2).astype(np.float32))
    model.set_initializer("mul2_param", np.random.rand(2).astype(np.float32))
    model.set_initializer("mul3_param", np.random.rand(2).astype(np.float32))

    new_model = model.transform(MoveAddPastMul())
    inp_dict = {"top_in": np.random.rand(2).astype(np.float32)}

    assert ox.compare_execution(model, new_model, inp_dict)
    assert new_model.graph.node[0].op_type == "Mul"
    assert new_model.graph.node[1].op_type == "Add"
    assert new_model.graph.node[2].op_type == "Mul"
    assert new_model.graph.node[3].op_type == "Add"
    assert new_model.graph.node[4].op_type == "Mul"
def test_absorb_opposite_transposes():
    np.random.seed(0)
    input_shape = [1, 3, 4, 2]
    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape)
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, input_shape)
    value_info = [oh.make_tensor_value_info("add_param_0", TensorProto.FLOAT, [1])]
    value_info += [oh.make_tensor_value_info("add_param_1", TensorProto.FLOAT, [1])]
    value_info += [oh.make_tensor_value_info("mul_param_0", TensorProto.FLOAT, [1])]
    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=value_info,
            nodes=[
                oh.make_node("Add", ["top_in", "add_param_0"], ["t0"]),
                oh.make_node("Transpose", ["t0"], ["t1"], perm=[0, 2, 3, 1]),
                oh.make_node("Transpose", ["t1"], ["t2"], perm=[0, 3, 1, 2]),
                oh.make_node("Add", ["t2", "add_param_1"], ["t3"]),
                oh.make_node("Transpose", ["t3"], ["t4"], perm=[0, 2, 3, 1]),
                oh.make_node("Transpose", ["t4"], ["t5"], perm=[0, 3, 1, 2]),
                oh.make_node("Add", ["t5", "t2"], ["t6"]),
                oh.make_node("Mul", ["t6", "mul_param_0"], ["top_out"]),
            ],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    model.set_initializer("add_param_0", np.asarray([1], dtype=np.float32))
    model.set_initializer("add_param_1", np.asarray([3], dtype=np.float32))
    model.set_initializer("mul_param_0", np.asarray([2], dtype=np.float32))
    new_model = model.transform(AbsorbConsecutiveTransposes())
    new_model = new_model.transform(InferShapes())
    inp_dict = {"top_in": np.random.rand(*input_shape).astype(np.float32)}
    # compare the original model against the transformed one
    assert ox.compare_execution(model, new_model, inp_dict)
    assert len(new_model.graph.node) == 4
    for n in new_model.graph.node:
        assert n.op_type != "Transpose"
def test_move_flatten_past_topk(data_layout, batch_size):
    if data_layout == DataLayout.NHWC:
        ishape = [batch_size, 1, 1, 1024]
        oshape = [batch_size, 1024]
    else:
        ishape = [batch_size, 1024, 1, 1]
        oshape = [batch_size, 1024]

    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, ishape)
    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, oshape)

    flatten_node = helper.make_node("Flatten", ["inp"], ["outp"])

    graph = helper.make_graph(
        nodes=[flatten_node],
        name="move-flatten-graph",
        inputs=[inp],
        outputs=[outp],
    )

    model = helper.make_model(graph, producer_name="move_flatten_model")
    model = ModelWrapper(model)

    model.set_tensor_datatype("inp", DataType.INT2)
    model.set_tensor_layout("inp", data_layout)
    model = model.transform(InsertTopK())
    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())
    model = model.transform(InferDataLayouts())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())

    # compare execution before and after transformation
    inp_values = gen_finn_dt_tensor(DataType.INT2, ishape)
    idict = {model.graph.input[0].name: inp_values}
    model_transformed = model.transform(MoveFlattenPastTopK())
    assert oxe.compare_execution(model, model_transformed, idict)

    # depending on data layout, check if graph is transformed or not
    if data_layout == DataLayout.NHWC:
        # check if nodes have new order in transformed graph
        assert model.graph != model_transformed.graph
        assert model_transformed.graph.node[-1].op_type == "Flatten"
    else:
        assert model.graph == model_transformed.graph
def test_factor_out_mul_sign_magnitude():
    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [1, 2])
    mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [1, 2])
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [1, 2])
    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=[mul_param],
            nodes=[oh.make_node("Mul", ["top_in", "mul_param"], ["top_out"])],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    model.set_initializer("mul_param", np.asarray([[-1, 4]], dtype=np.float32))
    new_model = model.transform(FactorOutMulSignMagnitude())
    inp_dict = {"top_in": np.asarray([[-1.0, 1.0]], dtype=np.float32)}
    assert ox.compare_execution(model, new_model, inp_dict)
def test_move_add_past_mul_multi():
    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
    add_param_0 = oh.make_tensor_value_info("add_param_0", TensorProto.FLOAT, [2])
    mul_param_0 = oh.make_tensor_value_info("mul_param_0", TensorProto.FLOAT, [2])
    add_param_1 = oh.make_tensor_value_info("add_param_1", TensorProto.FLOAT, [2])
    mul_param_1 = oh.make_tensor_value_info("mul_param_1", TensorProto.FLOAT, [2])
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=[add_param_0, mul_param_0, add_param_1, mul_param_1],
            nodes=[
                oh.make_node("Add", ["top_in", "add_param_0"], ["middle_0"]),
                oh.make_node("Mul", ["middle_0", "mul_param_0"], ["middle_1"]),
                oh.make_node("Add", ["middle_1", "add_param_1"], ["middle_2"]),
                oh.make_node("Mul", ["middle_2", "mul_param_1"], ["top_out"]),
            ],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    model.set_initializer("add_param_0", np.asarray([1, 3], dtype=np.float32))
    model.set_initializer("mul_param_0", np.asarray([2, 4], dtype=np.float32))
    model.set_initializer("add_param_1", np.asarray([-1, 3], dtype=np.float32))
    model.set_initializer("mul_param_1", np.asarray([2, -4], dtype=np.float32))
    new_model = model.transform(MoveAddPastMul())
    inp_dict = {"top_in": np.asarray([-1.0, 1.0], dtype=np.float32)}
    assert ox.compare_execution(model, new_model, inp_dict)
    assert new_model.graph.node[0].op_type == "Mul"
    assert new_model.graph.node[1].op_type == "Mul"
    assert new_model.graph.node[2].op_type == "Add"
    assert new_model.graph.node[3].op_type == "Add"
    for i in range(len(new_model.graph.node) - 1):
        assert new_model.graph.node[i].output[0] == new_model.graph.node[i + 1].input[0]
def test_move_add_past_mul_single():
    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
    add_param = oh.make_tensor_value_info("add_param", TensorProto.FLOAT, [2])
    mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [2])
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=[add_param, mul_param],
            nodes=[
                oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
                oh.make_node("Mul", ["middle", "mul_param"], ["top_out"]),
            ],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    model.set_initializer("add_param", np.asarray([1, 3], dtype=np.float32))
    model.set_initializer("mul_param", np.asarray([2, 4], dtype=np.float32))
    new_model = model.transform(MoveAddPastMul())
    inp_dict = {"top_in": np.asarray([-1.0, 1.0], dtype=np.float32)}
    assert ox.compare_execution(model, new_model, inp_dict)
def test_convert_to_hls_conv_fc_transition(conv_config, depthwise, use_reshape):
    np.random.seed(0)
    idt = DataType["UINT4"]
    odt = DataType["UINT4"]
    conv_weight_dt = DataType["INT4"]
    fc_weight_dt = DataType["INT4"]

    input_shape, kernel_shape, stride, pad = conv_config
    kernel_size_h, kernel_size_w = kernel_shape
    input_size_h, input_size_w = input_shape
    stride_h, stride_w = stride
    pad_h, pad_w = pad

    in_chn = 4
    fc_filters = 16

    if depthwise is True:
        group = out_chn = in_chn
        conv_param_shape = [out_chn, 1, kernel_size_h, kernel_size_w]
    else:
        group = 1
        out_chn = 8
        conv_param_shape = [out_chn, in_chn, kernel_size_h, kernel_size_w]

    output_size_h = compute_conv_output_dim(input_size_h, kernel_size_h, stride_h, 2 * pad_h)
    output_size_w = compute_conv_output_dim(input_size_w, kernel_size_w, stride_w, 2 * pad_w)

    input_shape = [1, in_chn, input_size_h, input_size_w]
    fc_param_shape = [out_chn * output_size_h * output_size_w, fc_filters]
    output_shape = [1, fc_filters]

    conv_config = {}
    conv_config["dilations"] = [1, 1]
    conv_config["group"] = group
    conv_config["kernel_shape"] = [kernel_size_h, kernel_size_w]
    conv_config["pads"] = [pad_h, pad_w, pad_h, pad_w]
    conv_config["strides"] = [stride_h, stride_w]

    global_in = helper.make_tensor_value_info("global_in", TensorProto.FLOAT, input_shape)
    global_out = helper.make_tensor_value_info("global_out", TensorProto.FLOAT, output_shape)
    value_info = [
        helper.make_tensor_value_info("conv_param", TensorProto.FLOAT, conv_param_shape),
        helper.make_tensor_value_info("thres1_param", TensorProto.FLOAT, (out_chn, 15)),
        helper.make_tensor_value_info("matmul_param", TensorProto.FLOAT, fc_param_shape),
        helper.make_tensor_value_info("thres2_param", TensorProto.FLOAT, (fc_filters, 15)),
        helper.make_tensor_value_info("reshape_shape", TensorProto.INT64, []),
    ]

    if use_reshape:
        flatten_node = helper.make_node(
            "Reshape", ["thres1_out", "reshape_shape"], ["flatten_out"]
        )
    else:
        flatten_node = helper.make_node(
            "Flatten", ["thres1_out"], ["flatten_out"], axis=1
        )

    modelproto = helper.make_model(
        helper.make_graph(
            name="test",
            inputs=[global_in],
            outputs=[global_out],
            value_info=value_info,
            nodes=[
                helper.make_node(
                    "Conv", ["global_in", "conv_param"], ["conv_out"], **conv_config
                ),
                helper.make_node(
                    "MultiThreshold",
                    ["conv_out", "thres1_param"],
                    ["thres1_out"],
                    domain="finn.custom_op.general",
                    out_dtype="UINT4",
                ),
                flatten_node,
                helper.make_node(
                    "MatMul", ["flatten_out", "matmul_param"], ["matmul_out"]
                ),
                helper.make_node(
                    "MultiThreshold",
                    ["matmul_out", "thres2_param"],
                    ["global_out"],
                    domain="finn.custom_op.general",
                    out_dtype="UINT4",
                ),
            ],
        )
    )

    model = ModelWrapper(modelproto)
    model.set_tensor_datatype("global_in", idt)
    model.set_tensor_layout("global_in", DataLayout.NCHW)
    model.set_tensor_datatype("global_out", odt)
    model.set_tensor_datatype("conv_param", conv_weight_dt)
    model.set_tensor_datatype("matmul_param", fc_weight_dt)
    model.set_tensor_datatype("thres1_param", DataType["INT32"])
    model.set_tensor_datatype("thres2_param", DataType["INT32"])

    model.set_initializer("conv_param", gen_finn_dt_tensor(conv_weight_dt, conv_param_shape))
    model.set_initializer("thres1_param", get_multithreshold_rand_params(out_chn, 15, seed=0))
    model.set_initializer(
        "thres2_param", get_multithreshold_rand_params(fc_filters, 15, seed=0)
    )
    model.set_initializer("matmul_param", gen_finn_dt_tensor(fc_weight_dt, fc_param_shape))
    model.set_initializer("reshape_shape", np.array([1, -1]))

    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())
    model = model.transform(InferDataLayouts())

    # streamlining
    new_model = model.transform(MoveScalarLinearPastInvariants())
    new_model = new_model.transform(Streamline())
    new_model = new_model.transform(LowerConvsToMatMul())
    new_model = new_model.transform(absorb.AbsorbTransposeIntoMultiThreshold())
    new_model = new_model.transform(Streamline())
    new_model = new_model.transform(InferDataLayouts())
    new_model = new_model.transform(RemoveUnusedTensors())

    # convert_to_hls
    if depthwise is True:
        new_model = new_model.transform(to_hls.InferVVAU())
    new_model = new_model.transform(to_hls.InferQuantizedStreamingFCLayer())
    new_model = new_model.transform(to_hls.InferThresholdingLayer())
    new_model = new_model.transform(to_hls.InferConvInpGen())
    new_model = new_model.transform(to_hls.InferStreamingMaxPool())
    new_model = new_model.transform(RemoveCNVtoFCFlatten())
    new_model = new_model.transform(absorb.AbsorbConsecutiveTransposes())
    new_model = new_model.transform(GiveUniqueNodeNames())
    new_model = new_model.transform(InferDataLayouts())

    # prepare cppsim
    new_model = new_model.transform(PrepareCppSim())
    new_model = new_model.transform(CompileCppSim())
    new_model = new_model.transform(SetExecMode("cppsim"))

    # check for correct execution
    x = gen_finn_dt_tensor(idt, input_shape)
    inp_dict = {model.graph.input[0].name: x}
    assert oxe.compare_execution(model, new_model, inp_dict)

    num_transpose = len(new_model.get_nodes_by_op_type("Transpose"))
    num_flatten = len(new_model.get_nodes_by_op_type("Flatten"))
    num_reshape = len(new_model.get_nodes_by_op_type("Reshape"))

    # check if transpose -> flatten was removed
    assert num_transpose == 1 and num_flatten == 0 and num_reshape == 0
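# The conversion tests above and below rely on compute_conv_output_dim to
# predict the convolution output size. A minimal sketch follows, assuming the
# fourth argument is the total padding along one spatial dimension; the actual
# FINN helper may differ in signature across versions, so treat this as an
# illustration rather than the library implementation.
def compute_conv_output_dim(ifm_dim, k, stride, total_pad=0, dilation=1):
    # standard convolution output-size arithmetic
    return int((ifm_dim + total_pad - dilation * (k - 1) - 1) / stride) + 1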
def test_convert_to_hls_conv_layer(conv_config, depthwise, exec_mode):
    kernel_size, stride, pad = conv_config
    np.random.seed(0)
    idt = DataType.UINT4

    in_feature_dim = 7
    in_chn = 16

    if depthwise is True:
        group = out_chn = in_chn
        conv_param_shape = [out_chn, 1, kernel_size, kernel_size]
    else:
        group = 1
        out_chn = 20
        conv_param_shape = [out_chn, in_chn, kernel_size, kernel_size]

    out_feature_dim = compute_conv_output_dim(in_feature_dim, kernel_size, stride, pad)

    input_shape = [1, in_chn, in_feature_dim, in_feature_dim]
    output_shape = [1, out_chn, out_feature_dim, out_feature_dim]

    conv_weight_dt = DataType.UINT4

    conv_config = {}
    conv_config["dilations"] = [1, 1]
    conv_config["group"] = group
    conv_config["kernel_shape"] = [kernel_size, kernel_size]
    conv_config["pads"] = [pad, pad, pad, pad]
    conv_config["strides"] = [stride, stride]

    top_in = helper.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape)
    top_out = helper.make_tensor_value_info("top_out", TensorProto.FLOAT, output_shape)
    value_info = [
        helper.make_tensor_value_info("p1", TensorProto.FLOAT, conv_param_shape)
    ]

    modelproto = helper.make_model(
        helper.make_graph(
            name="conv_test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=value_info,
            nodes=[
                helper.make_node("Conv", ["top_in", "p1"], ["top_out"], **conv_config)
            ],
        )
    )

    model = ModelWrapper(modelproto)
    model.set_tensor_datatype("top_in", idt)
    model.set_tensor_datatype("top_out", idt)
    model.set_tensor_datatype("p1", conv_weight_dt)
    model.set_initializer("p1", gen_finn_dt_tensor(conv_weight_dt, conv_param_shape))

    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())

    new_model = model.transform(LowerConvsToMatMul())
    new_model = new_model.transform(to_hls.InferConvInpGen())
    if depthwise is True:
        new_model = new_model.transform(to_hls.InferVVAU())
    else:
        new_model = new_model.transform(to_hls.InferQuantizedStreamingFCLayer())
        fc_node = new_model.get_nodes_by_op_type("StreamingFCLayer_Batch")[0]
        fc_inst = getCustomOp(fc_node)
        mw = fc_inst.get_nodeattr("MW")
        mh = fc_inst.get_nodeattr("MH")
        pe_cands = list(filter(lambda x: mh % x == 0, range(2, mh + 1)))
        simd_cands = list(filter(lambda x: mw % x == 0, range(2, mw + 1)))
        fc_inst.set_nodeattr("PE", pe_cands[0])
        fc_inst.set_nodeattr("SIMD", simd_cands[0])

    new_model = new_model.transform(GiveUniqueNodeNames())
    new_model = new_model.transform(InferShapes())
    new_model = new_model.transform(InferDataTypes())

    if exec_mode == "cppsim":
        new_model = new_model.transform(PrepareCppSim())
        new_model = new_model.transform(CompileCppSim())
        new_model = new_model.transform(SetExecMode("cppsim"))
    elif exec_mode == "rtlsim":
        new_model = new_model.transform(SetExecMode("rtlsim"))
        new_model = new_model.transform(GiveUniqueNodeNames())
        new_model = new_model.transform(PrepareIP("xc7z020clg400-1", 5))
        new_model = new_model.transform(HLSSynthIP())
        new_model = new_model.transform(PrepareRTLSim())
    else:
        raise Exception("Unknown exec_mode")

    x = gen_finn_dt_tensor(idt, input_shape)
    inp_dict = {model.graph.input[0].name: x}
    assert oxe.compare_execution(model, new_model, inp_dict)

    if kernel_size == 1 and stride > 1 and pad == 0:
        assert new_model.graph.node[1].op_type == "DownSampler"
        if exec_mode == "rtlsim":
            node = new_model.get_nodes_by_op_type("DownSampler")[0]
            inst = getCustomOp(node)
            cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim")
            exp_cycles_dict = new_model.analysis(exp_cycles_per_layer)
            exp_cycles = exp_cycles_dict[node.name]
            assert np.isclose(exp_cycles, cycles_rtlsim, atol=11)
            assert exp_cycles != 0

    if pad == 1:
        padding_node = new_model.get_nodes_by_op_type("FMPadding_Batch")[0]
        padding_inst = getCustomOp(padding_node)
        assert padding_inst.get_nodeattr("SIMD") == in_chn

    if depthwise is True and exec_mode == "rtlsim":
        node = new_model.get_nodes_by_op_type("Vector_Vector_Activate_Batch")[0]
        inst = getCustomOp(node)
        cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim")
        exp_cycles_dict = new_model.analysis(exp_cycles_per_layer)
        exp_cycles = exp_cycles_dict[node.name]
        assert np.isclose(exp_cycles, cycles_rtlsim, atol=11)
        assert exp_cycles != 0
def test_absorb_transp_into_flatten(perm, shape, ishape, data_layout):
    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, ishape)
    transp_node = helper.make_node("Transpose", ["inp"], ["transp_out"], perm=perm)
    dummy_in = np.random.uniform(low=0, high=1, size=tuple(ishape)).astype(np.float32)
    if shape is None:
        shape_node = helper.make_node("Flatten", ["transp_out"], ["outp"])
        dummy_in = dummy_in.transpose(tuple(perm))
        oshape = dummy_in.reshape(dummy_in.shape[0], -1).shape
        outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, oshape)
        shape0 = None
    else:
        # Reshape expects an int64 shape tensor as its second input
        shape0 = helper.make_tensor_value_info("shape0", TensorProto.INT64, shape)
        shape_node = helper.make_node("Reshape", ["transp_out", "shape0"], ["outp"])
        oshape = dummy_in.transpose(tuple(perm)).reshape(tuple(shape)).shape
        outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, oshape)

    graph = helper.make_graph(
        nodes=[transp_node, shape_node],
        name="absorb-transpose-graph",
        inputs=[inp],
        outputs=[outp],
    )

    model = helper.make_model(graph, producer_name="absorb_transpose_model")
    model = ModelWrapper(model)
    if shape is not None:
        model.graph.value_info.append(shape0)
        model.set_initializer("shape0", np.asarray(shape))
    if data_layout == "NCHW":
        model.set_tensor_layout("inp", DataLayout.NCHW)
    else:
        model.set_tensor_layout("inp", DataLayout.NHWC)
    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())
    model = model.transform(InferDataLayouts())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    # model.save("test.onnx")
    model_transformed = model.transform(AbsorbTransposeIntoFlatten())
    # model_transformed.save("test2.onnx")

    # verify transformation
    inp_values = np.random.uniform(low=-1, high=1, size=tuple(ishape)).astype(np.float32)
    idict = {model.graph.input[0].name: inp_values}
    assert oxe.compare_execution(model, model_transformed, idict)

    # only some of the parameter combinations lead to a graph that will be
    # changed when AbsorbTransposeIntoFlatten is applied
    if shape == [-1, 1]:
        # not a flatten operation, so the graph will not be changed
        assert model.graph == model_transformed.graph
    elif perm == [3, 2, 0, 1]:
        # the first dimension is also part of the transpose operation,
        # so the graph will not be changed
        assert model.graph == model_transformed.graph
    # the following cases are the ones in which the model is transformed;
    # because we tested the parameters shape and perm before, we only need to
    # consider ishape and data_layout (the transformed model should only
    # contain a "Flatten" node)
    elif ishape == [1, 1, 1, 4] and data_layout == "NHWC":
        assert model_transformed.graph.node[0].op_type == "Flatten"
    elif ishape == [2, 4, 1, 1] and data_layout == "NCHW" and shape is None:
        # If the first dimension of the input tensor is not 1, flatten and
        # reshape (with shape = [1, -1]) would lead to different results
        assert model_transformed.graph.node[0].op_type == "Flatten"
    # all other cases lead to an unchanged model
    else:
        assert model.graph == model_transformed.graph
def test_move_scalar_past_conv(test_args, padding):
    scalar_op = test_args[0]
    transf_fxn = test_args[1]

    in_feature_dim = 7
    in_chn = 3
    stages = 2
    kernel_size = 3

    out_feature_dim = (
        in_feature_dim if padding else in_feature_dim - (kernel_size // 2 * 2) * stages
    )

    input_shape = [1, in_chn, in_feature_dim, in_feature_dim]
    output_shape = [1, in_chn, out_feature_dim, out_feature_dim]

    conv_param_shape = [in_chn, in_chn, kernel_size, kernel_size]

    conv_config = {}
    conv_config["dilations"] = [1, 1]
    conv_config["group"] = 1
    conv_config["kernel_shape"] = [kernel_size, kernel_size]
    if padding:
        conv_config["pads"] = [1, 1, 1, 1]
    else:
        conv_config["pads"] = [0, 0, 0, 0]
    conv_config["strides"] = [1, 1]

    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape)
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, output_shape)

    value_info = [oh.make_tensor_value_info("p1", TensorProto.FLOAT, [1])]
    value_info += [oh.make_tensor_value_info("p2", TensorProto.FLOAT, conv_param_shape)]
    value_info += [oh.make_tensor_value_info("p3", TensorProto.FLOAT, conv_param_shape)]

    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=value_info,
            nodes=[
                oh.make_node(scalar_op, ["top_in", "p1"], ["t1"]),
                oh.make_node("Conv", ["t1", "p2"], ["t2"], **conv_config),
                oh.make_node("Conv", ["t2", "p3"], ["top_out"], **conv_config),
            ],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    np.random.seed(0)
    # pass a shape-(1,) array so the initializer matches the declared [1] shape
    model.set_initializer("p1", np.random.rand(1).astype(np.float32))
    model.set_initializer("p2", np.random.rand(*conv_param_shape).astype(np.float32))
    model.set_initializer("p3", np.random.rand(*conv_param_shape).astype(np.float32))
    new_model = model.transform(transf_fxn)
    inp_dict = {"top_in": np.random.rand(*input_shape).astype(np.float32)}

    assert ox.compare_execution(model, new_model, inp_dict)
    if scalar_op == "Add":
        if padding:
            assert new_model.graph.node[0].op_type == scalar_op
            assert new_model.graph.node[1].op_type == "Conv"
            assert new_model.graph.node[2].op_type == "Conv"
        else:
            assert new_model.graph.node[0].op_type == "Conv"
            assert new_model.graph.node[1].op_type == "Conv"
            assert new_model.graph.node[2].op_type == scalar_op
    else:
        assert new_model.graph.node[0].op_type == "Conv"
        assert new_model.graph.node[1].op_type == "Conv"
        assert new_model.graph.node[2].op_type == scalar_op
def test_move_transpose_past_scalar_mul(perm, scalar, data_layout):
    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, 2, 3, 4])
    # to determine out_size we need to calculate with "perm" for this test case
    dummy_in = np.random.uniform(low=0, high=1, size=(1, 2, 3, 4)).astype(np.float32)
    out_size = dummy_in.transpose(tuple(perm)).shape

    if scalar is True:
        a0_size = []
    else:
        a0_size = out_size
    a0 = helper.make_tensor_value_info("a0", TensorProto.FLOAT, a0_size)
    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, out_size)
    transp_node = helper.make_node("Transpose", ["inp"], ["transp_out"], perm=perm)
    mul_node = helper.make_node("Mul", ["transp_out", "a0"], ["outp"])

    graph = helper.make_graph(
        nodes=[transp_node, mul_node],
        name="mv-transpose-graph",
        inputs=[inp],
        outputs=[outp],
        value_info=[a0],
    )

    model = helper.make_model(graph, producer_name="mv_transpose_model")
    model = ModelWrapper(model)

    # initialize values
    a0_values = np.random.uniform(low=0, high=1, size=tuple(a0_size)).astype(np.float32)
    model.set_initializer("a0", a0_values)
    if data_layout is not None:
        model.set_tensor_layout("inp", data_layout)
        model = model.transform(InferDataLayouts())

    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())

    # compare execution before and after transformation
    inp_values = np.random.uniform(low=0, high=1, size=(1, 2, 3, 4)).astype(np.float32)
    idict = {model.graph.input[0].name: inp_values}
    model_transformed = model.transform(MoveTransposePastScalarMul())
    assert oxe.compare_execution(model, model_transformed, idict)

    # check if order changed
    if scalar is True and data_layout is not None:
        assert model_transformed.graph.node[0] != model.graph.node[0]
        assert model_transformed.graph.node[1] != model.graph.node[1]
        assert model_transformed.graph.node[0].op_type == "Mul"
        assert model_transformed.graph.node[1].op_type == "Transpose"
        mul_input = model_transformed.graph.node[0].input[0]
        mul_output = model_transformed.graph.node[0].output[0]
        assert model_transformed.get_tensor_layout(mul_input) == data_layout
        assert model_transformed.get_tensor_layout(mul_output) == data_layout
    else:
        assert model_transformed.graph.node[0] == model.graph.node[0]
        assert model_transformed.graph.node[1] == model.graph.node[1]
        if data_layout is not None:
            mul_input = model_transformed.graph.node[1].input[0]
            mul_output = model_transformed.graph.node[1].output[0]
            assert model_transformed.get_tensor_layout(mul_input) != data_layout
            assert model_transformed.get_tensor_layout(mul_output) != data_layout
def test_move_past_fork(ch, ifmdim):
    # generate test vectors of correct shape
    if ifmdim == -1:
        input_shape = (1, ch)
    else:
        input_shape = (1, ch, ifmdim, ifmdim)
    top_in = helper.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape)
    top_out = helper.make_tensor_value_info("top_out", TensorProto.FLOAT, input_shape)

    num_of_params = 8
    value_info = []
    for i in range(num_of_params):
        value_info += [
            helper.make_tensor_value_info("p" + str(i), TensorProto.FLOAT, input_shape)
        ]

    add_1_to_move = helper.make_node("Add", ["top_in", "p0"], ["fork1"])
    mul_1_to_move = helper.make_node("Mul", ["t5", "p4"], ["fork2"])
    add_2_to_move = helper.make_node("Add", ["fork2", "p5"], ["t6"])
    mul_1_not_to_move = helper.make_node("Mul", ["t8", "p7"], ["fork3"])
    modelproto = helper.make_model(
        helper.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=value_info,
            nodes=[
                # fork1
                add_1_to_move,
                helper.make_node("Mul", ["fork1", "p1"], ["t2"]),
                helper.make_node("Mul", ["fork1", "p2"], ["t3"]),
                helper.make_node("Add", ["t2", "t3"], ["t4"]),
                helper.make_node("Add", ["t4", "p3"], ["t5"]),
                # fork2
                mul_1_to_move,
                add_2_to_move,
                helper.make_node("Add", ["fork2", "p6"], ["t7"]),
                helper.make_node("Add", ["t6", "t7"], ["t8"]),
                # empty branches: do nothing
                mul_1_not_to_move,
                helper.make_node("Add", ["fork3", "fork3"], ["top_out"]),
            ],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())

    np.random.seed(0)
    for i in range(num_of_params):
        model.set_initializer(
            "p" + str(i), np.random.rand(*input_shape).astype(np.float32)
        )

    # Transform
    new_model = model.transform(MoveLinearPastFork())
    inp_dict = {"top_in": np.random.rand(*input_shape).astype(np.float32)}

    # Test
    assert oxe.compare_execution(model, new_model, inp_dict)
    assert not new_model.is_fork_node(add_1_to_move)
    assert not new_model.is_fork_node(mul_1_to_move)
    assert not new_model.is_fork_node(add_2_to_move)
    assert new_model.is_fork_node(mul_1_not_to_move)
    assert len(new_model.graph.node) == 14
def test_linear_past_eltwise_add_multiple_forks(ch, ifmdim):
    # generate test vectors of correct shape
    if ifmdim == -1:
        input_shape = (1, ch)
    else:
        input_shape = (1, ch, ifmdim, ifmdim)
    top_in = helper.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape)
    top_out = helper.make_tensor_value_info("top_out", TensorProto.FLOAT, input_shape)

    num_of_params = 6
    value_info = []
    for i in range(num_of_params):
        value_info += [
            helper.make_tensor_value_info("p" + str(i), TensorProto.FLOAT, input_shape)
        ]

    modelproto = helper.make_model(
        helper.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=value_info,
            nodes=[
                helper.make_node("Add", ["top_in", "p0"], ["fork1"]),
                helper.make_node("Mul", ["fork1", "p1"], ["t2"]),
                helper.make_node("Mul", ["fork1", "p2"], ["t3"]),
                helper.make_node("Add", ["t2", "t3"], ["t4"]),
                helper.make_node("Mul", ["t4", "p3"], ["fork2"]),
                helper.make_node("Add", ["fork2", "p4"], ["t5"]),
                helper.make_node("Add", ["fork2", "p5"], ["t6"]),
                helper.make_node("Add", ["t5", "t6"], ["top_out"]),
            ],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())

    np.random.seed(0)
    for i in range(num_of_params):
        model.set_initializer(
            "p" + str(i), np.random.rand(*input_shape).astype(np.float32)
        )
    # need equal mults:
    model.set_initializer("p2", model.get_initializer("p1"))

    # Transform
    new_model = model.transform(MoveLinearPastEltwiseAdd())
    inp_dict = {"top_in": np.random.rand(*input_shape).astype(np.float32)}

    # Test
    assert oxe.compare_execution(model, new_model, inp_dict)
    assert new_model.graph.node[0].op_type == "Add"
    assert new_model.graph.node[1].op_type == "Add"
    assert new_model.graph.node[2].op_type == "Mul"
    assert new_model.graph.node[3].op_type == "Mul"
    assert new_model.graph.node[4].op_type == "Add"
    assert new_model.graph.node[5].op_type == "Add"
    assert len(new_model.graph.node) == 6
def test_move_maxpool_past_multithreshold():
    # generate test vectors of correct shape
    ch = 64
    ifmdim = 16
    ofmdim = 16 // 4
    input_shape = (1, ch, ifmdim, ifmdim)
    output_shape = (1, ch, ofmdim, ofmdim)

    top_in = helper.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape)
    top_out = helper.make_tensor_value_info("top_out", TensorProto.FLOAT, output_shape)

    maxpool_config = {}
    maxpool_config["pads"] = [1, 1, 1, 1]
    maxpool_config["kernel_shape"] = [3, 3]
    maxpool_config["strides"] = [2, 2]

    value_info = []
    thres1_shape = [1, 1]
    value_info += [
        helper.make_tensor_value_info("thres1", TensorProto.FLOAT, thres1_shape)
    ]

    thres2_shape = [ch, 14]
    value_info += [
        helper.make_tensor_value_info("thres2", TensorProto.FLOAT, thres2_shape)
    ]

    nodes = []
    nodes += [helper.make_node("MaxPool", ["top_in"], ["t1"], **maxpool_config)]
    nodes += [
        helper.make_node(
            "MultiThreshold",
            ["t1", "thres1"],
            ["t2"],
            domain="finn.custom_op.general",
            out_dtype="BIPOLAR",
            out_bias=-1.0,
            out_scale=1.0,
        )
    ]
    nodes += [helper.make_node("MaxPool", ["t2"], ["t3"], **maxpool_config)]
    nodes += [
        helper.make_node(
            "MultiThreshold",
            ["t3", "thres2"],
            ["top_out"],
            domain="finn.custom_op.general",
            out_dtype="UINT4",
        )
    ]

    modelproto = helper.make_model(
        helper.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=value_info,
            nodes=nodes,
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())

    # use float32 to match the declared FLOAT value_info of thres1
    model.set_initializer("thres1", np.array([[0]], dtype=np.float32))
    model.set_initializer(
        "thres2", get_multithreshold_rand_params(*thres2_shape, seed=0)
    )

    # Transform
    new_model = model.transform(MoveMaxPoolPastMultiThreshold())
    inp_dict = {"top_in": np.random.rand(*input_shape).astype(np.float32)}

    # Test
    assert oxe.compare_execution(model, new_model, inp_dict)
    assert new_model.graph.node[0].op_type == "MaxPool"
    assert new_model.graph.node[1].op_type == "MultiThreshold"
    assert new_model.graph.node[2].op_type == "MultiThreshold"
    assert new_model.graph.node[3].op_type == "MaxPool"
    assert len(new_model.graph.node) == 4
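# Several tests above draw their threshold initializers from
# get_multithreshold_rand_params. A behavioral sketch follows, assuming it
# returns a (channels, num_of_thres) float32 array of monotonically increasing
# random thresholds per channel; this is an illustration, not FINN's exact code.
import numpy as np


def get_multithreshold_rand_params(channels, num_of_thres, seed=None):
    if seed is not None:
        np.random.seed(seed)
    steps = np.random.rand(channels, 1) * 2  # random positive step per channel
    bias = np.random.rand(channels, 1) * 10  # random offset per channel
    levels = np.tile(np.arange(num_of_thres), (channels, 1))
    # increasing levels, shifted and scaled -> sorted thresholds per channel
    return ((levels + bias) * steps).astype(np.float32)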