Example 1
def step_out_of_context_synthesis(model: ModelWrapper, cfg: DataflowBuildConfig):
    """Run out-of-context synthesis and generate reports.
    Depends on the DataflowOutputType.STITCHED_IP output product."""
    if DataflowOutputType.OOC_SYNTH in cfg.generate_outputs:
        assert (
            DataflowOutputType.STITCHED_IP in cfg.generate_outputs
        ), "OOC needs stitched IP"
        model = model.transform(
            SynthOutOfContext(
                part=cfg._resolve_fpga_part(), clk_period_ns=cfg.synth_clk_period_ns
            )
        )
        report_dir = cfg.output_dir + "/report"
        os.makedirs(report_dir, exist_ok=True)
        ooc_res_dict = model.get_metadata_prop("res_total_ooc_synth")
        # metadata props are stored as strings, so parse the dict literal
        ooc_res_dict = eval(ooc_res_dict)

        estimate_network_performance = model.analysis(dataflow_performance)
        # add some more metrics to estimated performance
        n_clock_cycles_per_sec = float(ooc_res_dict["fmax_mhz"]) * (10 ** 6)
        est_fps = n_clock_cycles_per_sec / estimate_network_performance["max_cycles"]
        ooc_res_dict["estimated_throughput_fps"] = est_fps
        with open(report_dir + "/ooc_synth_and_timing.json", "w") as f:
            json.dump(ooc_res_dict, f, indent=2)
    return model
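A minimal sketch of how dataflow build steps like the one above compose: every step takes (model, cfg) and returns the possibly transformed model, so a build flow reduces to folding the model through an ordered list of steps. The helper below is illustrative, not FINN's actual build driver.

def run_build_steps(model, cfg, steps):
    # Fold the model through each step; all steps share the signature
    # step(model: ModelWrapper, cfg: DataflowBuildConfig) -> ModelWrapper.
    for step in steps:
        model = step(model, cfg)
    return model

# e.g. run_build_steps(model, cfg, [step_hls_ipgen, step_out_of_context_synthesis])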
Example 2
def test_is_linear_forked_node_output():
    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
    add_param = oh.make_tensor_value_info("add_param", TensorProto.FLOAT, [2])
    mul0_param = oh.make_tensor_value_info("mul0_param", TensorProto.FLOAT,
                                           [2])
    mul1_param = oh.make_tensor_value_info("mul1_param", TensorProto.FLOAT,
                                           [2])
    mul0_res = oh.make_tensor_value_info("mul0_res", TensorProto.FLOAT, [2])
    mul1_res = oh.make_tensor_value_info("mul1_res", TensorProto.FLOAT, [2])
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=[add_param, mul0_param, mul1_param, mul0_res, mul1_res],
            nodes=[
                oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
                oh.make_node("Mul", ["middle", "mul0_param"], ["mul0_res"]),
                oh.make_node("Mul", ["middle", "mul1_param"], ["mul1_res"]),
                oh.make_node("Add", ["mul0_res", "mul1_res"], ["top_out"]),
            ],
        ))
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    ret = model.analysis(ta.is_linear)
    assert ret["is_linear"] is False
Example 3
def test_node_inputs_in_expected_order():
    raw_m = get_data("finn", "data/onnx/mnist-conv/model.onnx")
    model = ModelWrapper(raw_m)
    model = model.transform(InferShapes())
    ret = model.analysis(ta.node_inputs_in_expected_order)
    # this model has an (unnecessary) dynamic reshape for its weight tensor
    # and so it fails the check
    assert ret["node_inputs_in_expected_order"] is False
Example 4
def test_res_estimate():
    mw = mh = 4
    simd = 1
    pe = 1
    idt = DataType.INT2
    wdt = DataType.INT2
    odt = DataType.INT32
    actval = odt.min()

    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, mw])
    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, mh])
    node_inp_list = ["inp", "weights", "thresh"]

    FCLayer_node = helper.make_node(
        "StreamingFCLayer_Batch",
        node_inp_list,
        ["outp"],
        domain="finn",
        backend="fpgadataflow",
        resType="ap_resource_lut()",
        MW=mw,
        MH=mh,
        SIMD=simd,
        PE=pe,
        inputDataType=idt.name,
        weightDataType=wdt.name,
        outputDataType=odt.name,
        ActVal=actval,
        binaryXnorMode=0,
        noActivation=0,
    )
    graph = helper.make_graph(nodes=[FCLayer_node],
                              name="fclayer_graph",
                              inputs=[inp],
                              outputs=[outp])

    model = helper.make_model(graph, producer_name="fclayer-model")
    model = ModelWrapper(model)

    model.set_tensor_datatype("inp", idt)
    model.set_tensor_datatype("outp", odt)
    model.set_tensor_datatype("weights", wdt)

    model = model.transform(GiveUniqueNodeNames())
    prod_resource_estimation = model.analysis(res_estimation)
    expect_resource_estimation = {
        "StreamingFCLayer_Batch_0": {
            "BRAM_18K": 1,
            "BRAM_efficiency": 0.001736111111111111,
            "LUT": 304.4
        }
    }

    assert check_two_dict_for_equality(
        prod_resource_estimation,
        expect_resource_estimation), """The produced output of
    the res_estimation analysis pass is not equal to the expected one"""
Example 5
def step_hls_ipgen(model: ModelWrapper, cfg: DataflowBuildConfig):
    """Run Vivado HLS synthesis on generated code for HLSCustomOp nodes,
    in order to generate IP blocks."""

    model = model.transform(HLSSynthIP())
    model = model.transform(ReplaceVerilogRelPaths())
    report_dir = cfg.output_dir + "/report"
    os.makedirs(report_dir, exist_ok=True)
    estimate_layer_resources_hls = model.analysis(hls_synth_res_estimation)
    with open(report_dir + "/estimate_layer_resources_hls.json", "w") as f:
        json.dump(estimate_layer_resources_hls, f, indent=2)
    return model
Example 6
def test_all_tensors_f32():
    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
    add_param = oh.make_tensor_value_info("add_param", TensorProto.FLOAT, [2])
    mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [2])
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=[add_param, mul_param],
            nodes=[
                oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
                oh.make_node("Mul", ["middle", "mul_param"], ["top_out"]),
            ],
        ))
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    ret = model.analysis(ta.all_tensors_f32)
    assert ret["all_tensors_f32"] is True

    top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [2])
    add_param = oh.make_tensor_value_info("add_param", TensorProto.INT8, [2])
    mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [2])
    top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2])
    modelproto = oh.make_model(
        oh.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=[add_param, mul_param],
            nodes=[
                oh.make_node("Add", ["top_in", "add_param"], ["middle"]),
                oh.make_node("Mul", ["middle", "mul_param"], ["top_out"]),
            ],
        ))
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    ret = model.analysis(ta.all_tensors_f32)
    assert ret["all_tensors_f32"] is False
Example 7
def step_generate_estimate_reports(model: ModelWrapper,
                                   cfg: DataflowBuildConfig):
    "Generate per-layer resource and cycle estimates using analytical models."

    if DataflowOutputType.ESTIMATE_REPORTS in cfg.generate_outputs:
        report_dir = cfg.output_dir + "/report"
        os.makedirs(report_dir, exist_ok=True)
        ops_and_params = model.analysis(op_and_param_counts)
        with open(report_dir + "/op_and_param_counts.json", "w") as f:
            json.dump(ops_and_params, f, indent=2)
        estimate_layer_cycles = model.analysis(exp_cycles_per_layer)
        with open(report_dir + "/estimate_layer_cycles.json", "w") as f:
            json.dump(estimate_layer_cycles, f, indent=2)
        estimate_layer_resources = model.analysis(res_estimation)
        estimate_layer_resources["total"] = aggregate_dict_keys(
            estimate_layer_resources)
        with open(report_dir + "/estimate_layer_resources.json", "w") as f:
            json.dump(estimate_layer_resources, f, indent=2)
        estimate_layer_resources_complete = model.analysis(
            res_estimation_complete)
        with open(report_dir + "/estimate_layer_config_alternatives.json",
                  "w") as f:
            json.dump(estimate_layer_resources_complete, f, indent=2)
        # need to call AnnotateCycles before dataflow_performance
        model = model.transform(AnnotateCycles())
        estimate_network_performance = model.analysis(dataflow_performance)
        # add some more metrics to estimated performance
        n_clock_cycles_per_sec = (10**9) / cfg.synth_clk_period_ns
        est_fps = n_clock_cycles_per_sec / estimate_network_performance[
            "max_cycles"]
        estimate_network_performance["estimated_throughput_fps"] = est_fps
        est_latency_ns = (
            estimate_network_performance["critical_path_cycles"] *
            cfg.synth_clk_period_ns)
        estimate_network_performance["estimated_latency_ns"] = est_latency_ns
        with open(report_dir + "/estimate_network_performance.json", "w") as f:
            json.dump(estimate_network_performance, f, indent=2)
    return model
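A worked instance of the throughput and latency arithmetic above, with an assumed 5.0 ns clock and illustrative cycle counts:

clk_ns = 5.0                  # cfg.synth_clk_period_ns
max_cycles = 1536             # bottleneck layer, from dataflow_performance
critical_path_cycles = 4096   # one full traversal of the pipeline

est_fps = (10 ** 9 / clk_ns) / max_cycles       # ~130208.3 frames/s
est_latency_ns = critical_path_cycles * clk_ns  # 20480.0 ns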
Example 8
def inference_cost(model_filename,
                   *,
                   output_json=None,
                   output_onnx=None,
                   preprocess=True,
                   discount_sparsity=True):
    """Print the inference cost estimate metric for given ONNX model.
    Supports the Quant op for weight/activation quantization.

    :param model_filename: Filename for ONNX model
    :param output_json: Optional JSON filename to save the inference cost dict
    :param output_onnx: Optional ONNX filename to save the final model after any
        preprocessing
    :param preprocess: If set, run preprocessing steps such as shape inference,
        datatype inference and constant folding. Strongly recommended.
    :param discount_sparsity: If set, will discount op cost of MAC ops with a
        constant zero weight, and the mem cost of constant zero weights.
    """
    print("Inference cost for " + model_filename)
    model = ModelWrapper(model_filename)
    if preprocess:
        qnt_nodes = model.get_nodes_by_op_type("Quant")
        for qnt_node in qnt_nodes:
            qnt_node.domain = "finn.custom_op.general"
        model = model.transform(InferShapes())
        model = model.transform(GiveUniqueParameterTensors())
        model = model.transform(InferDataTypes())
        model = model.transform(FoldConstants())
        model = model.transform(RemoveUnusedTensors())
        model = model.transform(RemoveStaticGraphInputs())
        model = model.transform(InferDataTypes())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    if output_onnx is not None:
        model.save(output_onnx)
    ret = model.analysis(lambda x: infca.inference_cost(x, discount_sparsity))
    bops = compute_bops(ret)
    mem_w_bits = compute_mem_bits(ret, "mem_w")
    mem_o_bits = compute_mem_bits(ret, "mem_o")
    ret["total_bops"] = bops
    ret["total_mem_w_bits"] = mem_w_bits
    ret["total_mem_o_bits"] = mem_o_bits

    if "unsupported" in ret:
        ret["unsupported"] = str(ret["unsupported"])
    print(json.dumps(ret, sort_keys=True, indent=2))

    if output_json is not None:
        with open(output_json, "w") as f:
            json.dump(ret, f, sort_keys=True, indent=2)
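A hypothetical invocation of inference_cost as documented above; the filenames are placeholders:

inference_cost(
    "model.onnx",
    output_json="inference_cost.json",
    output_onnx="model_preprocessed.onnx",
    preprocess=True,
    discount_sparsity=True,
)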
Example 9
def test_verify_custom_nodes():
    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, 13, 64])
    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, 1, 64])

    # MultiThreshold
    m_node = helper.make_node(
        "MultiThreshold",
        ["xnor_out", "threshs"],
        ["outp"],
        domain="finn",
        out_scale=2.0,
        out_bias=-1.0,
        out_dtype="",
    )

    # XnorPopcountMatMul
    xnor_node = helper.make_node(
        "XnorPopcountMatMul",
        ["fclayer_out0", "fclayer_out1"],
        ["xnor_out"],
        domain="finn",
    )

    # StreamingMaxPool_Batch
    MaxPool_batch_node = helper.make_node(
        "StreamingMaxPool_Batch",
        ["inp"],
        ["max_out"],
        domain="finn",
        backend="fpgadataflow",
        code_gen_dir="",
        executable_path="",
        ImgDim=4,
        PoolDim=2,
        NumChannels=2,
    )

    # StreamingFCLayer_Batch - no activation
    FCLayer0_node = helper.make_node(
        "StreamingFCLayer_Batch",
        ["max_out", "weights"],
        ["fclayer_out0"],
        domain="finn",
        backend="fpgadataflow",
        code_gen_dir="",
        executable_path="",
        resType="ap_resource_lut()",
        MW=8,
        MH=8,
        SIMD=4,
        PE=4,
        inputDataType="<FINN DataType>",
        weightDataType="<FINN DataType>",
        outputDataType="<FINN DataType>",
        ActVal=0,
        binaryXnorMode=1,
        noActivation=1,
    )

    # StreamingFCLayer_Batch - with activation
    FCLayer1_node = helper.make_node(
        "StreamingFCLayer_Batch",
        ["fclayer_out0", "weights", "threshs"],
        ["fclayer_out1"],
        domain="finn",
        backend="fpgadataflow",
        code_gen_dir="",
        executable_path="",
        resType="ap_resource_lut()",
        MW=8,
        MH=8,
        SIMD=4,
        PE=4,
        inputDataType="<FINN DataType>",
        weightDataType="<FINN DataType>",
        outputDataType="<FINN DataType>",
        ActVal=0,
        binaryXnorMode=1,
        noActivation=0,
    )

    graph = helper.make_graph(
        nodes=[
            MaxPool_batch_node, FCLayer0_node, FCLayer1_node, xnor_node, m_node
        ],
        name="custom_op_graph",
        inputs=[inp],
        outputs=[outp],
        value_info=[
            helper.make_tensor_value_info("max_out", TensorProto.FLOAT,
                                          [1, 13, 64]),
            helper.make_tensor_value_info("weights", TensorProto.FLOAT,
                                          [64, 32, 416]),
            helper.make_tensor_value_info("threshs", TensorProto.FLOAT,
                                          [32, 32, 16]),
            helper.make_tensor_value_info("xnor_out", TensorProto.FLOAT,
                                          [1, 32, 32]),
            helper.make_tensor_value_info("fclayer_out0", TensorProto.FLOAT,
                                          [1, 32, 32]),
            helper.make_tensor_value_info("fclayer_out1", TensorProto.FLOAT,
                                          [32, 64, 512]),
        ],
    )
    model = helper.make_model(graph, producer_name="custom-op-model")
    model = ModelWrapper(model)

    produced = model.analysis(verify_nodes)

    expected = {
        "StreamingMaxPool_Batch": [
            "The number of attributes is correct",
            "Attribute domain is set correctly",
            "Attribute backend is set correctly",
            "All necessary attributes exist",
            "The number of inputs is correct",
        ],
        "StreamingFCLayer_Batch": [
            "The number of attributes is correct",
            "Attribute domain is set correctly",
            "Attribute backend is set correctly",
            "All necessary attributes exist",
            "The number of inputs is correct",
        ],
        "XnorPopcountMatMul": [
            "The number of attributes is correct",
            "Attribute domain is set correctly",
            "XnorPopcountMatMul should not have any attributes",
            "The number of inputs is correct",
        ],
        "MultiThreshold": [
            "The number of attributes is correct",
            "Attribute domain is set correctly",
            "All necessary attributes exist",
            "The number of inputs is correct",
        ],
    }

    assert check_two_dict_for_equality(produced,
                                       expected), """The produced output of
Example 10
def test_nodes_topologically_sorted():
    # test analysis pass (nodes_topologically_sorted) with different models

    # test with data/onnx/finn-hls-model/tfc_w1_a1_after_conv_to_hls.onnx
    raw_m = get_data(
        "finn", "data/onnx/finn-hls-model/tfc_w1_a1_after_conv_to_hls.onnx")
    model = ModelWrapper(raw_m)
    ret = model.analysis(ta.nodes_topologically_sorted)
    assert ret["nodes_topologically_sorted"] is True

    # remove first node and add it at the end
    graph = model.graph
    first_node = graph.node[0]
    graph.node.remove(first_node)
    graph.node.append(first_node)
    ret = model.analysis(ta.nodes_topologically_sorted)
    assert ret["nodes_topologically_sorted"] is False

    # test with data/onnx/mnist-conv/model.onnx
    raw_m = get_data("finn", "data/onnx/mnist-conv/model.onnx")
    model = ModelWrapper(raw_m)
    ret = model.analysis(ta.nodes_topologically_sorted)
    assert ret["nodes_topologically_sorted"] is True

    # remove first node and add it at the end
    graph = model.graph
    first_node = graph.node[0]
    graph.node.remove(first_node)
    graph.node.append(first_node)
    ret = model.analysis(ta.nodes_topologically_sorted)
    assert ret["nodes_topologically_sorted"] is False

    # test with manually created small network
    Neg_node = oh.make_node("Neg", inputs=["in1"], outputs=["neg1"])
    Round_node = oh.make_node("Round", inputs=["neg1"], outputs=["round1"])

    Ceil_node = oh.make_node("Ceil", inputs=["neg1"], outputs=["ceil1"])
    Add_node = oh.make_node("Add",
                            inputs=["round1", "ceil1"],
                            outputs=["out1"])

    in1 = oh.make_tensor_value_info("in1", TensorProto.FLOAT, [4, 4])
    out1 = oh.make_tensor_value_info("out1", TensorProto.FLOAT, [4, 4])

    graph = oh.make_graph(
        nodes=[Neg_node, Round_node, Ceil_node, Add_node],
        name="simple_graph",
        inputs=[in1],
        outputs=[out1],
        value_info=[
            oh.make_tensor_value_info("neg1", TensorProto.FLOAT, [4, 4]),
            oh.make_tensor_value_info("round1", TensorProto.FLOAT, [4, 4]),
            oh.make_tensor_value_info("ceil1", TensorProto.FLOAT, [4, 4]),
        ],
    )

    onnx_model = oh.make_model(graph, producer_name="simple-model")
    model = ModelWrapper(onnx_model)

    ret = model.analysis(ta.nodes_topologically_sorted)
    assert ret["nodes_topologically_sorted"] is True

    # create same graph but with "wrong" node order
    graph = oh.make_graph(
        nodes=[Round_node, Ceil_node, Neg_node, Add_node],
        name="simple_graph",
        inputs=[in1],
        outputs=[out1],
        value_info=[
            oh.make_tensor_value_info("neg1", TensorProto.FLOAT, [4, 4]),
            oh.make_tensor_value_info("round1", TensorProto.FLOAT, [4, 4]),
            oh.make_tensor_value_info("ceil1", TensorProto.FLOAT, [4, 4]),
        ],
    )

    onnx_model = oh.make_model(graph, producer_name="simple-model")
    model = ModelWrapper(onnx_model)

    ret = model.analysis(ta.nodes_topologically_sorted)
    assert ret["nodes_topologically_sorted"] is False

    # test with data/onnx/finn-hls-model/finn-hls-onnx-model.onnx
    raw_m = get_data("finn",
                     "data/onnx/finn-hls-model/finn-hls-onnx-model.onnx")
    model = ModelWrapper(raw_m)
    ret = model.analysis(ta.nodes_topologically_sorted)
    assert ret["nodes_topologically_sorted"] is True

    # remove first node and add it at the end
    graph = model.graph
    first_node = graph.node[0]
    graph.node.remove(first_node)
    graph.node.append(first_node)
    ret = model.analysis(ta.nodes_topologically_sorted)
    assert ret["nodes_topologically_sorted"] is False

    # test with cnv_w1a1
    build_dir = "/tmp/" + os.environ["FINN_INST_NAME"]
    cnv = get_test_model_trained("CNV", 1, 1)
    bo.export_finn_onnx(cnv, (1, 3, 32, 32),
                        build_dir + "/end2end_cnv_w1a1_export.onnx")
    model = ModelWrapper(build_dir + "/end2end_cnv_w1a1_export.onnx")
    ret = model.analysis(ta.nodes_topologically_sorted)
    assert ret["nodes_topologically_sorted"] is True

    # remove first node and add it at the end
    graph = model.graph
    first_node = graph.node[0]
    graph.node.remove(first_node)
    graph.node.append(first_node)
    ret = model.analysis(ta.nodes_topologically_sorted)
    assert ret["nodes_topologically_sorted"] is False
Example 11
def test_QONNX_to_FINN(model_name, wbits, abits):
    if wbits > abits:
        pytest.skip("No wbits > abits cases at the moment")
    if model_name == "LFC" and wbits == 2 and abits == 2:
        pytest.skip("No LFC-w2a2 present at the moment")
    if model_name == "mobilenet" and (wbits != 2 or abits != 2):
        pytest.skip("Mobilenet only runs at W2A2, though it's technically W4A4.")

    # Get test config and model
    ATOL = 1e-7
    brev_model, in_shape, input_tensor = get_brev_model_and_sample_inputs(
        model_name, wbits, abits
    )
    temp_dir = TemporaryDirectory()
    qonnx_base_path = temp_dir.name + "/qonnx_{}.onnx"
    finn_base_path = temp_dir.name + "/finn_{}.onnx"

    # Get Brevitas output
    torch_input_tensor = torch.from_numpy(input_tensor).float()
    brev_output = brev_model.forward(torch_input_tensor).detach().numpy()

    # Get "clean" FINN model and it's output
    _ = bo.export_finn_onnx(brev_model, in_shape, finn_base_path.format("raw"))
    model = ModelWrapper(finn_base_path.format("raw"))
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(RemoveStaticGraphInputs())
    model.save(finn_base_path.format("clean"))

    model = ModelWrapper(finn_base_path.format("clean"))
    input_dict = {model.graph.input[0].name: input_tensor}
    output_dict = oxe.execute_onnx(model, input_dict, False)
    finn_export_output = output_dict[model.graph.output[0].name]
    # This test always fails on MobileNet for some reason
    if model_name != "mobilenet":
        assert np.isclose(
            brev_output, finn_export_output, atol=ATOL
        ).all(), "The output of the Brevitas model and the FINN model should match."

    # Get the equivalent QONNX model
    b_onnx.function.DOMAIN_STRING = "finn.custom_op.general"
    _ = b_onnx.manager.BrevitasONNXManager.export(
        brev_model, in_shape, qonnx_base_path.format("raw")
    )
    cleanup(qonnx_base_path.format("raw"), out_file=qonnx_base_path.format("clean"))

    # Compare output
    model = ModelWrapper(qonnx_base_path.format("clean"))
    input_dict = {model.graph.input[0].name: input_tensor}
    output_dict = oxe.execute_onnx(model, input_dict, False)
    qonnx_export_output = output_dict[model.graph.output[0].name]
    assert np.isclose(
        brev_output, qonnx_export_output, atol=ATOL
    ).all(), "The output of the Brevitas model and the QONNX model should match."
    # This test always fails on MobileNet for some reason
    if model_name != "mobilenet":
        assert np.isclose(
            qonnx_export_output, finn_export_output, atol=ATOL
        ).all(), "The output of the FINN model and the QONNX model should match."

    # Run QONNX to FINN conversion
    model = ModelWrapper(qonnx_base_path.format("clean"))
    model = model.transform(ConvertQONNXtoFINN())
    model.save(qonnx_base_path.format("whole_trafo"))

    # Compare output
    model = ModelWrapper(qonnx_base_path.format("whole_trafo"))
    input_dict = {model.graph.input[0].name: input_tensor}
    output_dict = oxe.execute_onnx(model, input_dict, False)
    test_output = output_dict[model.graph.output[0].name]
    assert np.isclose(test_output, finn_export_output, atol=ATOL).all(), (
        "The output of the FINN model "
        "and the QONNX -> FINN converted model should match."
    )

    # Run analysis passes on the converted model
    model = ModelWrapper(qonnx_base_path.format("whole_trafo"))
    _ = model.analysis(analysis_testing_for_no_quant_nodes)

    temp_dir.cleanup()
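The body of analysis_testing_for_no_quant_nodes is not shown here; a minimal sketch of what such a pass could check, reusing the get_nodes_by_op_type helper seen in inference_cost above:

def analysis_testing_for_no_quant_nodes_sketch(model):
    # After ConvertQONNXtoFINN, no quantizer nodes should remain in the graph.
    for op_type in ["Quant", "BipolarQuant"]:
        assert (
            len(model.get_nodes_by_op_type(op_type)) == 0
        ), f"Found {op_type} nodes after QONNX-to-FINN conversion"
    return dict()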
Example 12
def test_res_estimate():
    mw = mh = 4
    simd = 1
    pe = 1
    idt = DataType["INT2"]
    wdt = DataType["INT2"]
    odt = DataType["INT2"]
    actval = odt.min()

    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, mw])
    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, mh])
    node_inp_list = ["inp", "weights", "thresh"]

    FCLayer_node = helper.make_node(
        "StreamingFCLayer_Batch",
        node_inp_list,
        ["outp"],
        domain="finn.custom_op.fpgadataflow",
        backend="fpgadataflow",
        MW=mw,
        MH=mh,
        SIMD=simd,
        PE=pe,
        inputDataType=idt.name,
        weightDataType=wdt.name,
        outputDataType=odt.name,
        ActVal=actval,
        binaryXnorMode=0,
        noActivation=0,
    )
    graph = helper.make_graph(
        nodes=[FCLayer_node], name="fclayer_graph", inputs=[inp], outputs=[outp]
    )

    model = helper.make_model(graph, producer_name="fclayer-model")
    model = ModelWrapper(model)

    model.set_tensor_datatype("inp", idt)
    model.set_tensor_datatype("outp", odt)
    model.set_tensor_datatype("weights", wdt)

    model = model.transform(GiveUniqueNodeNames())
    prod_resource_estimation = model.analysis(res_estimation)
    expect_resource_estimation = {
        "StreamingFCLayer_Batch_0": {
            "BRAM_18K": 0,
            "BRAM_efficiency": 1,
            "LUT": 357,
            "DSP": 0,
            "URAM_efficiency": 1,
            "URAM": 0,
        }
    }

    assert check_two_dict_for_equality(
        prod_resource_estimation, expect_resource_estimation
    ), """The produced output of
    the res_estimation analysis pass is not equal to the expected one"""

    prod_resource_estimation = model.analysis(res_estimation_complete)
    expect_resource_estimation = {
        "StreamingFCLayer_Batch_0": [
            {
                "BRAM_18K": 0,
                "BRAM_efficiency": 1,
                "LUT": 352,
                "DSP": 1,
                "URAM": 0,
                "URAM_efficiency": 1,
            },
            {
                "BRAM_18K": 0,
                "BRAM_efficiency": 1,
                "LUT": 357,
                "DSP": 0,
                "URAM": 0,
                "URAM_efficiency": 1,
            },
        ]
    }

    assert check_two_dict_for_equality(
        prod_resource_estimation, expect_resource_estimation
    ), """The produced output of
Example 13
def test_nodes_topologically_sorted():
    # test analysis pass (nodes_topologically_sorted) with different models
    # test with data/onnx/mnist-conv/model.onnx
    raw_m = get_data("finn.data", "onnx/mnist-conv/model.onnx")
    model = ModelWrapper(raw_m)
    ret = model.analysis(ta.nodes_topologically_sorted)
    assert ret["nodes_topologically_sorted"] is True

    # remove first node and add it at the end
    graph = model.graph
    first_node = graph.node[0]
    graph.node.remove(first_node)
    graph.node.append(first_node)
    ret = model.analysis(ta.nodes_topologically_sorted)
    assert ret["nodes_topologically_sorted"] is False

    # test with manually created small network
    Neg_node = oh.make_node("Neg", inputs=["in1"], outputs=["neg1"])
    Round_node = oh.make_node("Round", inputs=["neg1"], outputs=["round1"])

    Ceil_node = oh.make_node("Ceil", inputs=["neg1"], outputs=["ceil1"])
    Add_node = oh.make_node("Add",
                            inputs=["round1", "ceil1"],
                            outputs=["out1"])

    in1 = oh.make_tensor_value_info("in1", TensorProto.FLOAT, [4, 4])
    out1 = oh.make_tensor_value_info("out1", TensorProto.FLOAT, [4, 4])

    graph = oh.make_graph(
        nodes=[Neg_node, Round_node, Ceil_node, Add_node],
        name="simple_graph",
        inputs=[in1],
        outputs=[out1],
        value_info=[
            oh.make_tensor_value_info("neg1", TensorProto.FLOAT, [4, 4]),
            oh.make_tensor_value_info("round1", TensorProto.FLOAT, [4, 4]),
            oh.make_tensor_value_info("ceil1", TensorProto.FLOAT, [4, 4]),
        ],
    )

    onnx_model = oh.make_model(graph, producer_name="simple-model")
    model = ModelWrapper(onnx_model)

    ret = model.analysis(ta.nodes_topologically_sorted)
    assert ret["nodes_topologically_sorted"] is True

    # create same graph but with "wrong" node order
    graph = oh.make_graph(
        nodes=[Round_node, Ceil_node, Neg_node, Add_node],
        name="simple_graph",
        inputs=[in1],
        outputs=[out1],
        value_info=[
            oh.make_tensor_value_info("neg1", TensorProto.FLOAT, [4, 4]),
            oh.make_tensor_value_info("round1", TensorProto.FLOAT, [4, 4]),
            oh.make_tensor_value_info("ceil1", TensorProto.FLOAT, [4, 4]),
        ],
    )

    onnx_model = oh.make_model(graph, producer_name="simple-model")
    model = ModelWrapper(onnx_model)

    ret = model.analysis(ta.nodes_topologically_sorted)
    assert ret["nodes_topologically_sorted"] is False