Example #1
def test_end2end_mobilenet_convert_to_hls_layers():
    # build_dir and mem_mode are module-level settings in the original test file
    # (mem_mode selects the weight memory mode passed to the StreamingFCLayers,
    # e.g. "decoupled")
    model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_lowered.onnx")
    # convert the lowered standard ONNX nodes into FINN HLS-backend custom ops
    model = model.transform(to_hls.InferPool_Batch())
    model = model.transform(to_hls.InferConvInpGen())
    model = model.transform(to_hls.InferVVAU())
    model = model.transform(to_hls.InferQuantizedStreamingFCLayer(mem_mode))
    model = model.transform(to_hls.InferChannelwiseLinearLayer())
    model = model.transform(to_hls.InferLabelSelectLayer())
    # tidy up shapes and names, then save the next checkpoint
    model = model.transform(InferShapes())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    model.save(build_dir + "/end2end_mobilenet_hls_layers.onnx")
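The saved checkpoint can be inspected to confirm that the conversion produced fpgadataflow custom ops. A minimal sketch, assuming the pre-qonnx finn.core.modelwrapper import path and the FINN_BUILD_DIR convention used by the end-to-end tests:

import os
from collections import Counter

from finn.core.modelwrapper import ModelWrapper  # assumed import path (older, pre-qonnx FINN)

# assumption: the end2end tests take build_dir from the FINN_BUILD_DIR env var
build_dir = os.environ["FINN_BUILD_DIR"]

# reload the checkpoint written by the test above
model = ModelWrapper(build_dir + "/end2end_mobilenet_hls_layers.onnx")
# after conversion most nodes should be HLS custom ops such as
# StreamingFCLayer_Batch, Pool_Batch, ConvolutionInputGenerator, ...
print(Counter(n.op_type for n in model.graph.node))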
Example #2
def step_mobilenet_convert_to_hls_layers(model: ModelWrapper,
                                         cfg: DataflowBuildConfig):
    # take the weight memory mode for the StreamingFCLayers from the build config
    mem_mode = cfg.default_mem_mode.value
    model = model.transform(to_hls.InferPool_Batch())
    model = model.transform(to_hls.InferConvInpGen())
    model = model.transform(to_hls.InferVVAU())
    model = model.transform(to_hls.InferQuantizedStreamingFCLayer(mem_mode))
    model = model.transform(to_hls.InferChannelwiseLinearLayer())
    model = model.transform(to_hls.InferLabelSelectLayer())
    model = model.transform(InferShapes())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    return model
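This step follows the FINN builder's custom-step signature (a ModelWrapper and a DataflowBuildConfig in, a ModelWrapper out), so it can be placed in the steps list of a build. A minimal sketch, assuming the finn.builder API and hypothetical file and directory names:

import finn.builder.build_dataflow as build
import finn.builder.build_dataflow_config as build_cfg

cfg = build_cfg.DataflowBuildConfig(
    output_dir="output_mobilenet",   # hypothetical output directory
    synth_clk_period_ns=5.0,
    fpga_part="xc7z020clg400-1",
    # run only the custom step defined above for illustration; a real build
    # would list the full sequence of step names/functions here
    steps=[step_mobilenet_convert_to_hls_layers],
    generate_outputs=[build_cfg.DataflowOutputType.ESTIMATE_REPORTS],
)
build.build_dataflow_cfg("end2end_mobilenet_lowered.onnx", cfg)  # hypothetical model file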
Example #3
def test_convert_to_hls_pool_batch(idt, odt, pool_config, ifm_ch, pe, op_type,
                                   exec_mode):
    k, stride, pad, ifm_dim = pool_config

    if ifm_ch % pe != 0:
        pytest.skip("ifm_ch%pe != 0. Skipping")

    if pad != 0 and idt.signed():
        pytest.skip("No support for pal_val != 0. Skipping")

    np.random.seed(0)
    ofm_dim = int(((ifm_dim + 2 * pad - k) / stride) + 1)

    x = gen_finn_dt_tensor(idt, (1, ifm_ch, ifm_dim, ifm_dim))
    # prepare input data
    input_dict = prepare_inputs(x)
    if op_type == "MaxPool":
        # if idt.signed():
        #     pytest.skip("""No support for signed input (see accu initialization
        #         in Pool_batch HLSLIB function). Skipping""")

        if idt != odt:
            pytest.skip("Skipping Maxpool with idt != odt")

        model = make_single_maxpool_modelwrapper(k, stride, pad, ifm_ch,
                                                 ifm_dim, ofm_dim, idt)
    elif op_type == "QuantAvgPool2d":
        if pad != 0:
            pytest.skip("No padding support for QuantAvgPool2d. Skipping")

        if idt.signed() != odt.signed():
            pytest.skip(
                "Skipping QuantAvgPool2d with idt.signed() != odt.signed()")
        model = make_single_quantavpool_modelwrapper(k, stride, ifm_ch,
                                                     ifm_dim, ofm_dim, idt,
                                                     odt)
    else:
        assert False, "{} is not a supported op_type".format(op_type)

    y_expected = oxe.execute_onnx(model, input_dict)["outp"]

    # convert the pooling node into a Pool_Batch (preceded by a sliding-window node)
    new_model = model.transform(to_hls.InferPool_Batch())
    new_model = new_model.transform(GiveUniqueNodeNames())

    if ifm_ch != pe:
        # also lower the sliding window to a ConvolutionInputGenerator and set folding
        new_model = new_model.transform(to_hls.InferConvInpGen())
        # Folding
        for n in new_model.graph.node:
            if n.op_type == "ConvolutionInputGenerator":
                inst = getCustomOp(n)
                inst.set_nodeattr("SIMD", pe)
            elif n.op_type == "Pool_Batch":
                inst = getCustomOp(n)
                inst.set_nodeattr("PE", pe)

    if exec_mode == "cppsim":
        new_model = new_model.transform(SetExecMode("cppsim"))
        new_model = new_model.transform(PrepareCppSim())
        new_model = new_model.transform(CompileCppSim())
    elif exec_mode == "rtlsim":
        new_model = new_model.transform(SetExecMode("rtlsim"))
        new_model = new_model.transform(GiveUniqueNodeNames())
        new_model = new_model.transform(PrepareIP("xc7z020clg400-1", 5))
        new_model = new_model.transform(HLSSynthIP())
        new_model = new_model.transform(PrepareRTLSim())
    else:
        raise Exception("Unknown exec_mode")

    # execute new_model
    y_produced = oxe.execute_onnx(new_model, input_dict)["outp"]
    assert (y_produced == y_expected).all()
    # check the expected graph structure: the converted graph consists of layout
    # transposes around the sliding-window and Pool_Batch nodes (plus a standalone
    # padding node when pad != 0 and ifm_ch != pe); stride > k is not converted,
    # so the original single pooling node remains
    if stride <= k:
        if pad == 0 or ifm_ch == pe:
            assert len(new_model.graph.node) == 4
        else:
            assert len(new_model.graph.node) == 5
    else:
        assert len(new_model.graph.node) == 1

    if exec_mode == "rtlsim":
        node = new_model.get_nodes_by_op_type("Pool_Batch")[0]
        inst = getCustomOp(node)
        cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim")
        exp_cycles_dict = new_model.analysis(exp_cycles_per_layer)
        exp_cycles = exp_cycles_dict[node.name]
        assert np.isclose(exp_cycles, cycles_rtlsim, atol=10)
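The excerpt omits the module-level helpers it depends on; prepare_inputs simply wraps the generated tensor in the input dictionary expected by execute_onnx. A minimal sketch of that helper, assuming the single graph input of the model wrappers above is named "inp" (matching the "outp" output read in the test):

def prepare_inputs(input_tensor):
    # wrap the generated tensor in the input dict expected by oxe.execute_onnx;
    # "inp" is the assumed name of the model's single graph input
    return {"inp": input_tensor}

In the test module, the idt, odt, pool_config, ifm_ch, pe, op_type and exec_mode arguments are supplied through stacked @pytest.mark.parametrize decorators above the test function.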