Code example #1
0
def run_end2end_test(tflite_path, onnx_path, tflite_layout, onnx_layout, tensors):
    """Convert a TFLite model to ONNX with explicit per-tensor layouts, then
    verify that both runtimes produce matching outputs on generated inputs."""
    # Every listed tensor maps to the same (source, target) layout pair.
    layout_map = dict()
    for name in tensors:
        layout_map[name] = (tflite_layout, onnx_layout)
    t2o.convert(tflite_path, onnx_path, layout_map)

    model = shrub.tflite.parse(tflite_path, tflite_layout)
    model.genInput()

    # Feed the same generated inputs to both backends and compare outputs.
    onnx_out = shrub.onnx.run(onnx_path, model.inputs, onnx_layout)
    tflite_out = shrub.tflite.run(tflite_path, model.inputs, tflite_layout)
    assert shrub.network.cmpTensors(onnx_out, tflite_out, useLayout=tflite_layout)
Code example #2
0
def end2end_test(model_name, use_layout):
    """End-to-end check: convert ``<model_name>.tflite`` to ONNX and verify
    that the ONNX and TFLite runtimes agree on generated inputs.

    Args:
        model_name: file stem of the model, resolved under ``../assets/tests``
            relative to this test file.
        use_layout: layout used when running the ONNX model and when
            comparing the two output sets.
    """
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    # Use os.path.join rather than string concatenation for portability.
    tflm_dir = os.path.abspath(os.path.join(cur_dir, '..', 'assets', 'tests'))
    tflm_name = model_name + '.tflite'
    onnx_name = model_name + '.onnx'  # written to the current working directory
    tflm_path = os.path.join(tflm_dir, tflm_name)
    t2o.convert(tflm_path, onnx_name)

    m = shrub.tflite.parse(tflm_path)
    m.genInput()

    onnx_ret = shrub.onnx.run(onnx_name, m.inputs, use_layout)
    tflite_ret = shrub.tflite.run(tflm_path, m.inputs)
    assert shrub.network.cmpTensors(onnx_ret, tflite_ret, useLayout=use_layout)
Code example #3
0
File: test_quantize.py — Project: erizmr/tflite2onnx
def _dequantize_all(tensors, quant):
    """Deep-copy each tensor, attach `quant`, dequantize the copy, and
    return the list of dequantized copies (originals are left untouched)."""
    dequantized = list()
    for t in tensors:
        ft = copy.deepcopy(t)
        ft.quant = quant
        ft.dequantize()
        dequantized.append(ft)
    return dequantized


def end2end_test(model_name, use_layout, atol, rtol):
    """End-to-end check for a quantized model: convert to ONNX, then compare
    dequantized TFLite outputs against ONNX outputs within tolerances.

    Args:
        model_name: file stem of the model, resolved under ``../assets/tests``.
        use_layout: layout used when running the ONNX model and comparing.
        atol: absolute tolerance for the output comparison.
        rtol: relative tolerance for the output comparison.
    """
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    # Use os.path.join rather than string concatenation for portability.
    tflm_dir = os.path.abspath(os.path.join(cur_dir, '..', 'assets', 'tests'))
    tflm_name = model_name + '.tflite'
    onnx_name = model_name + '.onnx'  # written to the current working directory
    tflm_path = os.path.join(tflm_dir, tflm_name)
    t2o.convert(tflm_path, onnx_name)

    m = shrub.tflite.parse(tflm_path)
    m.genInput()

    # TFLite model is supposed to be end to end quantized: run it as-is,
    # then dequantize its outputs with the model's output quant params.
    tflite_ret = shrub.tflite.run(tflm_path, m.inputs)
    oquant = shrub.tflite.parseQuantParam(tflm_path, False)[0]
    foutputs = _dequantize_all(tflite_ret, oquant)

    # ONNX model is supposed to be only several operators quantized: feed it
    # inputs dequantized with the model's input quant params.
    iquant = shrub.tflite.parseQuantParam(tflm_path, True)[0]
    finputs = _dequantize_all(m.inputs, iquant)
    onnx_ret = shrub.onnx.run(onnx_name, finputs, use_layout)

    assert shrub.network.cmpTensors(foutputs,
                                    onnx_ret,
                                    atol=atol,
                                    rtol=rtol,
                                    useLayout=use_layout)
Code example #4
0
import tf2onnx
import tflite2onnx

# Attempted conversion of an int8 TFLite model via tflite2onnx.
# NOTE(review): onnx_path has no .onnx extension — presumably intentional
# here, but verify what tflite2onnx.convert writes for a bare path.
tflite_path = 'checkpoint/SonyTFLite/tf2_sony_int8.tflite'
onnx_path = 'checkpoint/int8_sony'
tflite2onnx.convert(tflite_path, onnx_path)
# The bare string below is a no-op expression statement left as an in-file
# warning by the original author; it advises using tf2onnx's CLI instead.
"""DON'T USE THIS!! USE THE FOLLOWING COMMAND:
python -m tf2onnx.convert --opset 13 --tflite 
checkpoint/SonyTFLite/tf2_sony_float32_pad.tflite --output checkpoint/float32_sony_pad.onnx,
where after --tflite argument place your tflite model, and after --output put the filepath to the output
onnx model

"""


Code example #5
0
File: test.py — Project: ashikns/tflite2onnx
import tflite2onnx as to
import logging

# Enable DEBUG-level logging so the converter's internal steps are visible,
# then convert test.tflite to test.onnx (both resolved against the CWD).
logging.basicConfig(level=logging.DEBUG)
to.convert('test.tflite', 'test.onnx')