def _test_torch_model_single_io(torch_model,
                                torch_input_shape,
                                coreml_input_shape,
                                minimum_ios_deployment_target='12',
                                decimal=4,
                                opset_version=9):
    """Round-trip a PyTorch model through ONNX to CoreML and compare outputs.

    Runs ``torch_model`` on a random tensor, exports it to ONNX, converts
    the ONNX graph to CoreML, runs the CoreML model on the same input, and
    asserts both outputs agree to ``decimal`` places.

    :param torch_model: the torch module under test.
    :param torch_input_shape: shape of the random tensor fed to torch.
    :param coreml_input_shape: shape the input is reshaped to for CoreML
        when the deployment target does not support ND arrays.
    :param minimum_ios_deployment_target: forwarded to ``convert``; also
        selects whether the input can be fed as an ND array.
    :param decimal: precision for the output comparison.
    :param opset_version: ONNX opset used by ``torch.onnx.export``.
    """
    # Run the torch model; some models return a tuple — compare only the
    # first output in that case.
    torch_input = torch.rand(*torch_input_shape)
    torch_out_raw = torch_model(torch_input)
    if isinstance(torch_out_raw, tuple):
        torch_out = torch_out_raw[0].detach().numpy()
    else:
        torch_out = torch_out_raw.detach().numpy()

    # Export to ONNX.  In DEBUG mode keep artifacts in /tmp for inspection.
    # (Fix: previously mkdtemp() ran unconditionally and the resulting
    # directory leaked whenever DEBUG was set, because cleanup below is
    # skipped under DEBUG.)
    model_dir = '/tmp' if DEBUG else tempfile.mkdtemp()
    onnx_file = os.path.join(model_dir, 'torch_model.onnx')
    torch.onnx.export(torch_model,
                      torch_input,
                      onnx_file,
                      opset_version=opset_version)
    onnx_model = onnx.load(onnx_file)

    # Convert to CoreML and run.
    coreml_model = convert(
        onnx_model,
        minimum_ios_deployment_target=minimum_ios_deployment_target)

    output_name = [o.name for o in onnx_model.graph.output][0]
    # Graph inputs include the weight initializers; the real model input is
    # the one that is not an initializer.
    initializer_names = {t.name for t in onnx_model.graph.initializer}
    input_name = [
        i.name for i in onnx_model.graph.input
        if i.name not in initializer_names
    ][0]
    input_numpy = torch_input.detach().numpy()
    if SupportedVersion.is_nd_array_supported(minimum_ios_deployment_target):
        input_dict = {input_name: input_numpy}  # type: ignore
    else:
        # Older deployment targets require the legacy CoreML input rank.
        input_dict = {
            input_name: np.reshape(input_numpy, coreml_input_shape)
        }  # type: ignore
    coreml_out = coreml_model.predict(input_dict, useCPUOnly=True)[output_name]
    if DEBUG:
        coreml_model.save(os.path.join(model_dir, 'torch_model.mlmodel'))
        print('coreml_out')
        print(np.squeeze(coreml_out))
        print('torch_out')
        print(np.squeeze(torch_out))
        print('coreml out shape ', coreml_out.shape)
        print('torch out shape: ', torch_out.shape)

    # Compare torch vs CoreML outputs.
    _assert_outputs([torch_out], [coreml_out], decimal=decimal)  # type: ignore

    # Remove the scratch directory (kept when DEBUG for inspection).
    if not DEBUG:
        if os.path.exists(model_dir):
            shutil.rmtree(model_dir)
# Exemplo n.º 2
# 0
def _test_torch_model_single_io(torch_model, torch_input_shape,
                                coreml_input_shape):
    """Round-trip a PyTorch model through ONNX to CoreML and check that both
    produce the same output (to 4 decimal places) on a random input.
    """
    # Evaluate the torch model on a random tensor.
    example_input = torch.rand(*torch_input_shape)
    expected = torch_model(example_input).detach().numpy()

    # Export the model to ONNX inside a scratch directory.
    scratch_dir = tempfile.mkdtemp()
    onnx_path = os.path.join(scratch_dir, 'torch_model.onnx')
    torch.onnx.export(torch_model, example_input, onnx_path)
    loaded_model = onnx.load(onnx_path)

    # Convert the ONNX graph to CoreML and run it on the same input.
    mlmodel = convert(loaded_model)
    output_name = loaded_model.graph.output[0].name
    # Weight initializers also appear among the graph inputs; the real
    # model input is whichever name is not an initializer.
    weight_names = {init.name for init in loaded_model.graph.initializer}
    feed_names = [
        graph_input.name for graph_input in loaded_model.graph.input
        if graph_input.name not in weight_names
    ]
    input_name = feed_names[0]
    feed = {
        input_name: np.reshape(example_input.detach().numpy(),
                               coreml_input_shape)
    }  # type: ignore
    predicted = mlmodel.predict(feed, useCPUOnly=True)[output_name]

    # Outputs must agree to 4 decimal places.
    _assert_outputs([expected], [predicted], decimal=4)  # type: ignore

    # Clean up the scratch directory.
    if os.path.exists(scratch_dir):
        shutil.rmtree(scratch_dir)
# Exemplo n.º 3
# 0
    def skip_test_lstm(self):  # type: () -> None
        """Compare a single-node ONNX LSTM run through the caffe2 backend
        against its CoreML conversion.

        The ``skip_`` prefix keeps the test runner from collecting this
        test — presumably disabled pending a fix; confirm before renaming.
        """
        # LSTM dimensions: input size, hidden size, sequence length.
        x = 4
        h = 2
        seq_length = 3
        # ONNX LSTM stacks the 4 gate weight matrices along the first axis
        # (hence 4*h rows) and concatenates the 8 bias vectors into one
        # (8*h,) tensor.
        W = from_array(_random_array((4 * h, x)), name="gate_weights")
        R = from_array(_random_array((4 * h, h)), name="recursion_weights")
        B = from_array(_random_array((8 * h, )), name="biases")
        seq_lens_input = from_array(np.array([seq_length]).astype(np.int32),
                                    name='seq_lens_input')
        # Zero initial hidden and cell state, shaped (num_directions=1,
        # batch=1, hidden_size).
        initial_h = from_array(np.zeros((1, 1, h)).astype(np.float32),
                               name='initial_h')
        initial_c = from_array(np.zeros((1, 1, h)).astype(np.float32),
                               name='initial_c')

        # Input is (seq_length, batch=1, input_size); the LSTM emits the
        # full sequence of hidden states plus the last hidden state.
        input_shape = (seq_length, 1, x)
        output_shape_all = (seq_length, 1, h)
        output_shape_last = (1, 1, h)

        onnx_model = _onnx_create_single_node_model(
            "LSTM", [input_shape], [output_shape_all, output_shape_last],
            initializer=[W, R, B, seq_lens_input, initial_h, initial_c],
            hidden_size=h)
        X = np.random.rand(*input_shape).astype("float32")  #type: ignore
        # Reference run through the caffe2 ONNX backend.
        prepared_backend = caffe2.python.onnx.backend.prepare(onnx_model)
        out = prepared_backend.run({'input0': X})
        caffe2_out_all = out['output0']
        caffe2_out_last = out['output1']

        # CoreML run: initial states are fed as flat (h,) arrays here,
        # unlike the (1, 1, h) initializers above.
        coreml_model = convert(onnx_model)
        inputdict = {}
        inputdict['input0'] = X
        inputdict['initial_h'] = np.zeros((h), dtype=np.float32)
        inputdict['initial_c'] = np.zeros((h), dtype=np.float32)
        coreml_out_dict = coreml_model.predict(inputdict, useCPUOnly=True)
        coreml_out_all = coreml_out_dict['output0']
        coreml_out_last = coreml_out_dict['output1']

        # Both the full hidden-state sequence and the final state must
        # match to 5 decimal places.
        _assert_outputs(caffe2_out_all.flatten(),
                        coreml_out_all.flatten(),
                        decimal=5)
        _assert_outputs(caffe2_out_last.flatten(),
                        coreml_out_last.flatten(),
                        decimal=5)