Example #1
0
    def test_slice(self):
        """Round-trip a Caffe2 Slice net through ONNX and check the result.

        Builds a one-op Slice net, runs it natively in Caffe2, converts it
        to an ONNX model, runs that through the Caffe2 ONNX backend, and
        asserts the output equals the equivalent numpy slice.
        """
        data = np.random.randn(1, 2, 3).astype(np.float32)
        slice_starts = np.array([0, 1, 0], dtype=np.int32)
        slice_ends = np.array([-1, 2, 3], dtype=np.int32)

        net = caffe2_pb2.NetDef()
        net.name = 'test-slice-net'
        net.external_input[:] = ['X']
        net.external_output[:] = ['Y']
        slice_op = core.CreateOperator(
            'Slice',
            inputs=['X'],
            outputs=['Y'],
            starts=slice_starts,
            ends=slice_ends,
        )
        net.op.extend([slice_op])

        # Native Caffe2 execution (no init net needed; X is fed directly).
        ws, (Y, ) = c2_native_run_net(init_net=None,
                                      predict_net=net,
                                      inputs=[data])

        # Convert to ONNX, then execute via the Caffe2 ONNX backend.
        value_info = {
            'X': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[data.dtype], data.shape)
        }
        onnx_model = c2_onnx.caffe2_net_to_onnx_model(
            predict_net=net,
            value_info=value_info)
        Y, = c2.run_model(onnx_model, inputs=[data])
        np.testing.assert_almost_equal(Y, data[:, 1:2, :])
Example #2
0
def check_output(model, x):
    """Export *model* to ONNX, run it on the Caffe2 backend, and compare
    the backend output against the Chainer forward pass.

    Args:
        model: Chainer model (callable link). Its output may be a Variable
            or a dict containing a ``'prob'`` entry.
        x: Input array fed to both the Chainer model and the ONNX export.

    Raises:
        AssertionError: if Chainer and Caffe2 outputs differ beyond 5
            decimal places.
    """
    with tempfile.NamedTemporaryFile('wb') as fp:
        onnx_chainer.export(model, x, fp)
        # Ensure the exported bytes are on disk before re-reading the file
        # by name -- the export wrote through the still-open handle.
        fp.flush()
        # BUG FIX: the original did open(fp.name, 'rb').read() and leaked
        # the read handle; use a context manager so it is always closed.
        with open(fp.name, 'rb') as exported:
            onnx_model = onnx.ModelProto.FromString(exported.read())

        init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(
            onnx_model.graph, device='CPU')

        benchmark_caffe2_model(init_net, predict_net)

        y = model(x)
        if isinstance(y, dict):
            y = y['prob']
        chainer_out = y.array
        caffe2_out = run_model(onnx_model, [x])[0]

        np.testing.assert_almost_equal(chainer_out, caffe2_out, decimal=5)
Example #3
0
# Random input in NCHW layout, float32 (single 3x224x224 image batch).
x = np.random.randn(1, 3, 224, 224).astype(np.float32)

# Inference mode: turn Chainer's train flag off so train-time behaviour
# (e.g. dropout) is disabled during export and the reference forward pass.
chainer.config.train = False

# Export the Chainer model to an ONNX model proto.
onnx_model = onnx_chainer.export(model, x)

# Reference output from the Chainer model itself.
chainer_result = model(x)
if isinstance(chainer_result, dict):
    chainer_result = chainer_result['prob']
chainer_out = chainer_result.array

# Translate the ONNX graph into a pair of Caffe2 nets (init + predict).
init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(
    onnx_model.graph, device='CPU')

# Persist both Caffe2 nets to disk.
init_file = "./vgg16_init.pb"
predict_file = "./vgg16_predict.pb"
save_caffe2_net(init_net, init_file, output_txt=False)
save_caffe2_net(predict_net, predict_file, output_txt=True)

# Output from the Caffe2 backend running the exported ONNX model.
caffe2_out = run_model(onnx_model, [x])[0]

# The two runtimes must agree to 5 decimal places.
np.testing.assert_almost_equal(
    chainer_out, caffe2_out, decimal=5)
Example #4
0
    def test_convert_end2end(self):
        """Round-trip an FC net: Caffe2 -> ONNX -> Caffe2, repeatedly.

        Builds a single-FC-op predict net plus a GivenTensorFill init net,
        then loops five times converting Caffe2 -> ONNX and back via the
        CLI commands, checking numeric output after each conversion. The
        same temp files are reused (seek(0)) across iterations, so the
        statement order below is load-bearing.
        """
        # Temp files reused as both output targets and inputs across the
        # conversion loop below.
        predict_net_f = tempfile.NamedTemporaryFile()
        init_net_f = tempfile.NamedTemporaryFile()
        onnx_model_f = tempfile.NamedTemporaryFile()

        # Blob names for the FC op: Y = X * W^T + b.
        x = 'X'
        w = 'W'
        b = 'b'
        y = 'Y'

        # Predict net: a single FC with axis=2, i.e. the last axis of the
        # rank-3 input is the feature axis.
        predict_net = caffe2_pb2.NetDef()
        predict_net.name = 'test-convert-end2end'
        predict_net.external_input[:] = [x, w, b]
        predict_net.external_output[:] = [y]
        predict_net.op.extend([
            core.CreateOperator(
                'FC',
                inputs=[x, w, b],
                outputs=[y],
                axis=2,
            ),
        ])
        predict_net_f.write(predict_net.SerializeToString())
        predict_net_f.flush()

        # Init net: fills the weight and bias blobs with fixed random data.
        init_net = caffe2_pb2.NetDef()
        init_net.name = 'test-convert-end2end-init'
        init_net.external_output[:] = [w, b]
        x_val = np.random.randn(1, 3, 2).astype(np.float32)
        w_val = np.random.randn(4, 2).astype(np.float32)
        b_val = np.random.randn(4).astype(np.float32)
        init_net.op.extend([
            core.CreateOperator(
                'GivenTensorFill',
                [],
                [w],
                values=w_val,
                shape=w_val.shape,
            ),
            core.CreateOperator(
                'GivenTensorFill',
                [],
                [b],
                values=b_val,
                shape=b_val.shape,
            ),
        ])
        init_net_f.write(init_net.SerializeToString())
        init_net_f.flush()

        # Ground-truth result computed directly with numpy.
        y_val = np.matmul(x_val, w_val.transpose()) + b_val
        for _ in range(5):
            # Caffe2 -> ONNX via the CLI entry point; only X needs explicit
            # value_info since W and b come from the init net.
            self._run_command(
                caffe2_to_onnx, [
                    predict_net_f.name,
                    '--caffe2-init-net', init_net_f.name,
                    '--output', onnx_model_f.name,
                    '--value-info',
                    json.dumps({
                        x: (TensorProto.FLOAT, (1, 3, 2)),
                    }),
                ],
                catch_exceptions=False,
            )

            # Rewind and parse the ONNX model the command just wrote, then
            # check it computes the expected FC result.
            onnx_model_f.seek(0)
            onnx_model = ModelProto()
            onnx_model.ParseFromString(onnx_model_f.read())
            np.testing.assert_almost_equal(
                c2.run_model(
                    onnx_model, {onnx_model.graph.input[0].name: x_val}),
                [y_val])

            # ONNX -> Caffe2, overwriting the original net files in place.
            self._run_command(
                onnx_to_caffe2, [
                    onnx_model_f.name,
                    '--output', predict_net_f.name,
                    '--init-net-output', init_net_f.name,
                ])
            predict_net_f.seek(0)
            predict_net = caffe2_pb2.NetDef()
            predict_net.ParseFromString(predict_net_f.read())
            init_net_f.seek(0)
            init_net = caffe2_pb2.NetDef()
            init_net.ParseFromString(init_net_f.read())
            # NOTE: x is rebound here -- the converter may rename the input
            # blob, so subsequent iterations use the converted name.
            x = predict_net.external_input[0]
            np.testing.assert_almost_equal(c2_native_run_net(init_net=init_net,
                                                             predict_net=predict_net,
                                                             inputs={x: x_val})[1],
                                           [y_val])
Example #5
0
import onnx
import torch.onnx

import onnx_caffe2.backend as backend
import torch as t
import vnet
import torch.nn as nn
import numpy as np
#
# model = vnet.VNet(elu=True, nll=False)
# model = nn.parallel.DataParallel(model)
# dummy_input = torch.randn(1, 1, 64, 224, 224,device='cuda').float()
# model = t.load('vnet_checkpoint.pth')['model']
# # model.load_state_dict(state)
# # model.dump_patch = True
# model.eval()
# #
# model = model.cuda()
# #
# torch.onnx.export(model, dummy_input, "vnet.onnx", verbose=True)
# # # #
# Load the previously exported ONNX model and validate its graph.
model = onnx.load("vnet.onnx")
onnx.checker.check_model(model)
# print(onnx.helper.printable_graph(model.graph))
# rep = backend.prepare(model, device="CUDA")
# outputs = rep.run(np.random.randn(1, 1, 64, 224, 224).astype(np.float32))
# print(outputs[0])
# Run the model on a random 64x224x224 single-channel volume.
# BUG FIX: run_model expects a *sequence* of input arrays (one per graph
# input), not a bare ndarray -- a bare array would be iterated along its
# first axis and mapped with the wrong shape. Wrap the single input in a
# list, matching the run_model(onnx_model, [x]) usage elsewhere.
out = backend.run_model(
    model,
    [np.random.randn(1, 1, 64, 224, 224).astype(np.float32)])
print(out)