Example #1
# Imports implied by this snippet:
from onnx import ModelProto
from onnx_caffe2.backend import Caffe2Backend as c2


def onnx_to_caffe2(onnx_model, output, init_net_output):
    # Parse the serialized ONNX model from a binary file handle.
    onnx_model_proto = ModelProto()
    onnx_model_proto.ParseFromString(onnx_model.read())

    # Convert the ONNX model into a pair of Caffe2 nets and serialize them.
    init_net, predict_net = c2.onnx_graph_to_caffe2_net(onnx_model_proto)
    init_net_output.write(init_net.SerializeToString())
    output.write(predict_net.SerializeToString())
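# A minimal usage sketch (the file names are illustrative assumptions):
# the function expects binary file handles.
with open("model.onnx", "rb") as onnx_model, \
        open("predict_net.pb", "wb") as output, \
        open("init_net.pb", "wb") as init_net_output:
    onnx_to_caffe2(onnx_model, output, init_net_output)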
Example #2
# Imports implied by this snippet (onnx_pb2 is the generated protobuf
# module shipped with older onnx releases):
from onnx import onnx_pb2
from onnx_caffe2.backend import Caffe2Backend as c2


def onnx_to_caffe2(onnx_model, output, init_net_output, mobile):
    # Parse the serialized ONNX model from a binary file handle.
    onnx_model_proto = onnx_pb2.ModelProto()
    onnx_model_proto.ParseFromString(onnx_model.read())
    graph_def = onnx_model_proto.graph
    # The mobile flag requests nets suitable for mobile deployment.
    init_net, predict_net = c2.onnx_graph_to_caffe2_net(graph_def, mobile)
    init_net_output.write(init_net.SerializeToString())
    output.write(predict_net.SerializeToString())
Example #3
# Imports implied by this snippet; benchmark_caffe2_model and run_model are
# assumed to be test helpers defined elsewhere in the surrounding script.
import tempfile

import numpy as np
import onnx
import onnx_chainer
from onnx_caffe2.backend import Caffe2Backend


def check_output(model, x):
    # Export the Chainer model to a temporary ONNX file and read it back.
    with tempfile.NamedTemporaryFile('wb') as fp:
        onnx_chainer.export(model, x, fp)
        onnx_model = onnx.ModelProto.FromString(open(fp.name, 'rb').read())

        # Convert the ONNX graph to Caffe2 nets and benchmark them.
        init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(
            onnx_model.graph, device='CPU')

        benchmark_caffe2_model(init_net, predict_net)

        # Compare the Chainer output against the Caffe2 output.
        y = model(x)
        if isinstance(y, dict):
            y = y['prob']
        chainer_out = y.array
        caffe2_out = run_model(onnx_model, [x])[0]

        np.testing.assert_almost_equal(chainer_out, caffe2_out, decimal=5)
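# A hedged usage sketch (the model choice and input shape are illustrative
# assumptions; VGG16Layers downloads pretrained weights on first use):
vgg = chainer.links.VGG16Layers()
x_in = np.random.randn(1, 3, 224, 224).astype(np.float32)
check_output(vgg, x_in)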
Example #4
preview = torchvision.utils.make_grid(imgs)
preview = preview / 2 + 0.5  # unnormalize
npimg = preview.numpy()
plt.imshow(numpy.transpose(npimg, (1, 2, 0)))
# plt.show()
# fine-tune a SqueezeNet
model_squ = models.squeezenet1_1(pretrained=True)
# replace the pooling layer
model_squ.features._modules["2"] = torch.nn.MaxPool2d(
    kernel_size=3, stride=2, dilation=1,
    ceil_mode=False)
# replace the output layer: we have six classes
model_squ.classifier._modules["1"] = torch.nn.Conv2d(
    512, 6, kernel_size=(1, 1), stride=(1, 1))
# num_classes must be updated to match the new classifier,
# otherwise the net doesn't work
model_squ.num_classes = 6
print(model_squ)
# define the loss function, optimizer and learning-rate schedule
criterion = torch.nn.CrossEntropyLoss()
opt = torch.optim.SGD(model_squ.parameters(), lr=0.001, momentum=0.92)
exp = torch.optim.lr_scheduler.StepLR(opt, step_size=15, gamma=0.1)
# start training
training = train.train_model
model_squ = training(model_squ, criterion, opt, exp, num_epochs=20)
# convert the model to a Caffe2 model
# export through ONNX using a dummy input (Variable comes from torch.autograd)
x = Variable(torch.randn(1, 3, 224, 224), requires_grad=True)
onnxModel = torch.onnx._export(model_squ, x, "sqz.onnx", export_params=True)
# load the ONNX model back and convert it to a pair of Caffe2 nets
model = onnx.load("sqz.onnx")
prepared_backend = onnx_caffe2.backend.prepare(model)
init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(model.graph)
with open("squeeze_init_net.pb", "wb") as f:
    f.write(init_net.SerializeToString())
with open("squeeze_predict_net.pb", "wb") as f:
    f.write(predict_net.SerializeToString())
Example #5
# Prepare the inputs, keyed by the name of the graph's first input.
W = {model.graph.input[0].name: x.data.numpy()}

# Run the Caffe2 net:
c2_out = prepared_backend.run(W)[0]

# Verify the numerical correctness up to 3 decimal places
np.testing.assert_almost_equal(torch_out.data.cpu().numpy(), c2_out, decimal=3)

# Then we can export the model to run on mobile devices, leveraging the cross-platform capability of caffe2.

# Export to mobile
from onnx_caffe2.backend import Caffe2Backend as c2

init_net, predict_net = c2.onnx_graph_to_caffe2_net(model.graph, True)
with open("squeeze_init_net.pb", "wb") as f:
    f.write(init_net.SerializeToString())
with open("squeeze_predict_net.pb", "wb") as f:
    f.write(predict_net.SerializeToString())

# You'll see squeeze_init_net.pb and squeeze_predict_net.pb in the same
# directory as this notebook. Let's make sure it runs with Predictor,
# since that's what we'll use in the mobile app.
# Verify it runs with Predictor
with open("squeeze_init_net.pb") as f:
    init_net = f.read()
with open("squeeze_predict_net.pb") as f:
    predict_net = f.read()
from caffe2.python import workspace

p = workspace.Predictor(init_net, predict_net)
# The following code should run:
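# A minimal sketch (the input shape assumes a SqueezeNet-style model;
# Predictor.run takes a list of input arrays):
img = np.random.rand(1, 3, 224, 224).astype(np.float32)
results = p.run([img])
print(results[0].shape)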
Example #6
import onnx
import numpy as np

# Load the ONNX model and check that the IR is well formed.
model = onnx.load("model.onnx")
onnx.checker.check_model(model)

# Prepare a Caffe2 backend representation of the model.
import onnx_caffe2.backend
prepared_backend = onnx_caffe2.backend.prepare(model)

# Convert the ONNX graph to Caffe2 nets and serialize them to disk.
from onnx_caffe2.backend import Caffe2Backend as c2
init_net, predict_net = c2.onnx_graph_to_caffe2_net(model.graph, device="CPU")
with open("squeeze_init_net.pb", "wb") as f:
    f.write(init_net.SerializeToString())
with open("squeeze_predict_net.pb", "wb") as f:
    f.write(predict_net.SerializeToString())
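# A minimal sketch of running the prepared backend (the input shape is an
# illustrative assumption):
sample = np.random.randn(1, 3, 224, 224).astype(np.float32)
outputs = prepared_backend.run(sample)
print(outputs[0].shape)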
Example #7
# Imports implied by this snippet:
import chainer
import numpy as np
import onnx_chainer
from onnx_caffe2.backend import Caffe2Backend

x = np.random.randn(1, 3, 224, 224).astype(np.float32)

# Do not forget to set the train flag off!
chainer.config.train = False

# Export to ONNX model
onnx_model = onnx_chainer.export(model, x)

# Get an output of the Chainer model
y = model(x)
if isinstance(y, dict):
    y = y['prob']
chainer_out = y.array

# Convert ONNX model to Caffe2 model
init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(
    onnx_model.graph, device='CPU')

# Save the Caffe2 model to disk (save_caffe2_net is an assumed helper
# defined elsewhere in this script, as is run_model below)
init_file = "./vgg16_init.pb"
predict_file = "./vgg16_predict.pb"
save_caffe2_net(init_net, init_file, output_txt=False)
save_caffe2_net(predict_net, predict_file, output_txt=True)

# Get an output of the Caffe2 model
caffe2_out = run_model(onnx_model, [x])[0]

# Check that the two outputs have the same values
np.testing.assert_almost_equal(
    chainer_out, caffe2_out, decimal=5)
Example #8
def PyTorchModule(helper,
                  model,
                  sample_arguments,
                  caffe2_inputs,
                  prefix_name=None):
    """
    Embed an ONNX-exportable PyTorch Model into a Caffe2 model being built.

    Arguments:
        helper (caffe2.python.core.ModelHelper): the model helper where
            this imported network should be inserted
        model (torch.nn.Module): the model to be exported
        sample_arguments (tuple of arguments): the inputs to
            the model, e.g., such that ``model(*args)`` is a valid
            invocation of the model.  Any non-Variable arguments will
            be hard-coded into the exported model; any Variable arguments
            will become inputs of the exported model, in the order they
            occur in args.  If args is a Variable, this is equivalent
            to having called it with a 1-ary tuple of that Variable.
            (Note: passing keyword arguments to the model is not currently
            supported.  Give us a shout if you need it.)
        caffe2_inputs (list of str or caffe2.python.core.BlobReference): the
           caffe2 Blobs that should be inputs to this network. Must be
           the same length as sample_arguments
        prefix_name: prefix name to add to each member of the blob; if None,
           a fresh prefix pytorch_import_N/ is used
    Returns:
        A tuple of caffe2.python.core.BlobReference objects referring to the
        model's outputs, or a single BlobReference when the model returns a
        single value.
    """
    if prefix_name is None:
        global _next_idx
        prefix_name = 'pytorch_import_' + str(_next_idx) + '/'
        _next_idx += 1

    # TODO: handle the case where model cannot be exported
    # and embed as a Python op in Caffe2
    f = io.BytesIO()
    torch.onnx.export(model, sample_arguments, f, export_params=True)
    onnx_model = onnx.load(io.BytesIO(f.getvalue()))
    init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)

    initialized = set([x.name for x in onnx_model.graph.initializer])
    uninitialized_inputs = {
        x.name: i
        for i, x in enumerate(onnx_model.graph.input)
        if x.name not in initialized
    }

    if len(uninitialized_inputs) != len(caffe2_inputs):
        raise ValueError('Expected {} inputs but found {}'.format(
            len(uninitialized_inputs), len(caffe2_inputs)))

    def remap_blob_name(name):
        if name in uninitialized_inputs:
            idx = uninitialized_inputs[name]
            return str(caffe2_inputs[idx])
        return prefix_name + name

    predict_net = Net(predict_net).Clone('anon', _FakeDict(remap_blob_name))
    helper.net.AppendNet(predict_net)

    init_net = Net(init_net).Clone('anon', _FakeDict(remap_blob_name))
    helper.param_init_net.AppendNet(init_net)

    results = tuple([
        BlobReference(remap_blob_name(x.name), helper.net)
        for x in onnx_model.graph.output
    ])
    return results
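# A hedged usage sketch of PyTorchModule (the ModelHelper name, module and
# blob names below are illustrative assumptions, not from the original
# source):
from caffe2.python.model_helper import ModelHelper

helper = ModelHelper(name="embed_pytorch")
torch_module = torch.nn.Linear(4, 2)
sample = torch.randn(1, 4)
out_blob, = PyTorchModule(helper, torch_module, (sample,), ["input_blob"])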
Example #9
    out = persisted_sess.graph.get_tensor_by_name('add_10:0')

    feed_dict = {inp: np.array(image)}

    res = persisted_sess.run(out, feed_dict)
    same = id2class[np.argmax(res)] == label
    print("(Check) Check if Tf_pb predicts right:", same)

# ============== ONNX to Caffe2_rep ==============
cf2_rep = cf2_backend.prepare(model)
output = cf2_rep.run(np_onnx_image.astype(np.float32))

same = id2class[np.argmax(output)] == label
print("(Check) Check if Cf2_rep predicts right:", same)

init_net, predict_net = cf2_backend2.onnx_graph_to_caffe2_net(model.graph)
with open("./ONNX/squeeze_init_net.pb", "wb") as f:
    f.write(init_net.SerializeToString())
with open("./ONNX/squeeze_predict_net.pb", "wb") as f:
    f.write(predict_net.SerializeToString())
print("(Make) Make pbs of caffe2 successfully.")

text = "const char * imagenet_classes[] {" + "\n"
for x in range(len(id2class.keys())):
    text += ('"{}",'.format(id2class[x]) + "\n")
text += "};"
with open("./ONNX/classes.h", "w") as f:
    f.write(text)
print("(Make) Make classes.h(Caffe2) successfully.")

# ============== tf.pb to tf.lite ==============
Example #10
import torch
import torch.nn as nn
import torchvision
import onnx

# vnet is assumed to be a local module defining the VNet architecture.
model = vnet.VNet(elu=True, nll=False)
model = nn.parallel.DataParallel(model)
dummy_input = torch.randn(1, 1, 64, 224, 224, device='cuda')

# Load the trained weights from a checkpoint.
state = torch.load('vnet_checkpoint.pth')['state_dict']
model.load_state_dict(state)

# Switch to eval mode and unwrap the DataParallel module before export.
model.train(False)
model = model.module.cuda()

torch.onnx.export(model, dummy_input, "vnet.onnx", verbose=True)

model = onnx.load("vnet.onnx")

# Check that the IR is well formed
# onnx.checker.check_model(model)

# Print a human readable representation of the graph
print(onnx.helper.printable_graph(model.graph))

# import caffe2.python.onnx.backend as c2
from onnx_caffe2.backend import Caffe2Backend
model_name = 'Vnet'
init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(model.graph,
                                                               device="CUDA")
with open(model_name + "_init.pb", "wb") as f:
    f.write(init_net.SerializeToString())
with open(model_name + "_predict.pb", "wb") as f:
    f.write(predict_net.SerializeToString())