Example #1
def gluon2keras(model, input_shapes, verbose=False, names=False):
    """
    Deep neural network model converter from gluon to keras (via pytorch)
    :param model: gluon model to convert
    :param input_shapes: list of input shapes
    :param verbose: verbose output
    :param names: keras names (keep, short, random-suffix)
    :return: keras model
    """

    # Convert gluon model to pytorch model
    pytorch_model = gluon2pytorch(model, input_shapes, dst_dir=None, pytorch_module_name='converted_model')
    pytorch_model.eval()

    # Fix shapes
    input_vars = []
    keras_shapes = []

    for shape in input_shapes:
        input_np = np.random.uniform(0, 1, shape)
        input_var = Variable(torch.FloatTensor(input_np))
        input_vars.append(input_var)
        keras_shapes.append(shape[1:])

    # pytorch_to_keras expects the inputs as a single sequence
    input_vars = tuple(input_vars)

    # Convert pytorch model to keras
    k_model = pytorch_to_keras(pytorch_model, input_vars, keras_shapes, change_ordering=False, verbose=verbose, names=names)
    return k_model
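A minimal usage sketch for the converter above. It assumes mxnet and gluoncv are installed; the model-zoo name and input shape below are illustrative only.

from gluoncv import model_zoo

# Illustrative only: grab a pretrained Gluon model and run it through gluon2keras
gluon_model = model_zoo.get_model('resnet18_v1', pretrained=True)
keras_model = gluon2keras(gluon_model, [(1, 3, 224, 224)], verbose=True, names='keep')
keras_model.summary()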
Example #2
    def test_with_padding(self):
        max_error = 0
        for i in range(self.N):
            kernel_size = np.random.randint(1, 7)
            inp = np.random.randint(kernel_size + 1, 100)
            out = np.random.randint(1, 100)

            model = TestConvTranspose2d(inp,
                                        out,
                                        kernel_size,
                                        2,
                                        inp % 3,
                                        padding=1)

            input_np = np.random.uniform(0, 1, (1, inp, inp, inp))
            input_var = Variable(torch.FloatTensor(input_np))
            output = model(input_var)

            k_model = pytorch_to_keras(model,
                                       input_var, (
                                           inp,
                                           inp,
                                           inp,
                                       ),
                                       verbose=True)

            pytorch_output = output.data.numpy()
            keras_output = k_model.predict(input_np)

            error = np.max(pytorch_output - keras_output)
            print(error)
            if max_error < error:
                max_error = error

        print('Max error: {0}'.format(max_error))
Example #3
def convert(model_name):
    input_np = np.random.uniform(0, 1, (1, 3, 32, 32))
    input_var = Variable(torch.FloatTensor(input_np))
    resume = 'whitebox_attack_target_models/checkpoints_100cifar_alexnet' + model_name + '/bestepoch'
    model = AlexNet(100)
    model = torch.nn.DataParallel(model)
    criterion = nn.CrossEntropyLoss()
    print('==> Resuming from checkpoint..')
    assert os.path.isfile(resume), 'Error: no checkpoint directory found!'
    checkpoint = os.path.dirname(resume)
    checkpoint = torch.load(resume, map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'])


    model.eval()
    k_model = pytorch_to_keras(model.module, input_var, [(3, 32, 32,)], verbose=True, change_ordering=True)
    k_model.save(model_name+'model')
    print('saved')
    tf.keras.models.load_model(model_name+'model')
    print('loaded')

    for i in range(3):
        input_np = np.random.uniform(0, 1, (1, 3, 32, 32))
        input_var = Variable(torch.FloatTensor(input_np))
        output = model(input_var)
        pytorch_output = output.data.numpy()
        keras_output = k_model.predict(np.transpose(input_np, [0, 2, 3, 1]))
        error = np.max(pytorch_output - keras_output)
        print('error -- ', error)  # Around zero :)
Example #4
def convert_to_srzoo(model,
                     input_name='sr_input',
                     channels_first=True,
                     model_name='model.pb',
                     output_name='sr_output'):

    if channels_first:
        input_np = np.zeros([1, 3, 128, 128])
        input_shape = (3, None, None)
    else:
        input_np = np.zeros([1, 128, 128, 3])
        input_shape = (None, None, 3)

    input_var = torch.autograd.Variable(torch.FloatTensor(input_np))

    keras_model = pytorch_to_keras(model, (input_var),
                                   input_names=[input_name],
                                   input_shapes=[input_shape],
                                   verbose=True)

    sess = tf.keras.backend.get_session()
    output = keras_model.outputs[0]

    with sess.graph.as_default():
        output_node = tf.identity(output, name=output_name)

    converter_common.write_tf_session_graph(sess=sess,
                                            model_name=model_name,
                                            output_name=output_name)
Example #5
def torch_to_keras(model, image_shape):
    """
    :param model: instance of PyTorch model
    :param image_shape: list of [c, h, w]
    """
    # use dummy variable to trace the model (see github README)
    input_np = np.random.uniform(0, 1,
                                 [1, *image_shape])  # add batch dimension
    input_var = torch.autograd.Variable(
        torch.tensor(input_np, dtype=torch.float, device=device))

    input_shapes = [image_shape]
    return pytorch_to_keras(model, input_var, input_shapes, verbose=False)
Example #6
def convert_pytorch2keras2ir():
    max_error = 0
    model = VGG16(vgg([64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M', 512, 512, 512], 3))
    # load weights here
    state_dict_to_load = torch.load('vgg16_reducedfc/vgg16_reducedfc.pth', map_location=lambda storage, loc: storage)
    renamed_state_dict_to_load = {}
    for k, v in state_dict_to_load.items():
        renamed_state_dict_to_load['vgg.' + k] = v
    print(model.state_dict().keys())
    print(state_dict_to_load.keys())

    model.load_state_dict(renamed_state_dict_to_load)

    for m in model.modules():
        m.training = False

    input_np = np.ones((1, 3, 224, 224)) * 0.5
    # np.random.uniform(0, 1, (1, 3, 224, 224))
    input_var = Variable(torch.FloatTensor(input_np))
    output = model(input_var)

    k_model = pytorch_to_keras(model, input_var, [(3, 224, 224,)], verbose=True)

    pytorch_output = output.data.numpy()

    print(pytorch_output)
    print(np.argmax(pytorch_output))

    keras_output = k_model.predict(input_np)

    error = np.max(pytorch_output - keras_output)
    print(error)
    if max_error < error:
        max_error = error

    print('Max error: {0}'.format(max_error))

    # save network structure as JSON
    json_string = k_model.to_json()
    with open("vgg16_reducedfc/imagenet_vgg16_reducedfc.json", "w") as of:
        of.write(json_string)

    print("Network structure is saved as [vgg16_reducedfc/imagenet_vgg16_reducedfc.json].")

    k_model.save_weights('vgg16_reducedfc/imagenet_vgg16_reducedfc.h5')

    print("Network weights are saved as [vgg16_reducedfc/imagenet_vgg16_reducedfc.h5].")

    parser = Keras2Parser(('vgg16_reducedfc/imagenet_vgg16_reducedfc.json', 'vgg16_reducedfc/imagenet_vgg16_reducedfc.h5'))
    parser.run('vgg16_reducedfc/ir')
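This snippet relies on several names defined elsewhere in its repository (VGG16, vgg) plus standard imports the excerpt omits. A hedged guess at the standard ones it assumes (the MMdnn parser path is the one current MMdnn releases use; the repo-local VGG16/vgg definitions are not reproduced here):

import numpy as np
import torch
from torch.autograd import Variable
from pytorch2keras.converter import pytorch_to_keras
# MMdnn's Keras-to-IR parser used at the end of the function
from mmdnn.conversion.keras.keras2_parser import Keras2Parser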
Example #7
def convert_pytorch_to_keras(model, img_size, output_file):
    if isinstance(img_size, int):
        img_size = (img_size, img_size)

    n_channels = 3
    input_np = np.random.uniform(0, 1,
                                 (1, n_channels, img_size[0], img_size[1]))
    input_var = torch.FloatTensor(input_np)

    k_model = pytorch_to_keras(model,
                               input_var,
                               [(n_channels, img_size[0], img_size[1])],
                               verbose=True)
    k_model.save(output_file + ".h5")
Example #8
def convert(model_name, model_file, keras_model_file, no_top, variable_size,
            change_ordering):
    include_top = not no_top
    print('include top: {}'.format(include_top))
    print('variable size: {}'.format(variable_size))
    print('change ordering: {}'.format(change_ordering))
    rand_tensor = torch.rand(1, 3, 224, 224).cpu()
    model = eval(model_name)(model_file=model_file, include_top=include_top)
    k_model = pytorch_to_keras(model,
                               rand_tensor,
                               [(3, None,
                                 None)] if variable_size else [(3, 224, 224)],
                               verbose=False,
                               change_ordering=change_ordering)
    base_dir = os.path.dirname(keras_model_file)
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)
    k_model.save(keras_model_file)
Example #9
def converted_fully_convolutional_resnet18(
    input_tensor,
    pretrained_resnet=True,
):
    # define input tensor
    input_var = Variable(torch.FloatTensor(input_tensor))

    # get PyTorch ResNet18 model
    model_to_transfer = FullyConvolutionalResnet18(
        pretrained=pretrained_resnet)
    model_to_transfer.eval()

    # convert PyTorch model to Keras
    model = pytorch_to_keras(
        model_to_transfer,
        input_var,
        [input_var.shape[-3:]],
        change_ordering=True,
        verbose=False,
        name_policy="keep",
    )

    return model
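A hedged usage sketch for the function above. Because the conversion passes change_ordering=True, the returned Keras model expects NHWC input, so the NCHW dummy tensor is transposed before predict; the 224x224 size is only an example.

import numpy as np

dummy_nchw = np.random.uniform(0, 1, (1, 3, 224, 224)).astype(np.float32)
keras_resnet = converted_fully_convolutional_resnet18(dummy_nchw)

# NCHW -> NHWC before feeding the converted (channels-last) model
preds = keras_resnet.predict(dummy_nchw.transpose(0, 2, 3, 1))
print(preds.shape)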
Example #10
                at_loss=opt.at_loss)

path = "CubeModel/pretrained/save_60.pth"
state_dict = torch.load(path, map_location='cpu')["state_dict"]
for k in list(state_dict.keys()):
    k_new = k[7:]
    state_dict[k_new] = state_dict[k]
    state_dict.pop(k)
model.load_state_dict(state_dict, strict=True)
model.eval()

# we should specify shape of the input tensor
k_model = pytorch_to_keras(model,
                           input_var, [(
                               3,
                               opt.person_size,
                               opt.person_size,
                           )],
                           verbose=True,
                           name_policy='short')

frozen_graph = freeze_session(
    K.get_session(), output_names=[out.op.name for out in k_model.outputs])
tf.train.write_graph(frozen_graph, ".", "my_model.pb", as_text=False)
# print([i for i in k_model.outputs])
# keras_file = "my_model.h5"
# keras.models.save_model(model, keras_file)
# converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
# converter = lite.TFLiteConverter.from_keras_model(k_model)
# convert_frozen_model_to_NWHC("my_model.pb")
# input_array = ['input_0']
# output_array = ['output_0', 'output_1', 'output_2', 'output_3', 'output_4']
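Example #10 (and Example #14 below) call a freeze_session helper whose body the excerpts do not show. A minimal TF1-style sketch of the widely circulated recipe, not necessarily the exact helper these repositories ship:

import tensorflow as tf


def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
    # TF1-style: freeze all session variables into constants and prune the
    # graph down to the requested output nodes.
    graph = session.graph
    with graph.as_default():
        freeze_var_names = list(
            set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            for node in input_graph_def.node:
                node.device = ""
        return tf.graph_util.convert_variables_to_constants(
            session, input_graph_def, output_names or [], freeze_var_names)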
Example #11
    def save_h5_model(self, input_var, input_shape, save_dir):
        # torch.save(self, save_dir)
        k_model = pytorch_to_keras(self, input_var, input_shape, verbose=True)
        k_model.save(save_dir)
Example #12
import numpy as np
import torch
from torch.autograd import Variable
from pytorch2keras.converter import pytorch_to_keras
import torchvision.models as models

input_np = np.random.uniform(0, 1, (1, 3, 320, 320))
input_var = Variable(torch.FloatTensor(input_np))
model = models.resnet18()
model.eval()
k_model = pytorch_to_keras(model, input_var, [(3, 320, 320,)], verbose=True, change_ordering=True)

for i in range(3):
    input_np = np.random.uniform(0, 1, (1, 3, 320, 320))
    input_var = Variable(torch.FloatTensor(input_np))
    output = model(input_var)
    pytorch_output = output.data.numpy()
    keras_output = k_model.predict(np.transpose(input_np, [0, 2, 3, 1]))
    error = np.max(pytorch_output - keras_output)
    print('error -- ', error)
Example #13
    def forward(self, x, y, z):
        return self.conv2d(x) + self.conv2d(y) + self.conv2d(z)


if __name__ == '__main__':
    max_error = 0
    for i in range(100):
        kernel_size = np.random.randint(1, 7)
        inp = np.random.randint(kernel_size + 1, 100)
        out = np.random.randint(1, 100)

        model = TestMultipleInputs(inp, out, kernel_size, inp % 2)

        input_np = np.random.uniform(0, 1, (1, inp, inp, inp))
        input_var = Variable(torch.FloatTensor(input_np))
        input_var2 = Variable(torch.FloatTensor(input_np))
        input_var3 = Variable(torch.FloatTensor(input_np))
        output = model(input_var, input_var2, input_var3)

        k_model = pytorch_to_keras(model, [input_var, input_var2, input_var3], [(inp, inp, inp,), (inp, inp, inp,), (inp, inp, inp,)], verbose=True)
        k_model.summary()
        pytorch_output = output.data.numpy()
        keras_output = k_model.predict([input_np, input_np, input_np])

        error = np.max(pytorch_output - keras_output)
        print(error)
        if max_error < error:
            max_error = error

    print('Max error: {0}'.format(max_error))
Example #14
from keras import backend as K
import tensorflow as tf

SIZE = 224
MODEL = 'sqnxt23v5_w2'

model = ptcv_get_model(MODEL, pretrained=True)
model.eval()

input_np = np.random.uniform(0, 1, (1, 3, SIZE, SIZE))
input_var = Variable(torch.FloatTensor(input_np))
output = model(input_var)

k_model = pytorch_to_keras(model,
                           input_var, (3, SIZE, SIZE),
                           verbose=True,
                           name_policy='renumerate')

# Check model
pytorch_output = output.data.numpy()
keras_output = k_model.predict(input_np)
error = np.max(pytorch_output - keras_output)
print('Error: {0}'.format(error))


def freeze_session(session,
                   keep_var_names=None,
                   output_names=None,
                   clear_devices=True):
    """
    Freezes the state of a session into a pruned computation graph.
    """
Example #15
    error = np.max(pytorch_output - keras_output)
    print('Error:', error)

    assert error < epsilon
    return error


if __name__ == '__main__':
    max_error = 0

    for i in range(10):
        emb_size = np.random.randint(10, 1000)
        inp_size = np.random.randint(10, 1000)

        model = LayerTest(inp_size, emb_size)
        model.eval()

        input_np = np.random.uniform(0, 1, (1, 1, inp_size))
        input_var = Variable(torch.LongTensor(input_np))
        output = model(input_var)

        k_model = pytorch_to_keras(model,
                                   input_var, [(1, inp_size)],
                                   verbose=True)

        error = check_error(output, k_model, input_np)
        if max_error < error:
            max_error = error

    print('Max error: {0}'.format(max_error))
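Several of these tests call a check_error helper of which only the tail survives in the truncated excerpts. A reconstruction consistent with those tails follows; the epsilon value is an assumption.

import numpy as np

epsilon = 1e-5  # assumed tolerance; the original test suite's value may differ


def check_error(output, k_model, input_np):
    # Compare the PyTorch forward pass against the converted Keras model.
    pytorch_output = output.data.numpy()
    keras_output = k_model.predict(input_np)

    error = np.max(pytorch_output - keras_output)
    print('Error:', error)

    assert error < epsilon
    return error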
Example #16
if __name__ == '__main__':
    max_error = 0

    for i in range(10):
        model = FTest()
        model.eval()

        input_np1 = np.random.uniform(0, 1, (1, 3, 224, 224))
        input_np2 = np.random.uniform(0, 1, (1, 3, 224, 224))
        input_var1 = Variable(torch.FloatTensor(input_np1))
        input_var2 = Variable(torch.FloatTensor(input_np2))
        output = model(input_var1, input_var2)

        k_model = pytorch_to_keras(model, [input_var1, input_var2], [(
            3,
            224,
            224,
        ), (
            3,
            224,
            224,
        )],
                                   verbose=True)

        error = check_error(output, k_model, [input_np1, input_np2])
        if max_error < error:
            max_error = error

    print('Max error: {0}'.format(max_error))
Example #17
    print('Error:', error)

    assert error < epsilon
    return error


if __name__ == '__main__':
    max_error = 0
    for i in range(100):
        model = ResNet(torchvision.models.resnet.BasicBlock, [2, 2, 2, 2])
        model.eval()

        input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
        input_var = Variable(torch.FloatTensor(input_np))
        output = model(input_var)

        k_model = pytorch_to_keras(model,
                                   input_var, (
                                       3,
                                       224,
                                       224,
                                   ),
                                   verbose=True,
                                   change_ordering=True)

        error = check_error(output, k_model, input_np.transpose(0, 2, 3, 1))
        if max_error < error:
            max_error = error

    print('Max error: {0}'.format(max_error))
Example #18
    assert error < epsilon
    return error


if __name__ == '__main__':
    max_error = 0
    for i in range(50):
        import random
        model = LayerTest(dim=np.random.randint(0, 3))
        model.eval()

        input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
        input_var = Variable(torch.FloatTensor(input_np))
        output = model(input_var)

        k_model = pytorch_to_keras(model, input_var, (3, 224, 224,), verbose=True)

        error = check_error(output, k_model, input_np)
        if max_error < error:
            max_error = error

    for i in range(50):
        model = FTest(dim=np.random.randint(0, 3))
        model.eval()

        input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
        input_var = Variable(torch.FloatTensor(input_np))
        output = model(input_var)

        k_model = pytorch_to_keras(model, input_var, (3, 224, 224,), verbose=True)
Example #19
# compute ONNX Runtime output prediction
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(dummy_input_1)}
ort_outs = ort_session.run(None, ort_inputs)

# compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(to_numpy(torch_out),
                           ort_outs[0],
                           rtol=1e-03,
                           atol=1e-05)

print(
    "Exported model has been tested with ONNXRuntime, and the result looks good!"
)

########################
###
import torch
from pytorch2keras.converter import pytorch_to_keras

x = torch.randn(1, 3, 224, 224, requires_grad=False, device='cuda')
k_model = pytorch_to_keras(aa,
                           x, [(
                               3,
                               None,
                               None,
                           )],
                           verbose=True,
                           names='short')
k_model.save(r'e:\keras.h5')
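The to_numpy helper used with ONNX Runtime at the top of this example is not defined in the excerpt; the usual definition (the standard PyTorch ONNX export tutorial pattern) is:

def to_numpy(tensor):
    # Detach from autograd if necessary and move to CPU before converting.
    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()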
Example #20
pip install pytorch2keras
'''

# Create and load model
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()

# Make dummy variables (and checking if the model works)
input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
input_var = Variable(torch.FloatTensor(input_np))
output = model(input_var)

# Convert the model!
k_model = pytorch_to_keras(model, input_var, (3, 224, 224), 
                     verbose=True, name_policy='short',
                     change_ordering=True)

# Save model to SavedModel format
tf.saved_model.save(k_model, "./output/mobilenet_saved")


# Convert Keras model to ConcreteFunction
full_model = tf.function(lambda x: k_model(x))
full_model = full_model.get_concrete_function(
    tf.TensorSpec(k_model.inputs[0].shape, k_model.inputs[0].dtype))

# Get frozen ConcreteFunction
frozen_func = convert_variables_to_constants_v2(full_model)
frozen_func.graph.as_graph_def()
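A possible follow-up, mirroring the tf.train.write_graph call in Example #10: serialize the frozen ConcreteFunction's graph to a .pb file. This assumes convert_variables_to_constants_v2 was imported from tensorflow.python.framework.convert_to_constants earlier in the (truncated) snippet; the output path is illustrative.

# Write the frozen graph to disk with the TF2 API
tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                  logdir="./output",
                  name="mobilenet_frozen.pb",
                  as_text=False)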
Example #21
    eprint('output = %s' % (o))

### INPUT
model = models.resnet50(pretrained=False)
model.fc = nn.Linear(2048, 683)
pretrained = torch.load(i)['model']
modelDictionary = model.state_dict()
for k in modelDictionary.keys():
    if (('module.' + k) in pretrained.keys()):
        modelDictionary[k] = pretrained.get(('module.' + k))
model.load_state_dict(modelDictionary)
for name, child in model.named_children():
    for name2, params in child.named_parameters():
        params.requires_grad = False
summary(model, (3, 224, 224), device='cpu')

### CONVERT TO KERAS JSON (TO AVOID PYTHON 3.7.5 VS 3.6.9 OPCODE CONFLICTS... WTF)
input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
input_var = Variable(torch.FloatTensor(input_np))
kmodel = pytorch_to_keras(model, input_var, (
    3,
    224,
    224,
), verbose=False)
kmodel.summary()
kmodel.save(o + '.hdf5')
kmodelJSON = kmodel.to_json()
with open(o + '.json', 'w') as JSON:
    JSON.write(kmodelJSON)
kmodel.save_weights(o + '-weights.hdf5')
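The natural counterpart of the JSON-plus-weights export above (and the point of avoiding a pickled model) is rebuilding the network on the other Python version. A short sketch reusing the o prefix from the snippet:

from keras.models import model_from_json

# Reload the architecture from JSON, then attach the saved weights
with open(o + '.json') as f:
    restored = model_from_json(f.read())
restored.load_weights(o + '-weights.hdf5')
restored.summary()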
Example #22
    max_error = 0
    for i in range(100):
        kernel_size = np.random.randint(1, 10)
        inp = np.random.randint(kernel_size + 1, 100)
        out = np.random.randint(1, 2)

        model = TestConv2d(inp + 2, out, kernel_size, inp % 2)

        input_np = np.random.uniform(0, 1, (1, inp + 2, inp, inp))
        input_var = Variable(torch.FloatTensor(input_np))
        output = model(input_var)

        k_model = pytorch_to_keras(model,
                                   input_var, (
                                       inp + 2,
                                       inp,
                                       inp,
                                   ),
                                   change_ordering=True,
                                   verbose=True)

        pytorch_output = output.data.numpy()
        keras_output = k_model.predict(input_np.transpose(0, 2, 3, 1))

        error = np.max(pytorch_output - keras_output.transpose(0, 3, 1, 2))
        print(error)
        if max_error < error:
            max_error = error

    print('Max error: {0}'.format(max_error))
Example #23
    def forward(self, x):
        x = self.conv2d(x)
        x = self.pool(x)
        return x


if __name__ == '__main__':
    max_error = 0
    for i in range(100):
        kernel_size = np.random.randint(4, 7)
        inp = np.random.randint(kernel_size + 1, 100)
        out = np.random.randint(1, 100)

        model = AvgPool(inp, out, kernel_size, inp % 2)

        input_np = np.random.uniform(0, 1, (1, inp, inp, inp))
        input_var = Variable(torch.FloatTensor(input_np))
        output = model(input_var)

        k_model = pytorch_to_keras(model, input_var, (inp, inp, inp,), verbose=True, names='keep')
        print(k_model.summary())
        pytorch_output = output.data.numpy()
        keras_output = k_model.predict(input_np)

        error = np.max(pytorch_output - keras_output)
        print(error)
        if max_error < error:
            max_error = error

    print('Max error: {0}'.format(max_error))
Example #24
class TestEmbedding(nn.Module):
    def __init__(self, input_size):
        super(TestEmbedding, self).__init__()
        self.embedd = nn.Embedding(input_size, 100)

    def forward(self, input):
        return self.embedd(input)


if __name__ == '__main__':
    max_error = 0
    for i in range(100):
        input_np = np.random.randint(0, 10, (1, 1, 4))
        input = Variable(torch.LongTensor(input_np))

        simple_net = TestEmbedding(1000)
        output = simple_net(input)

        k_model = pytorch_to_keras(simple_net, input, (1, 4), verbose=True)

        pytorch_output = output.data.numpy()
        keras_output = k_model.predict(input_np)

        error = np.max(pytorch_output - keras_output[0])
        print(error)
        if max_error < error:
            max_error = error

    print('Max error: {0}'.format(max_error))
Example #25
    # model = test.main()
    model = torch.load(
        "/home/dp/Desktop/algorithms/Pelee.Pytorch/weights/Pelee_COCO_size304_epoch40.pth"
    )
    model = model.cuda()  ##cuda
    summary(model, (3, 304, 304))  ##summary(model, (channels, pic_h, pic_w))
    model.eval()

    ##step2: pytorch .pth to keras .h5  and test .h5
    input_np = np.random.uniform(0, 1, (1, 3, 304, 304))
    input_var = Variable(torch.FloatTensor(input_np)).cuda()  ##cuda
    # input_var = Variable(torch.FloatTensor(input_np))
    k_model = pytorch_to_keras(model,
                               input_var, (
                                   3,
                                   304,
                                   304,
                               ),
                               verbose=True,
                               name_policy='short')
    k_model.summary()
    k_model.save('my_model.h5')

    output = model(input_var)
    check_error(output, k_model,
                input_np)  ## check the error between .pth and .h5

    ##step3: load .h5 and .h5 to .pb
    tf.keras.backend.clear_session()
    tf.keras.backend.set_learning_phase(0)  ## essential, do not omit
    my_model = load_model('my_model.h5')
    h5_to_pb(my_model, output_dir='./model/', model_name='model.pb')
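The h5_to_pb helper called in the last step is not defined in this excerpt. One common TF1-era recipe (a sketch, not necessarily the author's implementation) combines the freeze_session pattern from Example #10 with tf.train.write_graph:

import os
import tensorflow as tf


def h5_to_pb(k_model, output_dir='./model/', model_name='model.pb'):
    # Freeze the Keras session graph and write it out as a binary .pb
    os.makedirs(output_dir, exist_ok=True)
    frozen_graph = freeze_session(
        tf.keras.backend.get_session(),
        output_names=[out.op.name for out in k_model.outputs])
    tf.train.write_graph(frozen_graph, output_dir, model_name, as_text=False)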
Example #26
        self.relu = nn.LeakyReLU(inplace=True)

    def forward(self, x):
        x = self.linear(x)
        x = self.relu(x)
        return x


if __name__ == '__main__':
    max_error = 0
    for i in range(100):
        inp = np.random.randint(1, 100)
        out = np.random.randint(1, 100)
        model = TestLeakyRelu(inp, out, inp % 2)

        input_np = np.random.uniform(0, 1, (1, inp))
        input_var = Variable(torch.FloatTensor(input_np))
        output = model(input_var)

        k_model = pytorch_to_keras(model, input_var, (inp, ), verbose=True)

        pytorch_output = output.data.numpy()
        keras_output = k_model.predict(input_np)

        error = np.max(pytorch_output - keras_output)
        print(error)
        if max_error < error:
            max_error = error

    print('Max error: {0}'.format(max_error))
Example #27
        kernel_size = np.random.randint(1, 7)
        inp = np.random.randint(kernel_size + 1, 100)
        out = np.random.randint(1, 100)

        model = TestConv2d(inp, out, kernel_size, inp % 2)
        for m in model.modules():
            m.training = False

        input_np = np.random.uniform(0, 1, (1, inp, inp, inp))
        input_var = Variable(torch.FloatTensor(input_np))
        output = model(input_var)

        k_model = pytorch_to_keras(model,
                                   input_var, (
                                       inp,
                                       inp,
                                       inp,
                                   ),
                                   verbose=True,
                                   short_names=True)

        pytorch_output = output.data.numpy()
        keras_output = k_model.predict(input_np)

        error = np.max(pytorch_output - keras_output)
        print(error)
        if max_error < error:
            max_error = error

    print('Max error: {0}'.format(max_error))