Example #1
def convert(model: str, output: str, input_shape: List[int], net_path,
            net_name):
    """Convert PyTorch model to tfLite model
    
    Args:
        model (str): PyTorch model file path  
        output (str): output file name for Keras  
        input_shape (List[int]): Tensor shape for input  
        net_path ([type]): Python script defines the model  
        net_name ([type]): PyTorch model class in the `net_path` file.
    """

    x = torch.randn(*input_shape, requires_grad=True)  # pylint: disable=no-member
    load_package(net_path, net_name)
    model = torch.load(model)

    # convert to a Keras model first
    # change_ordering is experimental: it changes BCHW to BHWC so the model can be converted to TFLite
    k_model = pytorch_to_keras(model,
                               x, [input_shape[1:]],
                               verbose=False,
                               change_ordering=True)
    k_model.save("tmp.h5")

    # convert to tflite model
    converter = create_converter("tmp.h5", model_loader)
    tflite_model = converter.convert()
    with open(output, "wb") as f:
        f.write(tflite_model)
    remove("tmp.h5")
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "model_path",
        type=str,
        help=
        "path to pretrained model to load. folder must contain final_model.pth.tar",
    )

    parser.add_argument(
        "model_type",
        type=str,
        help="type of model to train or test",
        choices=MODEL_TYPES,
        default="alexnet",
    )

    parser.add_argument("num_classes",
                        type=int,
                        help="number of classes in the model",
                        default=24)

    parser.add_argument("output_path", type=str, help="output path")

    args = parser.parse_args()

    MODEL_PATH = os.path.abspath(args.model_path)
    MODEL_TYPE = args.model_type
    NUM_CLASSES = args.num_classes
    OUTPUT_PATH = os.path.abspath(args.output_path)

    model = select_model(MODEL_TYPE, NUM_CLASSES)
    model.load_state_dict(
        torch.load(os.path.join(MODEL_PATH, "final_model.pth.tar"),
                   map_location=torch.device('cpu')))
    model.eval()

    input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
    input_var = torch.autograd.Variable(torch.FloatTensor(input_np))
    # we should specify shape of the input tensor
    k_model = pytorch_to_keras(model,
                               input_var, (
                                   3,
                                   224,
                                   224,
                               ),
                               verbose=False,
                               name_policy="short")
    print(k_model.summary())
    print(f"saving keras model to {OUTPUT_PATH}")
    k_model.save(OUTPUT_PATH, save_format='h5')
    # frozen_graph = freeze_session(
    #     K.get_session(), output_names=[out.op.name for out in k_model.outputs]
    # )

    # tf.train.write_graph(frozen_graph, ".", "my_model.pb", as_text=False)
    print([i for i in k_model.outputs])
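A quick sanity check (a sketch reusing the variables above, and assuming the model returns a single tensor): compare the PyTorch output with the Keras prediction on the same random input. Since change_ordering is not used, both models take channels-first input.

with torch.no_grad():
    pytorch_output = model(input_var).numpy()
keras_output = k_model.predict(input_np.astype(np.float32))
print("max abs difference:", np.abs(pytorch_output - keras_output).max())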
Example #3
def compute_hess_eigs(model, loader, neigs=50, augerino=False):
    model = model.cuda()
    input_shape = Variable(torch.rand(1, 3, 32, 32))
    if augerino:
        tfdset = convert_dset_to_tf(loader, model)
        keras_model = pytorch_to_keras(model.model.cpu(), input_shape)
    else:
        keras_model = pytorch_to_keras(model.cpu(), input_shape)
        tfdset = convert_dset_to_tf(loader)

    V, T = lanczos_algorithm.approximate_hessian(keras_model,
                                                 loss_fn,
                                                 tfdset.batch(128),
                                                 order=neigs,
                                                 random_seed=1)

    eigs, wghts = density_lib.tridiag_to_eigv([T.numpy()])
    torch.cuda.empty_cache()

    return eigs
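convert_dset_to_tf and loss_fn are defined elsewhere in this project. As a rough illustration only, a stand-in for the non-augerino path could materialise the PyTorch DataLoader into a tf.data.Dataset, with a cross-entropy loss_fn assumed:

import numpy as np
import tensorflow as tf

def convert_dset_to_tf(loader):
    # Materialise the PyTorch DataLoader in memory and wrap it as a tf.data.Dataset.
    xs, ys = [], []
    for x, y in loader:
        xs.append(x.numpy())
        ys.append(y.numpy())
    return tf.data.Dataset.from_tensor_slices(
        (np.concatenate(xs, axis=0), np.concatenate(ys, axis=0)))

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)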
Example #4
def convert(model: str, output: str, input_shape: List[int], net_path, net_name):
    """Convert PyTorch model to Keras model
    
    Args:
        model (str): PyTorch model file path
        output (str): output file name for Keras model
        input_shape (List[int]): Tensor shape for input
        net_path ([type]): Python script defines the model
        net_name ([type]): PyTorch model class in the `net_path` file.
    """
    x = torch.randn(*input_shape, requires_grad=True) # pylint: disable=no-member
    load_package(net_path, net_name)
    model = torch.load(model)
    k_model = pytorch_to_keras(model, x, [input_shape[1:]], verbose=False)

    # Export the model
    k_model.save(output)
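A hypothetical call to the helper above; the file names, input shape, and class name are placeholders, not values from the original project:

convert(model="checkpoints/model.pth",
        output="converted/model.h5",
        input_shape=[1, 3, 224, 224],
        net_path="nets/my_net.py",
        net_name="MyNet")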
Example #5
        exit()

    checkpoint = torch.load(torch_model_path)
    state_dict = checkpoint['state_dict']
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        print(k)
        # if 'module' not in k:
        #     k = 'module.' + k
        if 'module' in k:
            k = k.replace('module.', '')
        new_state_dict[k] = v
    torch_model.load_state_dict(new_state_dict)
    torch_model.eval()

    input_np = np.random.uniform(0, 1, (1, 3, 32, 32))
    input_var = Variable(torch.FloatTensor(input_np))

    k_model = p2k.pytorch_to_keras(torch_model, input_var, [(3, 32, 32,)], verbose=True)
    k_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    k_model.summary()


    flat1 = k_model.layers[-1].output
    output = tf.keras.layers.Activation('softmax')(flat1)
    model = tf.keras.models.Model(inputs=k_model.inputs, outputs=output)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()

    model.save(save_path)
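A sanity-check sketch for the model built above, assuming torch_model outputs raw logits (which is why a softmax head was appended on the Keras side):

with torch.no_grad():
    torch_probs = torch.softmax(torch_model(input_var), dim=1).numpy()
keras_probs = model.predict(input_np.astype(np.float32))
print("max abs difference:", np.abs(torch_probs - keras_probs).max())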
Example #6
def only_model(args, device_id):

    logger.info('Loading checkpoint from %s' % args.test_from)
    checkpoint = torch.load(args.test_from,
                            map_location=lambda storage, loc: storage)

    ### We load our ExtSummarizer model
    model = ExtSummarizer(args, device, checkpoint)
    model.eval()

    ### We create an encoder and a decoder matching those of ExtSummarizer and load the checkpoint parameters into them
    ### This is only for testing purposes
    encoder = Bert(False, '/tmp', True)
    load_my_state_dict(encoder, checkpoint['model'])

    decoder = ExtTransformerEncoder(encoder.model.config.hidden_size,
                                    args.ext_ff_size, args.ext_heads,
                                    args.ext_dropout, args.ext_layers)
    load_my_state_dict_decoder(decoder, checkpoint['model'])

    encoder.eval()
    decoder.eval()

    seq_len = 250

    ### We check that the parameters were loaded correctly
    input_ids = torch.tensor([np.random.randint(100, 15000, seq_len)],
                             dtype=torch.long)
    mask = torch.ones(1, seq_len, dtype=torch.float)
    clss = torch.tensor([[20, 36, 55, 100, 122, 130, 200, 222]],
                        dtype=torch.long)
    mask_cls = torch.tensor([[1] * len(clss[0])], dtype=torch.long)
    """## test encoder
    top_vec = model.bert(input_ids, mask)
    top_vec1 = encoder(input_ids, mask)
    logger.info((top_vec-top_vec1).sum())

    ## test decoder
    sents_vec = top_vec[torch.arange(top_vec.size(0)).unsqueeze(1), clss]
    sents_vec = sents_vec * mask_cls[:, :, None].float()

    sents_vec1 = top_vec1[torch.arange(top_vec1.size(0)).unsqueeze(1), clss]
    sents_vec1 = sents_vec1 * mask_cls[:, :, None].float()


    scores = model.ext_layer(sents_vec, mask_cls)
    scores1  = decoder(sents_vec1, mask_cls)
    logger.info((scores-scores1).sum())"""

    ################# ONNX ########################

    ## Now we export the encoder and the decoder to ONNX
    """input_names = ["input_ids", "mask"]
    output_names = ["hidden_outputs"]
    torch.onnx.export(model.bert.to('cpu'), (input_ids, mask), "/tmp/encoder5.onnx", verbose=True, 
                      input_names=input_names, output_names=output_names, export_params=True, keep_initializers_as_inputs=True)"""

    k_model = pytorch_to_keras(model.bert.to('cpu'), [input_ids, mask], [(
        1,
        250,
    ), (
        1,
        250,
    )],
                               verbose=True)

    print("okkk")
    """logger.info("Load onnx and test")
Example #7
parser.add_argument('--width',
                    type=float,
                    default=0.125,
                    help='Width of the ResNet')
parser.add_argument('--input_dim',
                    type=int,
                    default=64,
                    help='Input dimension of the training images')
args = parser.parse_args()

# load finetuned model
state_dict = torch.load(args.finetuned)
resnet = models.ResNetSimCLR(name=args.model, width=args.width)
classifier = Classifier(resnet)
classifier.load_state_dict(state_dict)
classifier.eval()

# dummy
dummy_input = torch.ones(1, 3, args.input_dim, args.input_dim)

# pytorch to keras
keras_model = pytorch_to_keras(classifier,
                               dummy_input,
                               [(3, args.input_dim, args.input_dim)],
                               change_ordering=True)
keras_model.summary()

# save checkpoint
print('Conversion finished! Storing checkpoint at %s' % (args.keras_model))
keras_model.save(args.keras_model)
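An optional check (a sketch reusing the variables above, assuming the classifier returns a single tensor): because change_ordering=True was used, the Keras model expects channels-last input, so the dummy tensor is permuted to NHWC before the comparison.

import numpy as np

with torch.no_grad():
    torch_out = classifier(dummy_input).numpy()
keras_out = keras_model.predict(dummy_input.permute(0, 2, 3, 1).numpy())
print("max abs difference:", np.abs(torch_out - keras_out).max())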
Example #8
import numpy as np
from pytorch2keras import pytorch_to_keras
import torch
import tensorflow as tf
from tensorflow import keras
from SCGamePredictor import SCGameRecommender

input_np = np.random.uniform(0, 1, (1, 18))
input_var = torch.FloatTensor(input_np)

model = SCGameRecommender("model_SupplyChainGame_4_4.pt")

k_model = pytorch_to_keras(model, input_var, [(18, )], verbose=True)

k_model.save("model.keras")

x = np.array([model.initial_state()])
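A round-trip sketch: reload the saved model and confirm it still matches the in-memory converted model (assuming the converted graph uses only standard Keras layers, so no custom objects are needed):

reloaded = keras.models.load_model("model.keras")
original_out = k_model.predict(input_np.astype(np.float32))
reloaded_out = reloaded.predict(input_np.astype(np.float32))
print("max abs difference:", np.abs(original_out - reloaded_out).max())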
Example #9
    visible_devices = tf.config.get_visible_devices()
    for device in visible_devices:
        assert device.device_type != 'GPU'
except:
    # Invalid device or cannot modify virtual devices once initialized.
    pass

K.set_image_data_format("channels_first")

dummy_input = torch.rand(1, 1, 32, 128)
dummy_output = model_pytorch(dummy_input)

model_keras = pytorch2keras.pytorch_to_keras(model_pytorch,
                                             dummy_input, [(
                                                 1,
                                                 32,
                                                 128,
                                             )],
                                             verbose=True,
                                             change_ordering=True)

print(model_keras.summary())

K.set_image_data_format("channels_last")

np_input = dummy_input.view(1, 32, 128, 1).detach().numpy()
np_output = dummy_output.view(1, 32, 128, 1).detach().numpy()
tf_input = tf.convert_to_tensor(np_input)
tf_output = tf.convert_to_tensor(np_output)

converter = tf.lite.TFLiteConverter.from_keras_model(model_keras)
model_tfl = converter.convert()
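A sketch of running the converted TFLite model with the interpreter and comparing it against the PyTorch output reshaped to channels-last above (assuming a single input and a single, image-shaped output tensor):

import numpy as np

interpreter = tf.lite.Interpreter(model_content=model_tfl)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
interpreter.set_tensor(input_details[0]["index"], np_input.astype(np.float32))
interpreter.invoke()
tfl_output = interpreter.get_tensor(output_details[0]["index"])
print("max abs difference vs PyTorch:", np.abs(tfl_output - np_output).max())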
Example #10
from argparse import ArgumentParser

import torch
from torch.autograd import Variable
from pytorch2keras import pytorch_to_keras
import tensorflowjs as tfjs

from models import Generator

if __name__ == "__main__":
    parser = ArgumentParser(
        description="Convert the generator into a Tensorflow JS model")
    parser.add_argument("input", type=str, help="The path to the saved model")
    parser.add_argument(
        "output",
        type=str,
        help="The directory to save the Tensorflow JS model to")
    args = parser.parse_args()

    generator = Generator()
    generator.load_state_dict(torch.load(args.input)["generator"])
    generator.eval()  # switch to inference mode before conversion

    input_var = Variable(torch.randn(1, 100, 1, 1))
    keras_generator = pytorch_to_keras(generator, input_var, [(100, 1, 1)])

    tfjs.converters.save_keras_model(keras_generator, args.output)
Example #11
final_specs = nn.ModuleList([
    nn.Sequential(model.trunk, model.prob, nn.Sigmoid())
    for model in spec_models
])
final_overs = nn.ModuleList([
    nn.Sequential(model.trunk, model.prob, nn.Sigmoid())
    for model in over_models
])

print("Keras-ifying Models")

k_spec = []
k_over = []
for i in range(len(final_specs)):
    final_specs[i].eval()
    spec_model_k = pytorch_to_keras(final_specs[i], spec_xtrain, verbose=True)
    k_spec.append(spec_model_k)
    spec_model_k.save(f'Final_DSS_{i:03d}.h5')
    torch.save(final_specs[i], f'Final_DSS_{i:03d}.pth')
for i in range(len(final_overs)):
    final_overs[i].eval()
    over_model_k = pytorch_to_keras(final_overs[i], over_xtrain, verbose=True)
    k_over.append(over_model_k)
    over_model_k.save(f'Final_OS_{i:03d}.h5')
    torch.save(final_overs[i], f'Final_OS_{i:03d}.pth')

#pdb.set_trace()
spec_input = Input(shape=(9, ))
spec_output = KB.mean(KB.concatenate([spec(spec_input) for spec in k_spec],
                                     axis=0),
                      axis=0)
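The snippet stops mid-ensemble; a sketch of wrapping the averaged output into a single Keras model, following the pattern used in Example #13 below (KModel is assumed to be tensorflow.keras.models.Model, as there):

spec_combined_model_k = KModel(inputs=spec_input, outputs=spec_output)
spec_combined_model_k.save('Final_DSS.h5')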
Example #12
import torch
import sys
from pytorch2keras import pytorch_to_keras
import tensorflow as tf

args = sys.argv
if not len(args) == 3:
    print('Usage: cmd <input model> <output path>')
    exit()

inmodel = args[1]
outmodel = args[2]

sys.path.insert(0, '../reid-strong-baseline')
model = torch.load(inmodel, map_location=torch.device('cpu'))
model.train(False)

dummy = torch.randn(1, 3, 256, 256)
#torch.onnx.export(model, dummy, outmodel, keep_initializers_as_inputs=True, export_params=True)
# we should specify the shape of the input tensor
k_model = pytorch_to_keras(model, dummy, [(3, 256, 256)], verbose=True)
tf.saved_model.save(k_model, outmodel)
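A round-trip sketch: reload the exported SavedModel and list its serving signatures (assuming TensorFlow 2.x, where tf.saved_model.save on a Keras model generates a default serving signature):

reloaded = tf.saved_model.load(outmodel)
print(list(reloaded.signatures.keys()))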
Example #13
import tensorflow.keras.backend as KB


print("Keras-ifying Models")

final_specs = [torch.load(model) for model in glob.glob("Final_DSS_*.pth")]
final_overs = [torch.load(model) for model in glob.glob("Final_OS_*.pth")]

dummy_input = torch.randn(2,9)

k_spec = []
k_over = []

for i in range(len(final_specs)):
	final_specs[i].eval()
	spec_model_k = pytorch_to_keras(final_specs[i], dummy_input, verbose=True)
	k_spec.append(spec_model_k)
	spec_model_k.save(f'Final_DSS_{i:03d}.h5')
for i in range(len(final_overs)):
	final_overs[i].eval()
	over_model_k = pytorch_to_keras(final_overs[i], dummy_input, verbose=True)
	k_over.append(over_model_k)
	over_model_k.save(f'Final_OS_{i:03d}.h5')

# pdb.set_trace()
spec_input = Input(shape=(9,))
spec_output = KB.mean(KB.concatenate([spec(spec_input) for spec in k_spec], axis=1), axis=1)

spec_combined_model_k = KModel(inputs=spec_input, outputs=spec_output)
spec_combined_model_k.save(f'Final_DSS.h5')
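The script above builds only the spec ensemble; the analogous ensemble for the over-models would follow the same pattern (a sketch):

over_input = Input(shape=(9,))
over_output = KB.mean(KB.concatenate([over(over_input) for over in k_over], axis=1), axis=1)
over_combined_model_k = KModel(inputs=over_input, outputs=over_output)
over_combined_model_k.save('Final_OS.h5')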
Example #14
        return self.conv_final(x)


if __name__ == '__main__':

    # Creating / loading pre-trained PyNET model

    model = UNet()
    model.eval()

    # Converting model to Keras

    for _ in model.modules():
        _.training = False

    sample_input = torch.randn(1, 3, 1024, 1536)
    input_nodes = ['input']
    output_nodes = ['output']

    k_model = pytorch_to_keras(model, sample_input, change_ordering=True, verbose=True)
    k_model.save("model.h5")

    # Converting model to TFLite

    converter = tf.compat.v1.lite.TFLiteConverter.from_keras_model_file("model.h5")
    converter.experimental_new_converter = True

    tflite_model = converter.convert()
    open("model.tflite", "wb").write(tflite_model)

Example #15
    nn.ReLU(),
    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=False),
    Lambda(lambda x: x.view(x.size(0), -1)),  # View,
    nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x),
                  nn.Linear(25088, 4096)),  # Linear,
    nn.ReLU(),
    nn.Dropout(0.5),
    nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x),
                  nn.Linear(4096, 4096)),  # Linear,
    nn.ReLU(),
    nn.Dropout(0.5),
    nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x),
                  nn.Linear(4096, 2622)),  # Linear,
    nn.Softmax())

# load weights
pytorch_model.load_state_dict(torch.load('VGG_FACE.pth'))

# temporarily remove Dropout and Softmax layers because onnx does not support
# these two layers for PyTorch2Keras conversion
pytorch_model = nn.Sequential(pytorch_model[:34], pytorch_model[35:37],
                              pytorch_model[38])
torch.save(pytorch_model.state_dict(), 'pytorch_model.pth')

# create a dummy variable with correct shape
input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
input_var = Variable(torch.FloatTensor(input_np))

# convert model
k_model = pytorch_to_keras(pytorch_model, input_var, verbose=True)
k_model.save_weights('keras_weights.h5')
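Since the Softmax layer was stripped before conversion (the removed Dropout layers are identity at inference time, so only the softmax needs restoring), it can be re-attached on the Keras side in the same way as Example #5 (a sketch; assumes TensorFlow is importable as tf):

import tensorflow as tf

softmax_out = tf.keras.layers.Activation('softmax')(k_model.layers[-1].output)
k_model_softmax = tf.keras.models.Model(inputs=k_model.inputs, outputs=softmax_out)
k_model_softmax.save('keras_model_softmax.h5')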
Example #16
    x = self.classifier(x)
    return x
# Monkey patch the model
network.__class__.forward = forward
classifier[2] = network.classifier[0]
classifier[3] = network.classifier[1]
network.classifier = classifier

# Convert to Keras format
IMAGE_SIZE = 224
input_np = np.random.uniform(0, 1, (1, 3, IMAGE_SIZE, IMAGE_SIZE))
network.eval();
with torch.no_grad():
    input_var = Variable(torch.FloatTensor(input_np))
    pytorch_output = network(input_var).data.numpy()
    k_model = pytorch_to_keras(network, input_var, [(3, IMAGE_SIZE, IMAGE_SIZE)], verbose=False, name_policy='renumerate')  
    keras_output = k_model.predict(input_np)
    # This should be less than 1e-6
    error = np.max(pytorch_output - keras_output)
    print('Conversion prediction error =',error,"(fine if around 1e-6)")
    #print(k_model.summary())

# Convert to Tensorflow.js
tfjs.converters.save_keras_model(k_model, 'mobilenetjs')
tfjs.converters.save_keras_model(k_model, 'mobilenetjs_quantised', quantization_dtype=np.uint8)

# Bug with reshape in tensorflowjs https://github.com/tensorflow/tfjs/issues/824
# Fix by editing expected shape in model.json:
def fix_model_json(location):
    import json
    with open(location, "r") as f: