    # print("Loading vgg19 weights...")
    # vgg_model = VGG19(include_top=False, weights='imagenet')
    # for layer in model.layers:
    #     if layer.name in from_vgg:
    #         vgg_layer_name = from_vgg[layer.name]
    #         layer.set_weights(vgg_model.get_layer(vgg_layer_name).get_weights())
    #         print("Loaded VGG19 layer: " + vgg_layer_name)
    last_epoch = 0

# prepare generators

if use_client_gen:
    train_client = DataGeneratorClient(port=pargs.port,
                                       host="localhost",
                                       hwm=160,
                                       batch_size=batch_size,
                                       pstages=paramNumStages)
    train_client.start()
    train_di = train_client.gen()
    train_samples = 52597

    val_client = DataGeneratorClient(port=pargs.port + 1,
                                     host="localhost",
                                     hwm=160,
                                     batch_size=batch_size,
                                     pstages=paramNumStages)
    val_client.start()
    val_di = val_client.gen()
    val_samples = 2645
else:
    lc = 0
    for layer in model.layers:
        try:
            rn_layer = rn.get_layer(layer.name)
            if type(rn_layer) is Conv2D:
                print "Loading weights for layer", layer.name
                layer.set_weights(rn_layer.get_weights())
                lc += 1
        except:
            print "Skipping Layer ", layer.name

    print "Done loading weights for %d resnet conv layers" % lc

# prepare generators
stages = 1
train_client = DataGeneratorClient(port=5555, host="localhost", hwm=160, batch_size=batch_size, with_pafs=True, stages=stages)
train_client.start()
train_di = train_client.gen()
train_samples = 3000  # reduced from 52597, the number of training samples in the full COCO dataset

val_client = DataGeneratorClient(port=5556, host="localhost", hwm=160, batch_size=batch_size, with_pafs=True, stages=stages)
val_client.start()
val_di = val_client.gen()
val_samples = 1000  # reduced from 2645, the number of validation samples in the full COCO dataset


# euclidean loss as implemented in caffe https://github.com/BVLC/caffe/blob/master/src/caffe/layers/euclidean_loss_layer.cpp
def eucl_loss(x, y):
    return K.sum(K.square(x - y)) / batch_size / 2

losses = {}
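# Hypothetical sketch (not in the original snippet): the listing cuts off right
# after `losses = {}`. The dict is typically filled by assigning eucl_loss to every
# named model output and then passed to model.compile; the optimizer below is a
# placeholder assumption, not taken from the original code.
for output_name in model.output_names:
    losses[output_name] = eucl_loss

model.compile(optimizer='adam', loss=losses)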
Example #3
    vgg_model = VGG19(include_top=False, weights='imagenet')

    for layer in model.layers:
        if layer.name in from_vgg:
            vgg_layer_name = from_vgg[layer.name]
            layer.set_weights(
                vgg_model.get_layer(vgg_layer_name).get_weights())
            print("Loaded VGG19 layer: " + vgg_layer_name)

    last_epoch = 0

# prepare generators

if use_client_gen:
    train_client = DataGeneratorClient(port=5555,
                                       host="localhost",
                                       hwm=160,
                                       batch_size=10)
    train_client.start()
    train_di = train_client.gen()
    train_samples = 52597

    val_client = DataGeneratorClient(port=5556,
                                     host="localhost",
                                     hwm=160,
                                     batch_size=10)
    val_client.start()
    val_di = val_client.gen()
    val_samples = 2645
else:
    train_di = DataIterator("../dataset/train_dataset.h5",
                            data_shape=(3, 368, 368),
Example #4
    vgg_model = VGG19(include_top=False, weights='imagenet')

    for layer in model.layers:
        if layer.name in from_vgg:
            vgg_layer_name = from_vgg[layer.name]
            layer.set_weights(
                vgg_model.get_layer(vgg_layer_name).get_weights())
            print("Loaded VGG19 layer: " + vgg_layer_name)

    last_epoch = 0

# prepare generators
if use_client_gen:
    train_client = DataGeneratorClient(port=5555,
                                       host="localhost",
                                       nb_parts=14,
                                       nb_limbs=13,
                                       hwm=160,
                                       batch_size=batch_size)
    train_client.start()
    train_di = train_client.gen()
    train_samples = 4000  # modified by g.hy

    val_client = DataGeneratorClient(port=5556,
                                     host="localhost",
                                     nb_parts=14,
                                     nb_limbs=13,
                                     hwm=160,
                                     batch_size=batch_size)
    val_client.start()
    val_di = val_client.gen()
    val_samples = 500
    print("Loading vgg19 weights...")

    vgg_model = VGG19(include_top=False, weights='imagenet')

    for layer in model.layers:
        if layer.name in from_vgg:
            vgg_layer_name = from_vgg[layer.name]
            layer.set_weights(vgg_model.get_layer(vgg_layer_name).get_weights())
            print("Loaded VGG19 layer: " + vgg_layer_name)

    last_epoch = 0

# prepare generators

if use_client_gen:
    train_client = DataGeneratorClient(port=5555, host="localhost", hwm=160, batch_size=10)
    train_client.start()
    train_di = train_client.gen()
    train_samples = 52597

    val_client = DataGeneratorClient(port=5556, host="localhost", hwm=160, batch_size=10)
    val_client.start()
    val_di = val_client.gen()
    val_samples = 2645
else:
    train_di = DataIterator("../dataset/train_dataset.h5", data_shape=(3, 368, 368),
                      mask_shape=(1, 46, 46),
                      label_shape=(57, 46, 46),
                      vec_num=38, heat_num=19, batch_size=batch_size, shuffle=True)
    train_samples=train_di.N
    val_di = DataIterator("../dataset/val_dataset.h5", data_shape=(3, 368, 368),
from keras.callbacks import LearningRateScheduler, ModelCheckpoint, CSVLogger, TensorBoard
from keras.layers.convolutional import Conv2D
from keras.applications.resnet50 import ResNet50

import keras.backend as K

from hm_model import acc_norm

batch_size = 100
train_samples = 2000

WEIGHTS_BEST = "vnect_weights.h5"

val_client = DataGeneratorClient(port=5556,
                                 host="localhost",
                                 hwm=160,
                                 batch_size=batch_size,
                                 with_pafs=False,
                                 stages=1)
val_client.start()
val_di = val_client.gen()
val_samples = 2000

import vnect_model as md
model = md.get_training_model()

# load previous weights; abort if no checkpoint file exists yet
if os.path.exists(WEIGHTS_BEST):
    print("Loading the best weights...")
    model.load_weights(WEIGHTS_BEST)
else:
    raise FileNotFoundError("Weights file %s not found" % WEIGHTS_BEST)
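
# Hypothetical continuation (not in the original snippet): compile the model and
# start training with fit_generator. The training generator, optimizer, loss and
# epoch count below are assumptions; train_di mirrors val_di but on port 5555,
# and acc_norm is assumed to be a Keras-compatible metric from hm_model.
train_client = DataGeneratorClient(port=5555, host="localhost", hwm=160,
                                   batch_size=batch_size, with_pafs=False, stages=1)
train_client.start()
train_di = train_client.gen()

model.compile(optimizer='adam', loss='mean_squared_error', metrics=[acc_norm])

checkpoint = ModelCheckpoint(WEIGHTS_BEST, monitor='val_loss', verbose=1,
                             save_best_only=True, save_weights_only=True)
csv_logger = CSVLogger('vnect_training.csv', append=True)

model.fit_generator(train_di,
                    steps_per_epoch=train_samples // batch_size,
                    epochs=100,
                    validation_data=val_di,
                    validation_steps=val_samples // batch_size,
                    callbacks=[checkpoint, csv_logger])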