Example #1
def main():
    """
    Generate CoreML model for benchmark by using non-trained model.
    It's useful if you just want to measure the inference speed
    of your model
    """
    hack_coremltools()

    sizes = [224, 192, 160, 128]
    alphas = [1., .75, .50, .25]
    name_fmt = 'mobile_unet_{0:}_{1:03.0f}_{2:03.0f}'

    experiments = [{
        'name': name_fmt.format(s, a * 100, a * 100),
        'model': MobileUNet(input_shape=(s, s, 3),
                            input_tensor=Input(shape=(s, s, 3)),
                            alpha=a,
                            alpha_up=a),
    } for s, a in product(sizes, alphas)]

    for e in experiments:
        model = e['model']
        name = e['name']

        model.summary()

        with CustomObjectScope(custom_objects()):
            coreml_model = coremltools.converters.keras.convert(
                model, input_names='data')
        coreml_model.save('artifacts/{}.mlmodel'.format(name))
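A quick way to sanity-check one of the converted models is to load it back with coremltools and inspect its input/output description. A minimal sketch; the file name below is just one of the names the loop above produces (s=224, alpha=1.0):

import coremltools

# Load a converted model and print its interface without running inference.
mlmodel = coremltools.models.MLModel('artifacts/mobile_unet_224_100_100.mlmodel')
print(mlmodel.get_spec().description)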
Example #2
def main(img_file, weight_file):
    model = MobileUNet(input_shape=(128, 128, 3), alpha=1, alpha_up=0.25)

    model.summary()

    model.load_weights(weight_file, by_name=True)

    # img_size is a module-level constant; it must match the model's
    # 128x128 input.
    img = imread(img_file)
    img = imresize(img, (img_size, img_size))

    mask_file = re.sub('img2.jpg$', 'msk1.png', img_file)

    mask = imread(mask_file)
    mask = imresize(mask, (img_size, img_size))

    batched1 = img.reshape(1, img_size, img_size, 3).astype(float)
    pred1 = model.predict(standardize(batched1)).reshape(img_size, img_size)

    # Visualize input, prediction, and ground-truth mask side by side.
    plt.subplot(2, 2, 1)
    plt.imshow(img)
    plt.subplot(2, 2, 2)
    plt.imshow(pred1)
    plt.subplot(2, 2, 3)
    plt.imshow(mask)
    plt.show()
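The standardize helper is imported from elsewhere in the project and not shown in these excerpts. A minimal sketch, assuming MobileNet-style scaling of 8-bit pixels into [-1, 1] (the exact normalization is an assumption):

def standardize(batch):
    # Assumed preprocessing: map [0, 255] pixel values to [-1, 1].
    return batch / 127.5 - 1.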
Example #3

def train(img_file, mask_file, top_model_weights_path, epochs, batch_size):
    train_gen, validation_gen, img_shape = load_data(img_file, mask_file)

    img_height = img_shape[0]
    img_width = img_shape[1]
    lr_base = 0.01 * (float(batch_size) / 16)

    model = MobileUNet(input_shape=(img_height, img_width, 3), alpha_up=0.25)
    # model = MobileDeepLab(input_shape=(img_height, img_width, 3))
    model.load_weights(os.path.expanduser(top_model_weights_path),
                       by_name=True)

    # Freeze everything up to conv_dw_12 (the first 70 layers).
    for layer in model.layers[:70]:
        layer.trainable = False

    # Alternative: freeze up to conv_dw_13 instead.
    # for layer in model.layers[:76]:
    #     layer.trainable = False

    model.summary()
    model.compile(
        optimizer=SGD(lr=0.0001, momentum=0.9),
        # optimizer=Adam(lr=0.0001),
        loss=dice_coef_loss,
        metrics=[
            dice_coef,
            recall,
            precision,
            'binary_crossentropy',
        ],
    )

    # callbacks
    scheduler = callbacks.LearningRateScheduler(
        create_lr_schedule(epochs, lr_base=lr_base, mode='progressive_drops'))
    tensorboard = callbacks.TensorBoard(log_dir='./logs')
    checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                           save_weights_only=True,
                                           save_best_only=True)

    model.fit_generator(
        generator=train_gen(),
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_gen(),
        validation_steps=nb_validation_samples // batch_size,
        callbacks=[scheduler, tensorboard, checkpoint],
    )

    model.save(trained_model_path)
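dice_coef and dice_coef_loss come from the project's loss module and are not shown in these excerpts. A minimal sketch of the standard smoothed Dice formulation, assuming the project follows it:

from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.):
    # Smoothed Dice coefficient over flattened masks.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    # Minimizing this loss maximizes the Dice coefficient.
    return 1. - dice_coef(y_true, y_pred)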
Example #4
def train(img_file, mask_file, epochs, batch_size):
    # train_gen and validation_gen are used below, so load_data must run.
    train_gen, validation_gen, img_shape = load_data(img_file, mask_file)

    # The input size is fixed at 385x385 here, independent of img_shape.
    img_height = 385
    img_width = 385
    lr_base = 0.01 * (float(batch_size) / 16)

    model = MobileUNet(input_shape=(img_height, img_width, 3),
                       alpha=1,
                       alpha_up=0.25)

    model.summary()
    model.compile(
        optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
        # optimizer=Adam(lr=0.001),
        # optimizer=optimizers.RMSprop(),
        loss=dice_coef_loss,
        metrics=[
            dice_coef,
            recall,
            precision,
            'binary_crossentropy',
        ],
    )

    # model.metrics_tensors is a plain list in older Keras; to log an
    # extra intermediate tensor, append a name and the tensor together.
    model.metrics_names.append('activation_1')
    model.metrics_tensors.append(model.get_layer('activation_1').output)

    # callbacks
    scheduler = callbacks.LearningRateScheduler(
        create_lr_schedule(epochs, lr_base=lr_base, mode='progressive_drops'))
    tensorboard = callbacks.TensorBoard(log_dir='./logs')
    csv_logger = callbacks.CSVLogger('logs/training.csv')
    checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                           save_weights_only=True,
                                           save_best_only=True)

    model.fit_generator(
        generator=train_gen(),
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_gen(),
        validation_steps=nb_validation_samples // batch_size,
        # callbacks=[tensorboard, checkpoint, csv_logger],
        callbacks=[scheduler, tensorboard, checkpoint, csv_logger],
    )

    model.save(trained_model_path)
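lr_base = 0.01 * (batch_size / 16) is the usual linear scaling rule: the base learning rate grows in proportion to the batch size. create_lr_schedule itself is defined elsewhere in the project; a plausible sketch of a 'progressive_drops' mode, where the drop points are assumptions:

def create_lr_schedule(epochs, lr_base=0.01, mode='progressive_drops'):
    def schedule(epoch):
        # Step the learning rate down at fixed fractions of training.
        if epoch >= 0.9 * epochs:
            return lr_base * 1e-4
        if epoch >= 0.75 * epochs:
            return lr_base * 1e-3
        if epoch >= 0.5 * epochs:
            return lr_base * 1e-2
        return lr_base
    return schedule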
Example #5
def main():
    """
    Benchmark your model in your local pc.
    """
    model = MobileUNet(input_shape=(img_size, img_size, 3))
    inputs = np.random.randn(batch_num, img_size, img_size, 3)

    time_per_batch = []

    for i in range(10):
        start = time.time()
        model.predict(inputs, batch_size=batch_num)
        elapsed = time.time() - start
        time_per_batch.append(elapsed)

    time_per_batch = np.array(time_per_batch)

    # Exclude the first measurement, which includes warm-up overhead.
    print(time_per_batch[1:].mean())
    print(time_per_batch[1:].std())
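img_size and batch_num are module-level constants that the excerpt does not show; hypothetical values that make the benchmark runnable:

img_size = 224   # input resolution fed to the network (assumed)
batch_num = 1    # images per timed predict() call (assumed)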
Example #6

def train(img_file, mask_file, mobilenet_weights_path, epochs, batch_size):
    train_gen, validation_gen, img_shape = load_data(img_file, mask_file)

    img_height = img_shape[0]
    img_width = img_shape[1]

    # model = MobileDeepLab(input_shape=(img_height, img_width, 3))
    model = MobileUNet(input_shape=(img_height, img_width, 3), alpha_up=0.25)
    model.load_weights(os.path.expanduser(
        mobilenet_weights_path.format(img_height)),
                       by_name=True)

    # Freeze the original MobileNet weights (the first 82 layers).
    for layer in model.layers[:82]:
        layer.trainable = False

    model.summary()
    model.compile(
        optimizer='rmsprop',
        # A list of two losses implies the model exposes two outputs here.
        loss=[dice_coef_loss, dice_coef_loss],
        metrics=[
            dice_coef,
            # recall,
            # precision,
            'binary_crossentropy',
        ],
    )

    model.fit_generator(
        generator=train_gen(),
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_gen(),
        validation_steps=nb_validation_samples // batch_size,
    )

    model.save_weights(top_model_weights_path)
    model.save(transferred_model_path)
Example #7
def main(weight_file):
    model = MobileUNet(input_shape=(128, 128, 3), alpha=1, alpha_up=0.25)

    model.summary()

    model.load_weights(weight_file, by_name=True)

    images = np.load('data/id_pack/images-128.npy')
    masks = np.load('data/id_pack/masks-128.npy')
    # Keep only the hair channel of the masks.
    masks = masks[:, :, :, 0].reshape(-1, size, size)

    _, images, _, masks = train_test_split(images,
                                           masks,
                                           test_size=0.2,
                                           random_state=seed)

    for img, mask in zip(images, masks):
        batched1 = img.reshape(1, size, size, 3).astype(float)

        t1 = time.time()
        pred1 = model.predict(standardize(batched1)).reshape(size, size)
        elapsed = time.time() - t1
        print('elapsed1: ', elapsed)

        dice = np_dice_coef(mask.astype(float) / 255, pred1)
        print('dice1: ', dice)

        # Visualize input, ground-truth mask, and prediction.
        plt.subplot(2, 2, 1)
        plt.imshow(img)
        plt.subplot(2, 2, 2)
        plt.imshow(mask)
        plt.subplot(2, 2, 3)
        plt.imshow(pred1)
        plt.show()
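np_dice_coef is the NumPy counterpart of the Keras dice_coef metric, used here because the prediction is already a plain array. A sketch under the same smoothed-Dice assumption:

import numpy as np

def np_dice_coef(y_true, y_pred, smooth=1.):
    # Smoothed Dice on NumPy arrays, mirroring the Keras metric.
    y_true_f = y_true.flatten()
    y_pred_f = y_pred.flatten()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)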
Example #8

def train(epochs, batch_size, img_size):
    fresh_training = True
    alpha_value = 0.9

    img_file = 'data/id_pack/images-{}.npy'.format(img_size)
    mask_file = 'data/id_pack/masks-{}.npy'.format(img_size)
    trained_model_path = 'artifacts/model-{}.h5'.format(img_size)

    print("training on image file:")
    print(img_file)
    print(mask_file)

    # Load the data
    train_gen, validation_gen, img_shape, train_len, val_len = load_data(
        img_file, mask_file)

    img_height = img_shape[0]
    img_width = img_shape[1]

    print(img_height, img_width)

    lr_base = 0.01 * (float(batch_size) / 16)

    if fresh_training:
        model = MobileUNet(input_shape=(img_height, img_width, 3),
                           alpha=alpha_value,
                           alpha_up=0.25)
    else:
        with CustomObjectScope(custom_objects()):
            model = load_model(SAVED_MODEL)

    model.summary()
    model.compile(
        # optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
        optimizer=optimizers.Adam(lr=0.0001),
        # optimizer=Adam(lr=0.001),
        # optimizer=optimizers.RMSprop(),
        # loss=dice_coef_loss,
        loss='mean_absolute_error',
        # loss=loss_gu,
        metrics=[
            dice_coef, recall, precision, dice_coef_loss, 'mean_absolute_error'
        ],
    )

    # callbacks
    scheduler = callbacks.LearningRateScheduler(
        create_lr_schedule(epochs, lr_base=lr_base, mode='progressive_drops'))
    tensorboard = callbacks.TensorBoard(log_dir='./logs')
    csv_logger = callbacks.CSVLogger('logs/training.csv')
    checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                           save_weights_only=True,
                                           save_best_only=True)
    '''
    Legacy-interface note: in older Keras versions, steps_per_epoch and
    validation_steps were specified as numbers of samples. Keras maps the
    old argument names via:

        legacy_generator_methods_support = generate_legacy_method_interface(
            allowed_positional_args=['generator', 'steps_per_epoch', 'epochs'],
            conversions=[('samples_per_epoch', 'steps_per_epoch'),
                         ('val_samples', 'steps'),
                         ('nb_epoch', 'epochs'),
                         ('nb_val_samples', 'validation_steps')])
    '''
    nb_train_samples = train_len
    nb_validation_samples = val_len

    print("training sample is ", nb_train_samples)

    if fresh_training:
        cb_list = [scheduler, tensorboard, checkpoint, csv_logger]
    else:
        cb_list = [tensorboard, checkpoint, csv_logger]

    model.fit_generator(
        generator=train_gen(),
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_gen(),
        validation_steps=nb_validation_samples // batch_size,
        callbacks=cb_list,
    )

    model.save(trained_model_path)
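load_data lives in the project's data module; the code above relies only on its contract: two generator factories, the image shape, and the sample counts per split. A simplified sketch of that contract (shuffling, augmentation, and standardization omitted):

import numpy as np
from sklearn.model_selection import train_test_split

def load_data(img_file, mask_file, batch_size=16):
    images = np.load(img_file).astype(float)
    masks = np.load(mask_file).astype(float) / 255
    x_train, x_val, y_train, y_val = train_test_split(images, masks, test_size=0.2)

    def train_gen():
        # Yield batches forever; fit_generator bounds each epoch
        # via steps_per_epoch.
        while True:
            for i in range(0, len(x_train), batch_size):
                yield x_train[i:i + batch_size], y_train[i:i + batch_size]

    def validation_gen():
        while True:
            for i in range(0, len(x_val), batch_size):
                yield x_val[i:i + batch_size], y_val[i:i + batch_size]

    return train_gen, validation_gen, images.shape[1:3], len(x_train), len(x_val)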
Example #9
def train(img_file, mask_file, epochs, batch_size):
    train_gen, validation_gen, img_shape, train_len, val_len = load_data(img_file, mask_file)

    img_height = img_shape[0]
    img_width = img_shape[1]
    lr_base = 0.01 * (float(batch_size) / 16)
    
    print(img_height, img_width)

    model = MobileUNet(input_shape=(img_height, img_width, 3),
                       alpha=1,
                       alpha_up=0.25)

    model.summary()
    model.compile(
        optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
        # optimizer=Adam(lr=0.001),
        # optimizer=optimizers.RMSprop(),
        # loss=dice_coef_loss,
        loss='mean_squared_error',
        metrics=[
            dice_coef,
            recall,
            precision,
            'binary_crossentropy',
        ],
    )

    # callbacks
    scheduler = callbacks.LearningRateScheduler(
        create_lr_schedule(epochs, lr_base=lr_base, mode='progressive_drops'))
    tensorboard = callbacks.TensorBoard(log_dir='./logs')
    csv_logger = callbacks.CSVLogger('logs/training.csv')
    checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                           save_weights_only=True,
                                           save_best_only=True)
    
    '''
    Legacy-interface note: in older Keras versions, steps_per_epoch and
    validation_steps were specified as numbers of samples. Keras maps the
    old argument names via:

        legacy_generator_methods_support = generate_legacy_method_interface(
            allowed_positional_args=['generator', 'steps_per_epoch', 'epochs'],
            conversions=[('samples_per_epoch', 'steps_per_epoch'),
                         ('val_samples', 'steps'),
                         ('nb_epoch', 'epochs'),
                         ('nb_val_samples', 'validation_steps')])
    '''
    nb_train_samples = train_len 
    nb_validation_samples = val_len
    

    model.fit_generator(
        generator=train_gen(),
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_gen(),
        validation_steps=nb_validation_samples // batch_size,
        # callbacks=[tensorboard, checkpoint, csv_logger],
        callbacks=[scheduler, tensorboard, checkpoint, csv_logger],
    )

    model.save(trained_model_path)
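A hypothetical invocation, reusing the packed .npy files from the earlier examples:

train(img_file='data/id_pack/images-128.npy',
      mask_file='data/id_pack/masks-128.npy',
      epochs=100,
      batch_size=16)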
Example #10

from keras import callbacks, optimizers

from data import load_data
from learning_rate import create_lr_schedule
from loss import dice_coef_loss, dice_coef, recall, precision
from nets.MobileUNet import MobileUNet
from nets.MobileUNet import custom_objects
from keras.utils import CustomObjectScope
from keras.utils import plot_model
from keras.models import load_model
from keras import backend as K

from nets.SqueezeNet import SqueezeNet

img_height = img_width = 256

model_MobileUNet = MobileUNet(input_shape=(img_height, img_width, 3), alpha=1, alpha_up=0.25)

model_MobileUNet.summary()
plot_model(model_MobileUNet, to_file="mobile_u_net_model.png", show_shapes=True)

#model_SqueezeNet = SqueezeNet(input_shape=(img_height, img_width, 3), classes=(img_height*img_width))
#plot_model(model_SqueezeNet, to_file="squeeze_net_model.png", show_shapes=True)


#model_SqueezeNet_notop = SqueezeNet(input_shape=(img_height, img_width, 3), include_top=False, pooling='max')
#plot_model(model_SqueezeNet_notop, to_file="squeeze_net_no_top_model.png", show_shapes=True)
'''
img = image.load_img('../images/cat.jpeg', target_size=(227, 227))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
'''
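If the quoted preprocessing block above is ever enabled, it additionally needs imports along these lines (assumed, since the excerpt does not show them):

import numpy as np
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input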
Example #11
def train(coco_path, checkpoint_path, log_path, epochs=100, batch_size=50):
    cat_nms = ['book', 'apple', 'keyboard']

    BATCH_SIZE = batch_size
    NUM_EPOCH = epochs
    IMAGE_W = 224
    IMAGE_H = 224

    model = MobileUNet(input_shape=(IMAGE_H, IMAGE_W, 3),
                       alpha_up=0.25,
                       num_classes=(len(cat_nms) + 1))
    # model.load_weights(os.path.expanduser(mobilenet_weights_path.format(img_height)),
    #            by_name=True)

    # # Freeze mobilenet original weights
    # for layer in model.layers[:82]:
    #     layer.trainable = False

    seed = 1
    np.random.seed(seed)

    training_generator = coco_generator(cat_nms,
                                        coco_path,
                                        batch_size=BATCH_SIZE)
    validation_generator = coco_generator(cat_nms,
                                          coco_path,
                                          subset='val',
                                          batch_size=BATCH_SIZE)

    model.summary()
    if os.path.exists(checkpoint_path):
        model.load_weights(checkpoint_path, by_name=True)

    model.compile(
        optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
        # optimizer=Adam(lr=0.001),
        # optimizer=optimizers.RMSprop(),
        loss=dice_coef_loss,
        metrics=[
            dice_coef,
            recall,
            precision,
            'binary_crossentropy',
        ],
    )

    lr_base = 0.01 * (float(BATCH_SIZE) / 16)

    # callbacks
    scheduler = callbacks.LearningRateScheduler(
        create_lr_schedule(NUM_EPOCH,
                           lr_base=lr_base,
                           mode='progressive_drops'))
    tensorboard = callbacks.TensorBoard(log_dir=log_path)
    # Log per-epoch metrics to a CSV file alongside TensorBoard.
    csv_logger = callbacks.CSVLogger(os.path.join(log_path, 'training.csv'))
    checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                           save_weights_only=True,
                                           save_best_only=True)

    # Train model on dataset
    model.fit_generator(
        generator=training_generator,
        validation_data=validation_generator,
        epochs=NUM_EPOCH,
        callbacks=[scheduler, tensorboard, checkpoint, csv_logger],
    )
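Note that this fit_generator call omits steps_per_epoch and validation_steps, which only works when coco_generator returns a keras.utils.Sequence; otherwise Keras cannot infer the epoch length. A hypothetical invocation (paths are placeholders):

train(coco_path='~/data/coco',
      checkpoint_path='artifacts/coco.h5',
      log_path='./logs',
      epochs=100,
      batch_size=50)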