def train(img_file, mask_file, top_model_weights_path, epochs, batch_size):
    train_gen, validation_gen, img_shape = load_data(img_file, mask_file)

    img_height = img_shape[0]
    img_width = img_shape[1]
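    # Linear-scaling rule: the base LR grows with batch size, with 0.01 at a reference batch of 16.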
    lr_base = 0.01 * (float(batch_size) / 16)

    model = MobileUNet(input_shape=(img_height, img_width, 3), alpha_up=0.25)
    # model = MobileDeepLab(input_shape=(img_height, img_width, 3))
    model.load_weights(os.path.expanduser(top_model_weights_path),
                       by_name=True)

    # Freeze above conv_dw_12
    for layer in model.layers[:70]:
        layer.trainable = False

    # Freeze above conv_dw_13
    # for layer in model.layers[:76]:
    #     layer.trainable = False

    model.summary()
    model.compile(
        optimizer=SGD(lr=0.0001, momentum=0.9),
        # optimizer=Adam(lr=0.0001),
        loss=dice_coef_loss,
        metrics=[
            dice_coef,
            recall,
            precision,
            'binary_crossentropy',
        ],
    )

    # callbacks
    scheduler = callbacks.LearningRateScheduler(
        create_lr_schedule(epochs, lr_base=lr_base, mode='progressive_drops'))
    tensorboard = callbacks.TensorBoard(log_dir='./logs')
    checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                           save_weights_only=True,
                                           save_best_only=True)

    model.fit_generator(
        generator=train_gen(),
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_gen(),
        validation_steps=nb_validation_samples // batch_size,
        callbacks=[scheduler, tensorboard, checkpoint],
    )

    model.save(trained_model_path)
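
These snippets call helpers (MobileUNet, load_data, dice_coef, dice_coef_loss, recall, precision, create_lr_schedule) and settings (checkpoint_path, trained_model_path, nb_train_samples, nb_validation_samples) that are defined at module level in their source files and are not shown here. Below is a minimal sketch of the Dice and pixel-wise recall/precision metrics, assuming binary masks and the old Keras backend API; the exact definitions in the original repositories may differ.

from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Soft Dice overlap between flattened predicted and ground-truth masks.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    # Minimizing this maximizes the Dice coefficient.
    return 1.0 - dice_coef(y_true, y_pred)

def recall(y_true, y_pred):
    # Fraction of true mask pixels that were predicted positive.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())

def precision(y_true, y_pred):
    # Fraction of predicted positives that are true mask pixels.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())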
Example 2
def train(img_file, mask_file, epochs, batch_size):
    # Input size is fixed at 385x385 below; the generators still come from load_data.
    train_gen, validation_gen, img_shape = load_data(img_file, mask_file)

    img_height = 385
    img_width = 385
    lr_base = 0.01 * (float(batch_size) / 16)

    model = MobileUNet(input_shape=(img_height, img_width, 3),
                       alpha=1,
                       alpha_up=0.25)

    model.summary()
    model.compile(
        optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
        # optimizer=Adam(lr=0.001),
        # optimizer=optimizers.RMSprop(),
        loss=dice_coef_loss,
        metrics=[
            dice_coef,
            recall,
            precision,
            'binary_crossentropy',
        ],
    )

    # Log an intermediate activation as an extra fetched tensor. In Keras <= 2.2,
    # metrics_tensors and metrics_names are plain lists, not callables.
    model.metrics_names.append('activation_1')
    model.metrics_tensors.append(model.get_layer('activation_1').output)

    # callbacks
    scheduler = callbacks.LearningRateScheduler(
        create_lr_schedule(epochs, lr_base=lr_base, mode='progressive_drops'))
    tensorboard = callbacks.TensorBoard(log_dir='./logs')
    csv_logger = callbacks.CSVLogger('logs/training.csv')
    checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                           save_weights_only=True,
                                           save_best_only=True)

    model.fit_generator(
        generator=train_gen(),
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_gen(),
        validation_steps=nb_validation_samples // batch_size,
        # callbacks=[tensorboard, checkpoint, csv_logger],
        callbacks=[scheduler, tensorboard, checkpoint, csv_logger],
    )

    model.save(trained_model_path)
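
create_lr_schedule is likewise defined elsewhere. Here is a sketch covering the two call patterns used in this section ('progressive_drops' here, and 'exp' with decay_rate/decay_epochs/truncated_epoch in the DRAW example further down); the drop thresholds and decay factors are illustrative assumptions:

def create_lr_schedule(epochs=None, lr_base=0.01, decay_rate=0.1,
                       decay_epochs=5000, truncated_epoch=15000,
                       mode='progressive_drops'):
    # Returns a function epoch -> learning rate for callbacks.LearningRateScheduler.
    # 'progressive_drops' requires the total epoch budget; 'exp' ignores it.
    def schedule(epoch):
        if mode == 'progressive_drops':
            # Step decay at fixed fractions of the total epoch budget.
            if epoch > 0.9 * epochs:
                return lr_base * 1e-4
            if epoch > 0.75 * epochs:
                return lr_base * 1e-3
            if epoch > 0.5 * epochs:
                return lr_base * 1e-2
            return lr_base
        if mode == 'exp':
            # Exponential decay, frozen after truncated_epoch.
            capped = min(epoch, truncated_epoch)
            return lr_base * decay_rate ** (capped / float(decay_epochs))
        return lr_base
    return schedule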
def train(epochs, batch_size, img_size):
    fresh_training = True
    alpha_value = 0.9

    img_file = 'data/id_pack/images-{}.npy'.format(img_size)
    mask_file = 'data/id_pack/masks-{}.npy'.format(img_size)
    trained_model_path = 'artifacts/model-{}.h5'.format(img_size)

    print("training on image file:")
    print(img_file)
    print(mask_file)

    # Load the data
    train_gen, validation_gen, img_shape, train_len, val_len = load_data(
        img_file, mask_file)

    img_height = img_shape[0]
    img_width = img_shape[1]

    print(img_height, img_width)

    lr_base = 0.01 * (float(batch_size) / 16)

    if fresh_training:
        model = MobileUNet(input_shape=(img_height, img_width, 3),
                           alpha=alpha_value,
                           alpha_up=0.25)
    else:
        with CustomObjectScope(custom_objects()):
            model = load_model(SAVED_MODEL)

    model.summary()
    model.compile(
        #        optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
        optimizer=optimizers.Adam(lr=0.0001),
        # optimizer=Adam(lr=0.001),
        # optimizer=optimizers.RMSprop(),
        #loss=dice_coef_loss,
        loss='mean_absolute_error',
        #loss = loss_gu,
        metrics=[
            dice_coef, recall, precision, dice_coef_loss, 'mean_absolute_error'
        ],
    )

    # callbacks
    scheduler = callbacks.LearningRateScheduler(
        create_lr_schedule(epochs, lr_base=lr_base, mode='progressive_drops'))
    tensorboard = callbacks.TensorBoard(log_dir='./logs')
    csv_logger = callbacks.CSVLogger('logs/training.csv')
    checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                           save_weights_only=True,
                                           save_best_only=True)
    '''
    Legacy-interface note: in older Keras versions, steps_per_epoch and
    validation_steps were interpreted as numbers of samples rather than
    batches (see keras.legacy.interfaces):

        generate_legacy_method_interface(
            allowed_positional_args=['generator', 'steps_per_epoch', 'epochs'],
            conversions=[('samples_per_epoch', 'steps_per_epoch'),
                         ('val_samples', 'steps'),
                         ('nb_epoch', 'epochs'),
                         ('nb_val_samples', 'validation_steps'), ...])
    '''
    nb_train_samples = train_len
    nb_validation_samples = val_len

    print("training sample is ", nb_train_samples)

    if fresh_training:
        cb_list = [scheduler, tensorboard, checkpoint, csv_logger]
    else:
        cb_list = [tensorboard, checkpoint, csv_logger]

    model.fit_generator(
        generator=train_gen(),
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_gen(),
        validation_steps=nb_validation_samples // batch_size,
        callbacks=cb_list,
    )

    model.save(trained_model_path)
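
load_data is not shown either; the later examples expect it to return two generator factories, the image shape, and the train/validation sizes (the first example unpacks only the first three). A rough sketch of that contract, assuming pre-packed .npy arrays, a fixed validation split, and simple 0-1 normalization:

import numpy as np

def load_data(img_file, mask_file, validation_split=0.2, batch_size=32, seed=1):
    # Load pre-packed arrays and split them into train / validation parts.
    images = np.load(img_file).astype(np.float32) / 255.0
    masks = np.load(mask_file).astype(np.float32) / 255.0

    rng = np.random.RandomState(seed)
    order = rng.permutation(len(images))
    split = int(len(images) * (1.0 - validation_split))
    train_idx, val_idx = order[:split], order[split:]

    def make_gen(idx):
        def gen():
            # Endless generator yielding shuffled mini-batches.
            while True:
                rng.shuffle(idx)
                for start in range(0, len(idx) - batch_size + 1, batch_size):
                    batch = idx[start:start + batch_size]
                    yield images[batch], masks[batch]
        return gen

    img_shape = images.shape[1:]
    return (make_gen(train_idx), make_gen(val_idx), img_shape,
            len(train_idx), len(val_idx))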
Example 4
def train(img_file, mask_file, epochs, batch_size):
    train_gen, validation_gen, img_shape, train_len, val_len = load_data(img_file, mask_file)

    img_height = img_shape[0]
    img_width = img_shape[1]
    lr_base = 0.01 * (float(batch_size) / 16)
    
    print(img_height, img_width)

    model = MobileUNet(input_shape=(img_height, img_width, 3),
                       alpha=1,
                       alpha_up=0.25)

    model.summary()
    model.compile(
        optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
        # optimizer=Adam(lr=0.001),
        # optimizer=optimizers.RMSprop(),
        #loss=dice_coef_loss,
        loss='mean_squared_error',
        metrics=[
            dice_coef,
            recall,
            precision,
            'binary_crossentropy',
        ],
    )

    # callbacks
    scheduler = callbacks.LearningRateScheduler(
        create_lr_schedule(epochs, lr_base=lr_base, mode='progressive_drops'))
    tensorboard = callbacks.TensorBoard(log_dir='./logs')
    csv_logger = callbacks.CSVLogger('logs/training.csv')
    checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                           save_weights_only=True,
                                           save_best_only=True)
    
    '''
    Legacy-interface note: in older Keras versions, steps_per_epoch and
    validation_steps were interpreted as numbers of samples rather than
    batches (see keras.legacy.interfaces):

        generate_legacy_method_interface(
            allowed_positional_args=['generator', 'steps_per_epoch', 'epochs'],
            conversions=[('samples_per_epoch', 'steps_per_epoch'),
                         ('val_samples', 'steps'),
                         ('nb_epoch', 'epochs'),
                         ('nb_val_samples', 'validation_steps'), ...])
    '''
    nb_train_samples = train_len 
    nb_validation_samples = val_len
    

    model.fit_generator(
        generator=train_gen(),
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_gen(),
        validation_steps=nb_validation_samples // batch_size,
        # callbacks=[tensorboard, checkpoint, csv_logger],
        callbacks=[scheduler, tensorboard, checkpoint, csv_logger],
    )

    model.save(trained_model_path)
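
These train() functions are usually driven from a small command-line entry point. A hypothetical wrapper for the four-argument variant above (argument names and defaults are not taken from the original repository):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train MobileUNet on packed image/mask arrays')
    parser.add_argument('img_file', help='path to the packed image .npy file')
    parser.add_argument('mask_file', help='path to the packed mask .npy file')
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=32)
    args = parser.parse_args()

    train(args.img_file, args.mask_file, args.epochs, args.batch_size)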
Example 5
def train(coco_path, checkpoint_path, log_path, epochs=100, batch_size=50):
    cat_nms = ['book', 'apple', 'keyboard']

    BATCH_SIZE = batch_size
    NUM_EPOCH = epochs
    IMAGE_W = 224
    IMAGE_H = 224

    model = MobileUNet(input_shape=(IMAGE_H, IMAGE_W, 3),
                       alpha_up=0.25,
                       num_classes=(len(cat_nms) + 1))
    # model.load_weights(os.path.expanduser(mobilenet_weights_path.format(img_height)),
    #            by_name=True)

    # # Freeze mobilenet original weights
    # for layer in model.layers[:82]:
    #     layer.trainable = False

    seed = 1
    np.random.seed(seed)

    training_generator = coco_generator(cat_nms,
                                        coco_path,
                                        batch_size=BATCH_SIZE)
    validation_generator = coco_generator(cat_nms,
                                          coco_path,
                                          subset='val',
                                          batch_size=BATCH_SIZE)

    model.summary()
    if os.path.exists(checkpoint_path):
        model.load_weights(checkpoint_path, by_name=True)

    model.compile(
        optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
        # optimizer=Adam(lr=0.001),
        # optimizer=optimizers.RMSprop(),
        loss=dice_coef_loss,
        metrics=[
            dice_coef,
            recall,
            precision,
            'binary_crossentropy',
        ],
    )

    lr_base = 0.01 * (float(BATCH_SIZE) / 16)

    # callbacks
    scheduler = callbacks.LearningRateScheduler(
        create_lr_schedule(NUM_EPOCH,
                           lr_base=lr_base,
                           mode='progressive_drops'))
    tensorboard = callbacks.TensorBoard(log_dir=log_path)
    csv_logger = callbacks.CSVLogger(os.path.join(log_path, 'training.csv'))
    checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                           save_weights_only=True,
                                           save_best_only=True)

    # Train model on dataset
    model.fit_generator(
        generator=training_generator,
        validation_data=validation_generator,
        epochs=NUM_EPOCH,
        callbacks=[scheduler, tensorboard, checkpoint, csv_logger],
    )
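
coco_generator comes from the same repository and is not shown. Below is a rough sketch of a generator with the same signature, built on pycocotools; the directory layout, year suffix, resizing, and normalization are all assumptions:

import os
import numpy as np
from PIL import Image
from pycocotools.coco import COCO

def coco_generator(cat_nms, coco_path, subset='train', batch_size=32,
                   img_size=(224, 224), year='2017'):
    # Yield (image batch, one-hot mask batch) for the requested COCO categories.
    ann_file = os.path.join(coco_path, 'annotations',
                            'instances_{}{}.json'.format(subset, year))
    img_dir = os.path.join(coco_path, '{}{}'.format(subset, year))
    coco = COCO(ann_file)
    cat_ids = coco.getCatIds(catNms=cat_nms)
    img_ids = sorted({i for c in cat_ids for i in coco.getImgIds(catIds=[c])})

    while True:
        np.random.shuffle(img_ids)
        for start in range(0, len(img_ids) - batch_size + 1, batch_size):
            xs, ys = [], []
            for img_id in img_ids[start:start + batch_size]:
                info = coco.loadImgs(img_id)[0]
                img = Image.open(os.path.join(img_dir, info['file_name'])).convert('RGB')
                img = img.resize(img_size)
                # One channel per category plus a background channel at index 0.
                mask = np.zeros(img_size + (len(cat_nms) + 1,), dtype=np.float32)
                for k, cat_id in enumerate(cat_ids):
                    ann_ids = coco.getAnnIds(imgIds=img_id, catIds=[cat_id], iscrowd=None)
                    for ann in coco.loadAnns(ann_ids):
                        m = Image.fromarray(coco.annToMask(ann) * 255)
                        m = m.resize(img_size, Image.NEAREST)
                        mask[..., k + 1] = np.maximum(mask[..., k + 1],
                                                      np.array(m, dtype=np.float32) / 255.0)
                mask[..., 0] = 1.0 - mask[..., 1:].max(axis=-1)
                xs.append(np.array(img, dtype=np.float32) / 255.0)
                ys.append(mask)
            yield np.array(xs), np.array(ys)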
Example 6
def main(name, dataset, epochs, batch_size, learning_rate, attention, n_iter,
         enc_dim, dec_dim, z_dim, oldmodel, live_plotting):

    if name is None:
        name = dataset

    channels, height, width = 1, 28, 28
    x_dim = channels * height * width

    # Configure attention mechanism
    if attention != "":
        read_N, write_N = attention.split(",")

        read_N = int(read_N)
        write_N = int(write_N)

        reader = AttentionReader(channels, height, width, read_N)
        writer = AttentionWriter(channels, height, width, write_N)
        attention_tag = "r%d-w%d" % (read_N, write_N)
    else:
        reader = Reader(channels, height, width)
        writer = Writer(x_dim, channels, height, width)

        attention_tag = "full"

    lr_str = "%1.0e" % learning_rate
    result_dir = os.path.join("./", "result")
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    longname = "%s-%s-t%d-enc%d-dec%d-z%d-lr%s" % \
               (dataset, attention_tag, n_iter, enc_dim, dec_dim, z_dim, lr_str)

    print("\nRunning experiment %s" % longname)
    print("               dataset: %s" % dataset)
    print("          subdirectory: %s" % result_dir)
    print("         learning rate: %g" % learning_rate)
    print("             attention: %s" % attention)
    print("          n_iterations: %d" % n_iter)
    print("     encoder dimension: %d" % enc_dim)
    print("           z dimension: %d" % z_dim)
    print("     decoder dimension: %d" % dec_dim)
    print("            batch size: %d" % batch_size)
    print("                epochs: %d" % epochs)
    print()

    # ----------------------------------------------------------------------

    encoder_rnn = tf.nn.rnn_cell.LSTMCell(enc_dim, state_is_tuple=True)
    decoder_rnn = tf.nn.rnn_cell.LSTMCell(dec_dim, state_is_tuple=True)
    sampler = Qsampler(z_dim)

    draw = DrawModel(batch_size,
                     x_dim,
                     z_dim,
                     dec_dim,
                     n_iter=n_iter,
                     reader=reader,
                     encoder_rnn=encoder_rnn,
                     sampler=sampler,
                     decoder_rnn=decoder_rnn,
                     writer=writer)

    # -------------------------------------------------------------------------

    x = tf.placeholder(tf.float32, shape=(batch_size, x_dim))
    lr = tf.placeholder(tf.float32, shape=[])

    x_recons, kl_term, xs, c_ws = draw.reconstruct(x)

    def binary_crossentropy(t, o):
        eps = 1E-8
        return -(t * tf.log(o + eps) + (1.0 - t) * tf.log(1.0 - o + eps))

    Lx = tf.reduce_mean(
        tf.reduce_sum(binary_crossentropy(x, tf.nn.sigmoid(x_recons)), 1))

    # Lx = tf.losses.sigmoid_cross_entropy(x, logits=x_recons)
    Lz = tf.reduce_mean(kl_term)
    cost = Lx + Lz

    # -----------------------------------------------------------------------
    optim = tf.train.AdamOptimizer(lr, beta1=0.5)
    gradients_and_vars = optim.compute_gradients(cost)
    # Clip each gradient's norm to 5, leaving None gradients untouched.
    gradients_and_vars = [(grad if grad is None else tf.clip_by_norm(grad, 5), var)
                          for grad, var in gradients_and_vars]

    train_opt = optim.apply_gradients(gradients_and_vars)
    sample_x, sample_rg = draw.sample()
    # --------------------------------------------------------------
    data_directory = os.path.join("./", "mnist")
    if not os.path.exists(data_directory):
        os.makedirs(data_directory)
    train_data = mnist.input_data.read_data_sets(
        data_directory, one_hot=True).train  # (samples, height, width)

    # ----------------------------------------------------
    sess_config = tf.ConfigProto(allow_soft_placement=True,
                                 log_device_placement=True)
    sess_config.gpu_options.allow_growth = True
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    saver = tf.train.Saver(max_to_keep=2)
    process = Process()
    with tf.Session(config=sess_config) as sess:
        '''
        Start a fresh session; restoring a previously saved checkpoint
        (the `oldmodel` argument) is not implemented in this snippet.
        '''
        sess.run(tf.global_variables_initializer())
        losses = []
        lr_schedule = create_lr_schedule(lr_base=learning_rate,
                                         decay_rate=0.1,
                                         decay_epochs=5000,
                                         truncated_epoch=15000,
                                         mode="exp")
        for epoch in range(epochs):
            process.start_epoch()
            lr_epoch = lr_schedule(epoch)

            # Given data batch to Graph
            x_batch, _ = train_data.next_batch(batch_size)
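            # Binarize pixels to {0, 1}, consistent with the binary cross-entropy reconstruction loss.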
            x_batch = (x_batch > 0.5).astype(np.float32)

            lx, lz, tl, _ = sess.run([Lx, Lz, cost, train_opt],
                                     feed_dict={
                                         x: x_batch,
                                         lr: lr_epoch
                                     })

            losses.append([lx, lz, tl])

            if epoch % 10 == 0 or epoch == epochs - 1:
                process.format_meter(epoch, epochs, {
                    "Lx": lx,
                    "Lz": lz,
                    "Cost": tl
                })

        np.save(result_dir + "/draw_losses.npy", np.array(losses))
        saver.save(sess,
                   os.path.join(result_dir, "model_ckpt"),
                   global_step=epochs)
        print("Results have been saved")

        # -----------------------------------------------------------

        # canvases: shape(n_iter, batch_size, x_dim)
        # rg: shape(n_iter, batch_size, 4)
        x_batch, _ = train_data.next_batch(batch_size)
        x_batch = (x_batch > 0.5).astype(np.float32)
        canvases_gen, rg_gen = sess.run([sample_x, sample_rg], feed_dict={})
        canvases_trn, rg_trn = sess.run([xs, c_ws], feed_dict={x: x_batch})
        canvases_gen, rg_gen = np.array(canvases_gen), np.array(rg_gen)
        canvases_trn, rg_trn = np.array(canvases_trn), np.array(rg_trn)
        pickle_save([canvases_gen, rg_gen, canvases_trn, rg_trn],
                    ["Generate", "Train"], "./result/draw.pkl")