# Shared imports assumed by the examples below (Keras 2-era API).
# `resnet_models`, `cifar10_resnet_mod`, `logz`, `log_utils`, `utils`,
# and `FLAGS` are project-specific modules/objects not shown here.
import os
from time import time, localtime, strftime

import numpy as np
import tensorflow as tf
import keras
from keras import optimizers
from keras.optimizers import Adam
from keras.callbacks import (ModelCheckpoint, TensorBoard,
                             LearningRateScheduler, ReduceLROnPlateau)


def trainModel(train_data_generator, val_data_generator, model, initial_epoch):
    """
    Model training.

    # Arguments
       train_data_generator: Training data generated batch by batch.
       val_data_generator: Validation data generated batch by batch.
       model: A Model instance.
       initial_epoch: Epoch from which training starts.
    """
    # Configure training process
    model.compile(
        loss='binary_crossentropy',
        optimizer=Adam(lr=resnet_models.lr_schedule(0, FLAGS.initial_lr)),
        metrics=['binary_accuracy'],
        loss_weights=np.ones((15, )).tolist())

    # Save model with the lowest validation loss
    weights_path = os.path.join(FLAGS.experiment_rootdir,
                                'weights_{epoch:03d}.h5')
    writeBestModel = ModelCheckpoint(filepath=weights_path,
                                     monitor='val_loss',
                                     save_best_only=True,
                                     save_weights_only=True)

    # Save training and validation losses.
    logz.configure_output_dir(FLAGS.experiment_rootdir)
    saveModelAndLoss = log_utils.MyCallback(filepath=FLAGS.experiment_rootdir)

    # Train model
    steps_per_epoch = int(
        np.ceil(train_data_generator.samples / FLAGS.batch_size))
    # The "- 1" skips the last (possibly partial) validation batch.
    validation_steps = int(
        np.ceil(val_data_generator.samples / FLAGS.batch_size)) - 1

    lr_scheduler = LearningRateScheduler(resnet_models.lr_schedule)
    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=5,
                                   min_lr=0.5e-6)
    strTime = strftime("%Y%b%d_%Hh%Mm%Ss", localtime(time()))
    # histogram_freq > 0 would additionally log weight histograms at the
    # cost of slower epochs; it is disabled here.
    tensorboard = TensorBoard(log_dir="logs/{}".format(strTime),
                              histogram_freq=0)
    callbacks = [
        writeBestModel, saveModelAndLoss, lr_reducer, lr_scheduler, tensorboard
    ]
    model.fit_generator(train_data_generator,
                        epochs=FLAGS.epochs,
                        steps_per_epoch=steps_per_epoch,
                        callbacks=callbacks,
                        validation_data=val_data_generator,
                        validation_steps=validation_steps,
                        initial_epoch=initial_epoch)
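
Examples 1, 3, and 4 rely on a project-specific lr_schedule that is not shown. As a minimal sketch, assuming the classic step-decay recipe (the milestones and factors below are placeholders, not the project's values), a compatible function could look like this. Note that recent Keras versions of LearningRateScheduler pass the current learning rate as a second argument, so computing the absolute rate from the initial rate avoids compounding the decay across epochs:

def lr_schedule(epoch, initial_lr=1e-3):
    # Hypothetical step decay: derive the absolute rate for this epoch
    # from the initial rate, so repeated calls never compound the decay.
    lr = initial_lr
    if epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    return lr

# To keep the schedule anchored to the configured initial rate:
#   LearningRateScheduler(lambda epoch: lr_schedule(epoch, FLAGS.initial_lr))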
Example #2
def trainModel(train_data_generator, val_data_generator, model, initial_epoch):
    """
    Model training.

    # Arguments
       train_data_generator: Training data generated batch by batch.
       val_data_generator: Validation data generated batch by batch.
       model: A Model instance.
       initial_epoch: Epoch from which training starts.
    """

    # Initialize loss weights
    ##model.alpha = tf.Variable(1, trainable=False, name='alpha', dtype=tf.float32)
    ##model.beta = tf.Variable(0, trainable=False, name='beta', dtype=tf.float32)
    model.beta = tf.Variable(1, trainable=False, name='beta', dtype=tf.float32)

    # Initialize number of samples for hard-mining
    ##model.k_mse = tf.Variable(FLAGS.batch_size, trainable=False, name='k_mse', dtype=tf.int32)
    model.k_entropy = tf.Variable(FLAGS.batch_size,
                                  trainable=False,
                                  name='k_entropy',
                                  dtype=tf.int32)

    optimizer = optimizers.Adam(decay=1e-5)

    # Configure training process
    ##model.compile(loss=[utils.hard_mining_mse(model.k_mse),
    ##                    utils.hard_mining_entropy(model.k_entropy)],
    ##                    optimizer=optimizer, loss_weights=[model.alpha, model.beta])
    model.compile(loss=utils.hard_mining_entropy(model.k_entropy),
                  optimizer=optimizer)

    # Save model with the lowest validation loss
    weights_path = os.path.join(FLAGS.experiment_rootdir,
                                'weights_{epoch:03d}.h5')
    writeBestModel = ModelCheckpoint(filepath=weights_path,
                                     monitor='val_loss',
                                     save_best_only=True,
                                     save_weights_only=True)

    # Save model every 'log_rate' epochs.
    # Save training and validation losses.
    logz.configure_output_dir(FLAGS.experiment_rootdir)
    saveModelAndLoss = log_utils.MyCallback(filepath=FLAGS.experiment_rootdir,
                                            period=FLAGS.log_rate,
                                            batch_size=FLAGS.batch_size)

    # Train model
    steps_per_epoch = int(
        np.ceil(train_data_generator.samples / FLAGS.batch_size))
    validation_steps = int(
        np.ceil(val_data_generator.samples / FLAGS.batch_size))

    model.fit_generator(train_data_generator,
                        epochs=FLAGS.epochs,
                        steps_per_epoch=steps_per_epoch,
                        callbacks=[writeBestModel, saveModelAndLoss],
                        validation_data=val_data_generator,
                        validation_steps=validation_steps,
                        initial_epoch=initial_epoch)
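
utils.hard_mining_entropy is project-specific and not shown. A minimal sketch of the idea, assuming it averages binary cross-entropy over only the k hardest samples in the batch (the name, argument order, and details here are assumptions):

import tensorflow as tf
import keras.backend as K


def hard_mining_entropy(k):
    # Hypothetical sketch: keep only the k largest per-sample
    # binary cross-entropy values (the "hard" samples) and average them.
    def custom_bin_crossentropy(y_true, y_pred):
        per_sample = K.flatten(K.binary_crossentropy(y_true, y_pred))
        # Never request more samples than the batch contains.
        k_eff = K.minimum(k, K.shape(per_sample)[0])
        hardest, _ = tf.nn.top_k(per_sample, k=k_eff)
        return K.mean(hardest)
    return custom_bin_crossentropy

Example #3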
def trainModel(train_data_generator, val_data_generator, model, initial_epoch):
    """
    Model training.
    # Arguments
       train_data_generator: Training data generated batch by batch.
       val_data_generator: Validation data generated batch by batch.
       model: A Model instance.
       initial_epoch: Epoch from which training starts.
    """
    # Configure training process
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=cifar10_resnet_mod.lr_schedule(0)),
                  metrics=['categorical_accuracy'])

    # Save model with the lowest validation loss
    weights_path = os.path.join(FLAGS.experiment_rootdir,
                                'weights_{epoch:03d}.h5')
    writeBestModel = ModelCheckpoint(filepath=weights_path,
                                     monitor='val_loss',
                                     save_best_only=True,
                                     save_weights_only=True)

    # Save training and validation losses.
    logz.configure_output_dir(FLAGS.experiment_rootdir)
    saveModelAndLoss = log_utils.MyCallback(filepath=FLAGS.experiment_rootdir)

    # Train model
    steps_per_epoch = int(
        np.ceil(train_data_generator.samples / FLAGS.batch_size))
    validation_steps = int(
        np.ceil(val_data_generator.samples / FLAGS.batch_size)) - 1

    lr_scheduler = LearningRateScheduler(cifar10_resnet_mod.lr_schedule)

    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=5,
                                   min_lr=0.5e-6)

    # TensorBoard is used to visualize the results of the training iterations.
    # Up to this point, an iterative process keeps, out of all runs,
    # the model with the best result on the validation data.
    strTime = strftime("%Y%b%d_%Hh%Mm%Ss", localtime(time()))
    tensorboard = TensorBoard(log_dir="logs/{}".format(strTime),
                              histogram_freq=0)
    callbacks = [
        writeBestModel, saveModelAndLoss, lr_reducer, lr_scheduler, tensorboard
    ]

    model.fit_generator(train_data_generator,
                        epochs=FLAGS.epochs,
                        steps_per_epoch=steps_per_epoch,
                        callbacks=callbacks,
                        validation_data=val_data_generator,
                        validation_steps=validation_steps,
                        initial_epoch=initial_epoch)
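
The variants above read a .samples attribute off the generators, which Keras' DirectoryIterator provides. A minimal sketch of how these functions might be driven (the directory layout, image size, and class_mode are placeholders, not the project's values):

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255)
val_datagen = ImageDataGenerator(rescale=1. / 255)

# flow_from_directory returns a DirectoryIterator, which exposes `.samples`.
train_gen = train_datagen.flow_from_directory('data/train',
                                              target_size=(200, 200),
                                              batch_size=FLAGS.batch_size,
                                              class_mode='categorical')
val_gen = val_datagen.flow_from_directory('data/val',
                                          target_size=(200, 200),
                                          batch_size=FLAGS.batch_size,
                                          class_mode='categorical')

trainModel(train_gen, val_gen, model, initial_epoch=0)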
Example #4
def train_model(train_generator, val_generator, model, initial_epoch):
    """
    Model training.

    # Arguments
       train_generator: Training data generated batch by batch.
       val_generator: Validation data generated batch by batch.
       model: A Model instance.
       initial_epoch: Epoch from which training starts.
    """
    # Configure training process
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=lr_schedule(0)),
                  metrics=['accuracy'])

    # Save model with the lowest validation loss
    weights_path = os.path.join(FLAGS.experiment_rootdir,
                                'weights_{epoch:03d}.h5')
    write_best_model = ModelCheckpoint(filepath=weights_path,
                                       monitor='val_loss',
                                       save_best_only=True,
                                       save_weights_only=True)

    # Save training and validation losses.
    logz.configure_output_dir(FLAGS.experiment_rootdir)
    save_model_and_loss = log_utils.MyCallback(
        filepath=FLAGS.experiment_rootdir)

    # Train model
    lr_scheduler = LearningRateScheduler(lr_schedule, verbose=FLAGS.verbose)

    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=5,
                                   verbose=FLAGS.verbose,
                                   min_lr=0.5e-6)
    # earlystopping = EarlyStopping(monitor='val_loss', patience=3, verbose=FLAGS.verbose)

    str_time = strftime("%Y%b%d_%Hh%Mm%Ss", localtime(time()))
    tensorboard = TensorBoard(log_dir="logs/{}".format(str_time),
                              histogram_freq=0)

    callbacks = [
        write_best_model, save_model_and_loss, lr_reducer, lr_scheduler,
        tensorboard
    ]

    model.fit_generator(train_generator,
                        validation_data=val_generator,
                        epochs=FLAGS.epochs,
                        verbose=FLAGS.verbose,
                        callbacks=callbacks,
                        initial_epoch=initial_epoch,
                        use_multiprocessing=True)
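
Because ModelCheckpoint writes weights_{epoch:03d}.h5, resuming an interrupted run only needs the newest checkpoint and its epoch index. A hypothetical helper (the glob pattern mirrors weights_path above; the function itself is an assumption, not part of the project):

import glob
import os


def resume_state(rootdir, model):
    # Load the newest checkpoint, if any, and return the epoch to resume from.
    checkpoints = sorted(glob.glob(os.path.join(rootdir, 'weights_*.h5')))
    if not checkpoints:
        return 0  # fresh run
    latest = checkpoints[-1]
    model.load_weights(latest)
    return int(os.path.basename(latest)[-6:-3])  # 'weights_042.h5' -> 42

# initial_epoch = resume_state(FLAGS.experiment_rootdir, model)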
Example #5
def trainModel(train_data_generator, val_data_generator, model, initial_epoch):
    """
    Model training.
    # Arguments
       train_data_generator: Training data generated batch by batch.
       val_data_generator: Validation data generated batch by batch.
       model: A Model instance.
       initial_epoch: Epoch from which training starts.
    """

    # Configure training process
    optimizer = keras.optimizers.Adam(lr=FLAGS.initial_lr, decay=1e-6)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['binary_accuracy'],
                  loss_weights=np.ones((21, )).tolist())

    # Save model with the lowest validation loss
    weights_path = os.path.join(FLAGS.experiment_rootdir,
                                'weights_{epoch:03d}.h5')
    writeBestModel = ModelCheckpoint(filepath=weights_path,
                                     monitor='val_loss',
                                     save_best_only=True,
                                     save_weights_only=True)
    tensorboard = TensorBoard(log_dir="logs/{}".format(time()))

    # Save training and validation losses.
    logz.configure_output_dir(FLAGS.experiment_rootdir)
    saveModelAndLoss = log_utils.MyCallback(filepath=FLAGS.experiment_rootdir)

    # Train model
    steps_per_epoch = int(
        np.ceil(train_data_generator.samples / FLAGS.batch_size))
    validation_steps = int(
        np.ceil(val_data_generator.samples / FLAGS.batch_size)) - 1

    model.fit_generator(
        train_data_generator,
        epochs=FLAGS.epochs,
        steps_per_epoch=steps_per_epoch,
        callbacks=[writeBestModel, saveModelAndLoss, tensorboard],
        validation_data=val_data_generator,
        validation_steps=validation_steps,
        initial_epoch=initial_epoch)
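
The loss_weights=np.ones((21,)).tolist() above only makes sense against a model with 21 outputs, each trained with its own binary cross-entropy. A minimal sketch of a head shape that this compile call would accept (backbone, sizes, and names are placeholders):

from keras.layers import Dense, Flatten, Input
from keras.models import Model

inputs = Input(shape=(224, 224, 3))   # placeholder input size
x = Flatten()(inputs)                 # stand-in for a real conv backbone
x = Dense(128, activation='relu')(x)
# One independent sigmoid head per binary attribute: 21 outputs in total,
# matching the 21-element loss_weights list.
outputs = [Dense(1, activation='sigmoid', name='attr_%02d' % i)(x)
           for i in range(21)]
model = Model(inputs=inputs, outputs=outputs)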
Example #6
def trainModel(train_data_generator, val_data_generator, model, initial_epoch):
    """
    Model training.

    # Arguments
       train_data_generator: Training data generated batch by batch.
       val_data_generator: Validation data generated batch by batch.
       model: A Model instance.
       initial_epoch: Epoch from which training starts.
    """

    # Initialize loss weights
    model.alpha = tf.Variable(1,
                              trainable=False,
                              name='alpha',
                              dtype=tf.float32)
    model.beta = tf.Variable(1,
                             trainable=False,
                             name='beta',
                             dtype=tf.float32)

    # Initialize number of samples for hard-mining
    model.k_mse = tf.Variable(FLAGS.batch_size,
                              trainable=False,
                              name='k_mse',
                              dtype=tf.int32)
    model.k_entropy = tf.Variable(FLAGS.batch_size,
                                  trainable=False,
                                  name='k_entropy',
                                  dtype=tf.int32)

    optimizer = optimizers.Adam(lr=0.00001, decay=1e-5)
    # optimizer = optimizers.Nadam(lr=0.000002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
    # optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-6)
    # Configure training process
    # model.compile(loss=[utils.mean_log_Gaussian_like_with_sigma_supress,
    #                    utils.hard_mining_mse_translation(model.k_mse)],
    #                    optimizer=optimizer, loss_weights=[model.alpha, model.beta])

    model.compile(loss=[
        utils.mean_log_Gaussian_like_with_sigma_supress,
        utils.hard_mining_mse_translation(model.k_mse)
    ],
                  optimizer=optimizer,
                  loss_weights=[model.alpha, model.beta],
                  metrics={
                      'direct_output': utils.direction_acc,
                      'trans_output': utils.trans_acc
                  })

    s_time = strftime("%Y%m%d%H%M%S", localtime())  # timestamp for the logs
    # logs dir
    logs_path = './logs/log_%s' % s_time

    try:
        os.makedirs(logs_path)
    except OSError:
        pass  # directory already exists

    # Log loss, acc, val_loss and val_acc to TensorBoard.
    tensorboard = TensorBoard(log_dir=logs_path, write_graph=True)

    # Save model with the lowest validation loss
    weights_path = os.path.join(FLAGS.experiment_rootdir,
                                'weights_{epoch:03d}.h5')
    writeBestModel = ModelCheckpoint(filepath=weights_path,
                                     monitor='val_loss',
                                     save_best_only=True,
                                     save_weights_only=True)

    # Save model every 'log_rate' epochs.
    # Save training and validation losses.
    logz.configure_output_dir(FLAGS.experiment_rootdir)
    saveModelAndLoss = log_utils.MyCallback(filepath=FLAGS.experiment_rootdir,
                                            period=FLAGS.log_rate,
                                            batch_size=FLAGS.batch_size)

    # Train model
    steps_per_epoch = int(
        np.ceil(train_data_generator.samples / FLAGS.batch_size))
    validation_steps = int(
        np.ceil(val_data_generator.samples / FLAGS.batch_size))

    model.fit_generator(
        train_data_generator,
        epochs=FLAGS.epochs,
        steps_per_epoch=steps_per_epoch,
        callbacks=[writeBestModel, saveModelAndLoss, tensorboard],
        validation_data=val_data_generator,
        validation_steps=validation_steps,
        initial_epoch=initial_epoch)
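
The non-trainable variables above (model.alpha, model.beta, model.k_mse, model.k_entropy) only matter if something updates them between epochs; log_utils.MyCallback presumably does. A sketch of that kind of callback, with an invented decay policy (everything here is an assumption, not the project's MyCallback):

import keras.backend as K
from keras.callbacks import Callback


class HardMiningSchedule(Callback):
    # Hypothetical: shrink the number of hard-mined samples as training
    # progresses, so the loss concentrates on ever-harder examples.
    def __init__(self, batch_size, min_k=16, halve_every=10):
        super(HardMiningSchedule, self).__init__()
        self.batch_size = batch_size
        self.min_k = min_k
        self.halve_every = halve_every

    def on_epoch_end(self, epoch, logs=None):
        new_k = max(self.batch_size // (2 ** (epoch // self.halve_every)),
                    self.min_k)
        K.set_value(self.model.k_mse, new_k)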
Example #7
# This fragment assumes `model`, `optimizer`, `OUTPUT_PATH`,
# `train_data_generator`, and `test_data_generator` are defined earlier,
# and that `model.alpha` was created as a non-trainable tf.Variable.
model.k_mse = tf.Variable(32, trainable=False, name='k_mse', dtype=tf.int32)
#model.k_entropy = tf.Variable(32, trainable=False, name='k_entropy', dtype=tf.int32)

model.compile(
    loss=[utils.hard_mining_mse(model.k_mse)],
    #utils.hard_mining_entropy(model.k_entropy)],
    optimizer=optimizer,
    loss_weights=[model.alpha])
#loss_weights=[model.alpha, model.beta])

# Save model with the lowest validation loss
weights_path = os.path.join(OUTPUT_PATH, 'weights_{epoch:03d}.h5')
writeBestModel = ModelCheckpoint(filepath=weights_path,
                                 monitor='val_loss',
                                 save_best_only=True,
                                 save_weights_only=True)
# Save model every 'log_rate' epochs.
# Save training and validation losses.
logz.configure_output_dir(OUTPUT_PATH)
saveModelAndLoss = log_utils.MyCallback(filepath=OUTPUT_PATH,
                                        period=10,
                                        batch_size=32)

model.fit_generator(train_data_generator,
                    steps_per_epoch=15,
                    epochs=50,
                    callbacks=[writeBestModel, saveModelAndLoss],
                    validation_data=test_data_generator,
                    validation_steps=5,
                    verbose=True)
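
utils.hard_mining_mse is not shown in any of the fragments either. By analogy with the entropy variant sketched after Example #2, a hedged guess is a top-k squared error; the sketch below assumes a scalar regression target per sample:

import tensorflow as tf
import keras.backend as K


def hard_mining_mse(k):
    # Hypothetical top-k MSE: average the squared error over only the
    # k hardest samples in the batch.
    def custom_mse(y_true, y_pred):
        per_sample = K.flatten(K.square(y_pred - y_true))
        k_eff = K.minimum(k, K.shape(per_sample)[0])
        hardest, _ = tf.nn.top_k(per_sample, k=k_eff)
        return K.mean(hardest)
    return custom_mse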