Example #1
def clr_keras_callback(mode=None, base_lr=1e-4, max_lr=1e-3, gamma=0.999994):
    """ Creates keras callback for cyclical learning rate. """
    # keras_contrib = './keras_contrib/callbacks'
    # sys.path.append(keras_contrib)
    from cyclical_learning_rate import CyclicLR
    if mode == 'trng1':
        clr = CyclicLR(base_lr=base_lr, max_lr=max_lr, mode='triangular')
    elif mode == 'trng2':
        clr = CyclicLR(base_lr=base_lr, max_lr=max_lr, mode='triangular2')
    elif mode == 'exp':
        # typical gamma values: 0.99994, 0.999994, 0.99999994
        clr = CyclicLR(base_lr=base_lr, max_lr=max_lr, mode='exp_range', gamma=gamma)
    else:
        raise ValueError(f"unknown CLR mode: {mode!r}")
    return clr
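A minimal usage sketch for this helper (the toy model and random data are placeholders, and it assumes the installed CyclicLR is compatible with the Keras build in use):

import numpy as np
from tensorflow import keras

# Toy model and data, purely illustrative.
model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(1)])
model.compile(optimizer='sgd', loss='mse')

clr = clr_keras_callback(mode='trng1')  # triangular policy from the helper
x, y = np.random.rand(64, 4), np.random.rand(64, 1)
model.fit(x, y, epochs=2, batch_size=8, callbacks=[clr])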
Example #2
def feature_train_2d():
    # shuffle() is assumed to be sklearn.utils.shuffle, which returns a
    # shuffled copy (random.shuffle would shuffle in place and return None).
    train_array = shuffle(list(range(0, 17928)))

    validation_array = list(range(0, 1991))

    ckpt_path = 'log/weights-{val_loss:.4f}.hdf5'
    ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_path,
                                              monitor='val_loss',
                                              verbose=1,
                                              save_best_only=True,
                                              mode='min')

    model = PoseNet_50(input_shape=(224, 224, 3))
    adadelta = optimizers.Adadelta(lr=0.05, rho=0.9, decay=0.0)
    # model.load_weights("model_data/weights-0.0753.hdf5")
    model.compile(optimizer=adadelta, loss=euc_dist_keras, metrics=['mae'])
    #lrate = LearningRateScheduler(step_decay)
    clr = CyclicLR(base_lr=float("3.3e-4"),
                   max_lr=0.001,
                   step_size=1900,
                   mode='triangular')
    result = model.fit_generator(
        generator=pose2d_get_further_train_batch(train_array, 8, True),
        steps_per_epoch=2241,
        callbacks=[ckpt, clr],
        epochs=60000,
        verbose=1,
        validation_data=pose2d_get_further_train_batch(validation_array, 8,
                                                       False),
        validation_steps=249,
        workers=1)
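A note on step_size: with 17928 samples and batch size 8 there are 2241 iterations per epoch (the steps_per_epoch above), and Smith's CLR paper recommends a step_size of 2 to 8 times that, so the 1900 used here is slightly under one half-cycle per epoch. A sketch of the arithmetic:

# Deriving step_size from the numbers in this example (CLR heuristic:
# step_size = 2-8x iterations per epoch; Smith, 2017).
n_samples, batch_size = 17928, 8
iterations_per_epoch = n_samples // batch_size  # 2241, matches steps_per_epoch
step_size = 2 * iterations_per_epoch            # 4482; the example uses 1900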
Example #3
def train_3d_conv(base_model='', ckpt_model="None"):
    img_path = "/media/disk1/human3.6/H36M-images/images/"
    pose_path = "/media/disk1/human3.6/Annot/"
    ckpt_path = 'log/3d_conv_weights-{val_loss:.4f}.hdf5'
    ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_path,
                                              monitor='val_loss',
                                              verbose=1,
                                              save_best_only=True,
                                              mode='min')
    model = make_seq_model(base_model)
    adam = optimizers.adam(lr=float("1e-4"))
    model.compile(optimizer=adam,
                  loss=euc_joint_dist_loss,
                  metrics=[euc_joint_metrics_dist_keras, metrics_pckh])
    clr = CyclicLR(base_lr=float("1e-7"),
                   max_lr=float("1e-4"),
                   step_size=45288,
                   mode='triangular')
    model.summary()

    if ckpt_model != 'None':
        model.load_weights(ckpt_model)

    result = model.fit_generator(
        generator=get_3d_train_batch(img_path, pose_path),
        steps_per_epoch=45288,
        callbacks=[ckpt, clr],
        epochs=60000,
        verbose=1,
        validation_data=get_3d_Val_batch(img_path, pose_path),
        validation_steps=839,
        workers=1)
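Hypothetical invocations of this trainer, showing a fresh run versus resuming from a checkpoint (the weight file below is a placeholder following the ckpt_path pattern):

# Fresh run: ckpt_model defaults to 'None', so no weights are loaded.
train_3d_conv(base_model='')

# Resume: any other value is treated as a weights file to load.
train_3d_conv(base_model='',
              ckpt_model='log/3d_conv_weights-0.1234.hdf5')  # placeholder name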
Example #4
def train_2d():
    # As in the previous example, shuffle() is assumed to return a copy.
    index_array = shuffle(list(range(1, 1901)))

    validation_array = list(range(1901, 2001))

    ckpt_path = 'log/weights-{val_loss:.4f}.hdf5'
    ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_path,
                                              monitor='val_loss',
                                              verbose=1,
                                              save_best_only=True,
                                              mode='min')

    model = PoseNet_50(input_shape=(224, 224, 3))
    adadelta = optimizers.Adadelta(lr=0.05, rho=0.9, decay=0.0)
    clr = CyclicLR(base_lr=float("3.3e-4"),
                   max_lr=0.05,
                   step_size=1900,
                   mode='triangular')
    model.compile(optimizer=adadelta, loss=euc_dist_keras, metrics=['mae'])
    model.load_weights('D:/dissertation/log/weights-118.9920.hdf5')
    #lrate = LearningRateScheduler(step_decay)
    result = model.fit_generator(
        generator=pose2d_get_train_batch(index_array, 8),
        steps_per_epoch=238,
        callbacks=[ckpt, clr],
        epochs=60000,
        verbose=1,
        validation_data=pose2d_get_train_batch(validation_array, 8),
        validation_steps=52,
        workers=1)
Example #5
def get_cb_cyclic_lr():
    return CyclicLR(base_lr=0.001,
                    max_lr=0.006,
                    step_size=2000.,
                    mode='triangular',
                    gamma=1.,
                    scale_fn=None,
                    scale_mode='cycle')
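These arguments appear to be keras_contrib's documented defaults. Passing an explicit scale_fn instead of a named mode yields the same policy; a sketch (the function name is hypothetical):

# When scale_fn is given, CyclicLR ignores mode; a constant 1.0
# reproduces the plain 'triangular' scaling.
def get_cb_cyclic_lr_custom():
    return CyclicLR(base_lr=0.001,
                    max_lr=0.006,
                    step_size=2000.,
                    scale_fn=lambda x: 1.0,
                    scale_mode='cycle')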
Example #6
def train_3d_8s(base_model='', ckpt_model='None'):
    train_skip, val_skip = read_skip()
    train_array = list(range(1, 16545))
    train_array = [i for i in train_array if i not in train_skip]
    train_array = shuffle(train_array)

    val_array = list(range(1, 4740))
    val_array = [i for i in val_array if i not in val_skip]
    val_array = shuffle(val_array)

    ckpt_path = 'log/3d_weights_8s-{val_loss:.4f}.hdf5'
    ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_path,
                                              monitor='val_loss',
                                              verbose=1,
                                              save_best_only=True,
                                              mode='min')

    model, stride = resnet50_8s(input_shape=(224, 224, 3),
                                model_input=base_model)
    #adadelta = optimizers.Adadelta(lr=0.05, rho=0.9, decay=0.0)
    adam = optimizers.adam(lr=float("1e-4"))
    model.compile(optimizer=adam,
                  loss=euc_joint_dist_loss,
                  metrics=[euc_joint_metrics_dist_keras, metrics_pckh])
    #lrate = LearningRateScheduler(step_decay)
    clr = CyclicLR(base_lr=float("1e-7"),
                   max_lr=float("1e-4"),
                   step_size=2069,
                   mode='triangular')
    model.summary()
    print(ckpt_path)
    if ckpt_model != 'None':
        model.load_weights(ckpt_model)
    result = model.fit_generator(
        generator=pose3d_get_train_batch(train_array, 8, True),
        steps_per_epoch=2069,
        callbacks=[ckpt, clr],
        epochs=60000,
        verbose=1,
        validation_data=pose3d_get_train_batch(val_array, 8, False),
        validation_steps=593,
        workers=1)
Example #7
def get_callbacks(path_out, sample_size, batch_size):
    # early stopping callback
    early_stopping = EarlyStopping(monitor='val_loss',
                                   patience=20,
                                   verbose=1,
                                   restore_best_weights=False)

    csv_logger = CSVLogger(f'{path_out}loss_history.log')

    # model checkpoint callback
    # this saves our model architecture + parameters into model.h5
    model_checkpoint = ModelCheckpoint(f'{path_out}model.h5',
                                       monitor='val_loss',
                                       verbose=0,
                                       save_best_only=True,
                                       save_weights_only=False,
                                       mode='auto',
                                       period=1)

    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.5,
                                  patience=4,
                                  min_lr=0.000001,
                                  cooldown=3,
                                  verbose=1)

    lr_scale = 1.
    clr = CyclicLR(base_lr=0.0003 * lr_scale,
                   max_lr=0.001 * lr_scale,
                   step_size=sample_size / batch_size,
                   mode='triangular2')

    stop_on_nan = tensorflow.keras.callbacks.TerminateOnNaN()

    callbacks = [
        early_stopping, clr, stop_on_nan, csv_logger, model_checkpoint
    ]

    return callbacks
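A usage sketch for this factory (path_out, the sizes, and the model/data names are all placeholders):

# The returned list plugs straight into fit(); every value here is illustrative.
cb = get_callbacks(path_out='out/', sample_size=10000, batch_size=32)
model.fit(x_train, y_train,
          validation_split=0.1,
          epochs=100,
          batch_size=32,
          callbacks=cb)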
Example #8
import math  # needed by the step-decay schedule below


def lr_scheduler(epoch):
    """Step decay: halve the learning rate every three epochs."""
    initial_lrate = 1e-3  # assumed starting rate; matches the lr set below
    drop = 0.5
    epochs_drop = 3.0
    lrate = initial_lrate * math.pow(drop, math.floor(
        (1 + epoch) / epochs_drop))
    return lrate


############
#   Hyperparameter for each model
############
#Model 1:
lr_decay = callbacks.LearningRateScheduler(
    schedule=lr_scheduler
)  #lambda epoch: configuration.lr * (0.9 ** configuration.epochs))
lr = 1e-3
clr_00 = CyclicLR(base_lr=1e-6, max_lr=6e-3, step_size=3200, mode='exp_range')
#Model 2:
#NONE
#from keras_gradient_noise import add_gradient_noise
#opt_noise = add_gradient_noise(optimizers.Adam)
optimizer = optimizers.Adam()  #opt_noise(lr, amsgrad=True)
HED = True
losses = {
    "o1": loss_functions.cross_entropy_balanced,
    "o2": loss_functions.cross_entropy_balanced,
    "o3": loss_functions.cross_entropy_balanced,
    "o4": loss_functions.cross_entropy_balanced,
    "o5": loss_functions.cross_entropy_balanced,
    "ofuse": loss_functions.cross_entropy_balanced,
    "output": loss_functions.dice_coef_loss,
}
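For context, a multi-output Keras model consumes a dict like losses directly in compile(), keyed by output-layer name; only the key names come from the example, the model itself is assumed:

# compile() accepts a per-output loss dict for multi-output models;
# 'model' is assumed to have output layers named o1..o5, ofuse, output.
model.compile(optimizer=optimizer, loss=losses)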
Example #9
Y = FileInput(opt.input, 'Y')
print('Y.shape', Y.shape)
# print('Z.shape', Z.shape)

# inputs, outputs = create_output_graph()
if opt.simple:
    inputs, outputs = create_simple_graph()
else:
    inputs, outputs = create_output_graph(n_features=n_features_pf,
                                          n_features_cat=n_features_pf_cat)

lr_scale = 1.

# lr = CyclicLR(base_lr=0.001, max_lr=0.01, step_size=len(Y)/5., mode='triangular2')
clr = CyclicLR(base_lr=0.0003 * lr_scale,
               max_lr=0.001 * lr_scale,
               step_size=len(Y) / batch_size,
               mode='triangular2')

# create the model
model = Model(inputs=inputs, outputs=outputs)
# compile the model
optimizer = optimizers.Adam(lr=1., clipnorm=1.)
# optimizer = optimizers.SGD(lr=0.0001, decay=0., momentum=0., nesterov=False)
# optimizer = AdamW(lr=0.0000, beta_1=0.8, beta_2=0.999, epsilon=None, decay=0., weight_decay=0.000, batch_size=batch_size, samples_per_epoch=int(len(Z)*0.8), epochs=epochs)

model.compile(loss='mse',
              optimizer=optimizer,
              metrics=['mean_absolute_error', 'mean_squared_error'])
# model.compile(loss=custom_loss, optimizer=optimizer,
# metrics=['mean_absolute_error', 'mean_squared_error'])
# print the model summary
model.summary()
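The seemingly aggressive Adam(lr=1., clipnorm=1.) is mostly harmless here: CyclicLR overwrites the optimizer's learning rate at the start of training and after every batch, so the compile-time value only matters until the first batch. keras_contrib's implementation also records the rate it applied, which can be inspected after training; a sketch assuming matplotlib is available:

# Inspect the learning-rate schedule CyclicLR actually applied during fit().
import matplotlib.pyplot as plt

plt.plot(clr.history['iterations'], clr.history['lr'])
plt.xlabel('iteration')
plt.ylabel('learning rate')
plt.show()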
Example #10
#parallel_model = multi_gpu_model(model, gpus=4)
#parallel_model.compile(loss='mean_squared_error',
#         optimizer=SGD(lr=0.0001, momentum=0.9),    
#              metrics=['mae',r2])

model.compile(loss='categorical_crossentropy',
              optimizer=SGD(momentum=0.9),
#             optimizer=Adam(lr=0.00001),
#             optimizer=Adam(),
#             optimizer=RMSprop(lr=0.0001),
#             optimizer=Adadelta(),
              metrics=['acc', tf_auc])

# set up a bunch of callbacks to do work during model training..

clr = CyclicLR(base_lr=0.0000001, max_lr=0.0001,
               step_size=2000., mode='triangular2', scale_mode='iterations')

checkpointer = ModelCheckpoint(filepath='Agg_attn_bin.autosave.model.h5', verbose=1, save_weights_only=False, save_best_only=True)
csv_logger = CSVLogger('Agg_attn_bin.training.log')
reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.20, patience=40, verbose=1, mode='auto', min_delta=0.0001, cooldown=3, min_lr=0.000000001)
early_stop = EarlyStopping(monitor='val_loss', patience=200, verbose=1, mode='auto')



#history = parallel_model.fit(X_train, Y_train,

history = model.fit(X_train, Y_train, class_weight=d_class_weights,
                    batch_size=BATCH,
                    epochs=EPOCH,
                    verbose=1,
                    validation_data=(X_test, Y_test),
                    # closing arguments assumed: wire in the callbacks built above
                    callbacks=[clr, checkpointer, csv_logger, reduce_lr, early_stop])