checkpoint2 = ModelCheckpoint(MODEL_PATH + 'model_loss{}.h5'.format(exp_suffix), monitor='val_loss', verbose=1,
                              save_best_only=True, mode='min', save_weights_only=True)
tensorboard = TensorBoard(MODEL_PATH + 'logs{}'.format(fold_id) + '{}'.format(exp_suffix) + '/')
# reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3,
#                                    verbose=1, mode='auto', epsilon=0.0001)
# early = EarlyStopping(monitor="val_loss",
#                       mode="min",
#                       patience=6)
# Twice the validation steps because validation-time augmentation (val_aug) doubles the samples.
f1_metric = F1Metric(validation_generator2, 2 * len(valid_indexes) // batch_size, batch_size, 28)

nb_epochs = epochs[1]
nb_cycles = 15
init_lr = 0.001


def _cosine_anneal_schedule(t):
    # Cosine annealing with warm restarts: within each of the nb_cycles cycles,
    # the learning rate decays from init_lr down to 0 along a half-cosine.
    cos_inner = np.pi * (t % (nb_epochs // nb_cycles))
    cos_inner /= nb_epochs // nb_cycles
    cos_out = np.cos(cos_inner) + 1
    return float(init_lr / 2 * cos_out)
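# A minimal sketch (an assumption, not part of the original script) of how
# _cosine_anneal_schedule could be wired into training: Keras'
# LearningRateScheduler calls the schedule with the epoch index and sets the
# optimizer's learning rate from its return value. 'lr_scheduler' is a
# hypothetical name introduced here for illustration.
# from keras.callbacks import LearningRateScheduler
# lr_scheduler = LearningRateScheduler(_cosine_anneal_schedule, verbose=1)
# callbacks_list = [f1_metric, checkpoint2, tensorboard, lr_scheduler]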
                                              oversample_factor=0)
checkpoint = ModelCheckpoint(MODEL_PATH + 'model_{}.h5'.format(exp_suffix), monitor='val_f1_all', verbose=1,
                             save_best_only=True, mode='max', save_weights_only=True)
tensorboard = TensorBoard(MODEL_PATH + 'logs{}_'.format(fold_id) + '{}'.format(exp_suffix) + '/')
# reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3,
#                                    verbose=1, mode='auto', epsilon=0.0001)
# early = EarlyStopping(monitor="val_loss",
#                       mode="min",
#                       patience=6)
# Validate one sample at a time: batch_size of 1, so steps = len(valid_indexes).
f1_metric = F1Metric(validation_generator, len(valid_indexes) // 1, 1, 28)
callbacks_list = [f1_metric, checkpoint, tensorboard]

# Warm up the model: freeze the pretrained backbone and train only the last six (head) layers.
model = create_model(input_shape=(SIZE, SIZE, 3), n_out=28)
for layer in model.layers:
    layer.trainable = False
#model.layers[2].trainable = True
for layer in model.layers[-6:]:
    layer.trainable = True
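# A minimal sketch (assumptions: the loss, optimizer, learning rate, and epoch
# count below are placeholders, not values confirmed by this script) of the
# compile/fit step that typically follows the warm-up setup above; with the
# backbone frozen, only the six unfrozen head layers receive updates.
# from keras.optimizers import Adam
# model.compile(loss='binary_crossentropy',  # sigmoid multi-label head assumed
#               optimizer=Adam(lr=1e-3),
#               metrics=['acc'])
# model.fit_generator(train_generator,
#                     steps_per_epoch=len(train_indexes) // batch_size,
#                     validation_data=validation_generator,
#                     validation_steps=len(valid_indexes) // batch_size,
#                     epochs=epochs[0],  # assumed warm-up epoch count
#                     callbacks=callbacks_list,
#                     verbose=1)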
                                              augument=False)
checkpoint = ModelCheckpoint(MODEL_PATH + 'model_{}.h5'.format(exp_suffix), monitor='val_f1_all', verbose=1,
                             save_best_only=True, mode='max', save_weights_only=True)
tensorboard = TensorBoard(MODEL_PATH + 'logs{}_'.format(fold_id) + '{}'.format(exp_suffix) + '/')
# reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3,
#                                    verbose=1, mode='auto', epsilon=0.0001)
# early = EarlyStopping(monitor="val_loss",
#                       mode="min",
#                       patience=6)
f1_metric = F1Metric(validation_generator, len(valid_indexes) // batch_size, batch_size, 28)
callbacks_list = [f1_metric, checkpoint, tensorboard]

# Warm up the model: freeze the pretrained backbone and train only the last six (head) layers.
model = create_model(input_shape=(SIZE, SIZE, 3), n_out=28)
for layer in model.layers:
    layer.trainable = False
#model.layers[2].trainable = True
for layer in model.layers[-6:]:
    layer.trainable = True
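# A minimal sketch (an assumption about the stage that follows; the learning
# rate is a placeholder) of the usual continuation after a warm-up: unfreeze
# the entire network and recompile with a lower learning rate before the full
# fine-tuning run that checkpoint2 and _cosine_anneal_schedule above serve.
# for layer in model.layers:
#     layer.trainable = True
# model.compile(loss='binary_crossentropy', optimizer=Adam(lr=1e-4), metrics=['acc'])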
#ax1.bar(n_keys, [train_sum_vec[k] for k in n_keys])
#ax1.set_title('Training Distribution')
#ax2.bar(n_keys, [valid_sum_vec[k] for k in n_keys])
#ax2.set_title('Validation Distribution')

# Create train and valid datagens; only fold 2 trains with augmentation.
aug = False
if fold_id == 2:
    aug = True
train_generator = data_generator.create_train(train_dataset_info[train_indexes], batch_size, (SIZE, SIZE, 3),
                                              augument=aug)
validation_generator = data_generator.create_train(train_dataset_info[valid_indexes], batch_size, (SIZE, SIZE, 3),
                                                   augument=False)
# f1_macro results in an additional metric 'val_f1_all' in history, which can be used for checkpointing.
f1_macro = F1Metric(validation_generator, steps=len(valid_indexes) // batch_size, batch_size=batch_size,
                    num_classes=28)
checkpoint = ModelCheckpoint(MODEL_PATH + 'model_{}.h5'.format(fold_id), monitor='val_f1_all', verbose=1,
                             save_best_only=True, mode='max', save_weights_only=True)
tensorboard = TensorBoard(MODEL_PATH + 'logs{}/'.format(fold_id))
callbacks_list = [f1_macro, checkpoint, tensorboard]

# Warm up the model: freeze every layer, then unfreeze only the output layer.
model = create_model(input_shape=(SIZE, SIZE, 4), n_out=28)
for layer in model.layers:
    layer.trainable = False
#model.layers[2].trainable = True
model.layers[-1].trainable = True
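# A minimal sketch (an assumption; the real F1Metric class is defined elsewhere
# in the repo) of a callback matching the F1Metric(generator, steps, batch_size,
# num_classes) signature used above: at each epoch end it runs the validation
# generator, computes macro F1 at a fixed 0.5 threshold, and writes the score
# into logs as 'val_f1_all' so ModelCheckpoint can monitor it.
# import numpy as np
# from keras.callbacks import Callback
# from sklearn.metrics import f1_score
#
# class F1Metric(Callback):
#     def __init__(self, generator, steps, batch_size, num_classes):
#         super(F1Metric, self).__init__()
#         self.generator = generator
#         self.steps = steps
#         self.batch_size = batch_size
#         self.num_classes = num_classes
#
#     def on_epoch_end(self, epoch, logs=None):
#         y_true = np.zeros((self.steps * self.batch_size, self.num_classes))
#         y_pred = np.zeros_like(y_true)
#         for i in range(self.steps):
#             x_batch, y_batch = next(self.generator)
#             s = slice(i * self.batch_size, i * self.batch_size + len(y_batch))
#             y_true[s] = y_batch
#             y_pred[s] = self.model.predict(x_batch)
#         logs = logs or {}
#         logs['val_f1_all'] = f1_score(y_true, (y_pred > 0.5).astype(int), average='macro')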