import os

import numpy as np
from keras.callbacks import (EarlyStopping, ModelCheckpoint,
                             ReduceLROnPlateau, TensorBoard)
from keras.preprocessing.image import ImageDataGenerator


def train_model(train_dir, valid_dir,
                name_optimizer='sgd',
                learning_rate=1.0,
                decay_learning_rate=1e-8,
                all_trainable=False,
                model_weights_path=None,
                no_class=200,
                batch_size=128,
                epoch=20,
                tensorboard_dir=None,
                checkpoint_dir=None):
    '''Train or retrain model.

    Args:
        train_dir: train dataset directory.
        valid_dir: validation dataset directory.
        name_optimizer: optimizer method.
        learning_rate: learning rate.
        decay_learning_rate: learning rate decay.
        all_trainable: whether to train all layers or only the new head.
        model_weights_path: path of Keras model weights.
        no_class: number of prediction classes.
        batch_size: batch size.
        epoch: number of training epochs.
        tensorboard_dir: TensorBoard logs directory. If None, skip it.
        checkpoint_dir: checkpoints directory. If None, skip it.

    Returns:
        Training history.
    '''
    model_bcnn = buil_bcnn(
        all_trainable=all_trainable,
        size_height=244,
        size_width=244,
        no_class=no_class,
        name_optimizer=name_optimizer,
        learning_rate=learning_rate,
        decay_learning_rate=decay_learning_rate)

    if model_weights_path:
        model_bcnn.load_weights(model_weights_path)

    # Load data
    train_generator, valid_generator = build_generator(
        train_dir=train_dir,
        valid_dir=valid_dir,
        batch_size=batch_size)

    # Callbacks
    callbacks = []
    if tensorboard_dir:
        cb_tensorboard = TensorBoard(
            log_dir=tensorboard_dir,
            histogram_freq=0,
            batch_size=batch_size,
            write_graph=False)
        callbacks.append(cb_tensorboard)

    if checkpoint_dir:
        cb_checkpoint = ModelCheckpoint(
            os.path.join(checkpoint_dir,
                         'Xception_model_{epoch:02d}-{val_acc:.3f}.h5'),
            save_weights_only=True,
            monitor='val_acc',
            verbose=True)
        callbacks.append(cb_checkpoint)

    # Halve the learning rate when validation accuracy plateaus, and stop
    # training once it has not improved for 20 epochs.
    cb_reducer = ReduceLROnPlateau(
        monitor='val_acc', factor=0.5, patience=5,
        min_lr=1e-6, min_delta=1e-3)
    cb_stopper = EarlyStopping(
        monitor='val_acc', min_delta=1e-3, patience=20,
        verbose=0, mode='auto')
    callbacks += [cb_reducer, cb_stopper]

    # Train
    history = model_bcnn.fit_generator(
        train_generator,
        steps_per_epoch=100,
        epochs=epoch,
        validation_data=valid_generator,
        validation_steps=50,
        callbacks=callbacks)

    model_bcnn.save_weights('./new_model_weights.h5')

    return history
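# Example invocation (the directory paths below are placeholders for
# illustration, not paths from the original project):
#
#     history = train_model(
#         train_dir='data/train',
#         valid_dir='data/valid',
#         no_class=200,
#         batch_size=128,
#         epoch=20,
#         tensorboard_dir='./logs',
#         checkpoint_dir='./checkpoints')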
def train_model(name_optimizer='sgd',
                learning_rate=0.05,
                decay_learning_rate=1e-9,
                all_trainable=True,
                model_weights_path=None,
                no_class=10,
                batch_size=BACTH_SIZE,
                epoch=300,
                tensorboard_dir=None,
                checkpoint_dir=None):
    '''Train or retrain model.

    Data is read from the module-level train_data and validation_data via
    generate_batch_data_random.

    Args:
        name_optimizer: optimizer method.
        learning_rate: learning rate.
        decay_learning_rate: learning rate decay.
        all_trainable: whether to train all layers or only the new head.
        model_weights_path: path of Keras model weights.
        no_class: number of prediction classes.
        batch_size: batch size.
        epoch: number of training epochs.
        tensorboard_dir: TensorBoard logs directory. If None, skip it.
        checkpoint_dir: checkpoints directory. If None, skip it.

    Returns:
        Training history.
    '''
    model = buil_bcnn(
        all_trainable=all_trainable,
        no_class=no_class,
        name_optimizer=name_optimizer,
        learning_rate=learning_rate,
        decay_learning_rate=decay_learning_rate,
        name_activation='softmax',
        name_loss='categorical_crossentropy')

    if model_weights_path:
        model.load_weights(model_weights_path)

    # Callbacks
    callbacks = []
    if tensorboard_dir:
        cb_tensorboard = TensorBoard(
            log_dir=tensorboard_dir,
            histogram_freq=0,
            batch_size=batch_size,
            write_graph=False)
        callbacks.append(cb_tensorboard)

    # if checkpoint_dir:
    #     cb_checkpoint = ModelCheckpoint(
    #         os.path.join(checkpoint_dir, 'model_{epoch:02d}-{val_acc:.3f}.h5'),
    #         save_weights_only=True,
    #         monitor='val_loss',
    #         verbose=True)
    #     callbacks.append(cb_checkpoint)

    cb_reducer = ReduceLROnPlateau(
        monitor='val_loss', factor=0.5, patience=5,
        min_lr=1e-6, min_delta=1e-3)
    cb_stopper = EarlyStopping(
        monitor='val_loss', min_delta=1e-3, patience=10,
        verbose=0, mode='auto')
    callbacks += [cb_reducer, cb_stopper]

    # Train
    # Save the best model seen so far, ranked by validation accuracy.
    filepath = "./checkpoint/sgd-weights-improvement-{epoch:02d}-{val_acc:.2f}.h5"
    checkpoint = ModelCheckpoint(
        filepath=filepath,
        monitor='val_acc',
        verbose=1,
        save_best_only=True,  # was the string 'True'; use the boolean
        mode='max',
        period=1)
    # Pass every callback built above, plus the best-model checkpoint.
    callback_list = callbacks + [checkpoint]

    train_generator = generate_batch_data_random(train_data, BACTH_SIZE, encoder)
    validation_generator = generate_batch_data_random(validation_data, BACTH_SIZE, encoder)

    # loader = DataLoader(datapath=data_dir)  # use generator
    # datagen = loader.generate(batch_size)
    # iterations = loader.train_size // batch_size

    # split_ratio is the number of training samples (out of 50000 total);
    # steps arguments must be integers, hence the floor division.
    history = model.fit_generator(
        train_generator,
        validation_data=validation_generator,
        epochs=epoch,
        steps_per_epoch=split_ratio // batch_size,
        validation_steps=(50000 - split_ratio) // batch_size,
        callbacks=callback_list)

    return history
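# generate_batch_data_random, train_data, validation_data, and encoder are
# defined elsewhere in this module. A minimal sketch of what the generator
# might look like, assuming data is an (images, labels) pair and encoder is a
# fitted sklearn LabelBinarizer (both the record layout and the transform()
# interface are assumptions, not confirmed by the original code):
def generate_batch_data_random(data, batch_size, encoder):
    '''Yield random (x, y) batches forever, as fit_generator expects.'''
    x, y = data
    n = len(x)
    while True:
        idx = np.random.randint(0, n, batch_size)  # sample with replacement
        yield x[idx], encoder.transform(y[idx])    # one-hot encode the labels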
test_datagen = ImageDataGenerator(
    rescale=1. / 255,
    samplewise_center=True,
    samplewise_std_normalization=True,
    rotation_range=100,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='constant',
    cval=0)

print(nbr_augmentation, 'Loading model and weights from training process ...')
model = buil_bcnn(
    all_trainable=False,
    no_class=2,
    name_optimizer='sgd',
    learning_rate=0.0001,
    decay_learning_rate=1e-8)
model.load_weights(weights_path)

for idx in range(nbr_augmentation):
    print('{}th augmentation for testing ...'.format(idx))
    # np.random.random_integers is deprecated; randint is its replacement.
    random_seed = np.random.randint(0, 100000)
    test_generator = test_datagen.flow_from_directory(
        test_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        shuffle=False,  # Important !!! Keeps sample order fixed across passes.
        seed=random_seed,
        classes=None)
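    # The original snippet is truncated here. A hedged sketch of how a
    # test-time-augmentation loop like this one is usually completed:
    # accumulate the predicted scores of each augmented pass and average them
    # afterwards. `predictions` is an assumed name, not part of the original
    # code, and steps are inferred from the directory iterator (Keras 2).
    if idx == 0:
        predictions = model.predict_generator(test_generator)
    else:
        predictions += model.predict_generator(test_generator)

# Average the per-augmentation scores to obtain the final predictions.
predictions /= nbr_augmentation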
def train_model(data_dir,
                name_optimizer='sgd',
                learning_rate=1.0,
                decay_learning_rate=1e-8,
                all_trainable=False,
                model_weights_path=None,
                no_class=100,
                batch_size=32,
                epoch=100,
                tensorboard_dir=None,
                checkpoint_dir=None):
    '''Train or retrain model.

    Args:
        data_dir: directory of the pre-saved .npy data arrays.
        name_optimizer: optimizer method.
        learning_rate: learning rate.
        decay_learning_rate: learning rate decay.
        all_trainable: whether to train all layers or only the new head.
        model_weights_path: path of Keras model weights.
        no_class: number of prediction classes.
        batch_size: batch size.
        epoch: number of training epochs.
        tensorboard_dir: TensorBoard logs directory. If None, skip it.
        checkpoint_dir: checkpoints directory. If None, skip it.

    Returns:
        Training history.
    '''
    model = buil_bcnn(
        all_trainable=all_trainable,
        no_class=no_class,
        name_optimizer=name_optimizer,
        learning_rate=learning_rate,
        decay_learning_rate=decay_learning_rate)

    if model_weights_path:
        model.load_weights(model_weights_path)

    # Load data
    # train_generator, valid_generator = build_generator(
    #     train_dir=train_dir,
    #     valid_dir=valid_dir,
    #     batch_size=batch_size)

    # TODO: use a generator to save memory.
    loader = DataLoader(npypath=data_dir)
    trainx, trainy = loader.trainx, loader.trainy
    validx, validy = loader.testx, loader.testy

    # Callbacks
    callbacks = []
    if tensorboard_dir:
        cb_tensorboard = TensorBoard(
            log_dir=tensorboard_dir,
            histogram_freq=0,
            batch_size=batch_size,
            write_graph=False)
        callbacks.append(cb_tensorboard)

    if checkpoint_dir:
        cb_checkpoint = ModelCheckpoint(
            os.path.join(checkpoint_dir, 'model_{epoch:02d}-{val_acc:.3f}.h5'),
            save_weights_only=True,
            monitor='val_loss',
            verbose=True)
        callbacks.append(cb_checkpoint)

    cb_reducer = ReduceLROnPlateau(
        monitor='val_loss', factor=0.5, patience=5,
        min_lr=1e-6, min_delta=1e-3)
    cb_stopper = EarlyStopping(
        monitor='val_loss', min_delta=1e-3, patience=10,
        verbose=0, mode='auto')
    callbacks += [cb_reducer, cb_stopper]

    # Train
    # history = model.fit_generator(
    #     train_generator,
    #     epochs=epoch,
    #     validation_data=valid_generator,
    #     callbacks=callbacks)
    history = model.fit(
        trainx, trainy,
        epochs=epoch,
        batch_size=batch_size,
        validation_data=(validx, validy),
        verbose=1,
        shuffle=True,
        callbacks=callbacks)  # the callbacks built above were never passed in

    model.save_weights('./new_model_weights.h5')

    return history
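# The DataLoader used above is not shown in this file. A minimal sketch of an
# equivalent loader, assuming the arrays are stored as trainx/trainy/testx/
# testy .npy files under npypath (the file names and attribute layout are
# assumptions, not confirmed by the original code):
class DataLoader(object):
    '''Load pre-split train/test arrays from .npy files in npypath.'''

    def __init__(self, npypath):
        self.trainx = np.load(os.path.join(npypath, 'trainx.npy'))
        self.trainy = np.load(os.path.join(npypath, 'trainy.npy'))
        self.testx = np.load(os.path.join(npypath, 'testx.npy'))
        self.testy = np.load(os.path.join(npypath, 'testy.npy'))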