Example #1
  # Checkpoint that keeps only the best weights (saving to model_path here is an assumption)
  model_checkpoint = ModelCheckpoint(model_path,
                                     monitor='val_acc',
                                     save_best_only=True,
                                     save_weights_only=True)
  callbacks = [custom_verbose, early_stop, model_checkpoint] if save_best_model else [custom_verbose, early_stop]

  lstm_densenet, skf, histories = train_crossval(create_lstm_densenet_model,
                                                 densenet_features,
                                                 y,
                                                 batch_size=batch_size,
                                                 epochs=epochs,
                                                 callbacks=callbacks,
                                                 n_folds=n_folds,
                                                 save_best_model=save_best_model,
                                                 model_path=model_path)

  plot_histories(histories, 'DenseNet-LSTM, {}-fold cross-validation'.format(n_folds))

else:
  lstm_densenet = load_model(densenet_lstm_model_path)


## TESTING ##

# Recover the train/test split that was used during cross-validation
skf = StratifiedKFold(n_splits=5, shuffle=False)
labels = np.argmax(y, axis=1)
# skf.split only uses its first argument for the number of samples, so the
# one-hot matrix y can be passed as X
train_test = [(train, test) for (train, test) in skf.split(y, labels)]
train_idx, test_idx = zip(*train_test)

# Get emotion predictions
test_indices = test_idx[1]
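
# A minimal sketch of how predictions on this held-out fold could be obtained
# (assumes densenet_features is the same array that was used for training and
# that lstm_densenet outputs one softmax vector per sample):
y_prob = lstm_densenet.predict(densenet_features[test_indices])
y_pred = np.argmax(y_prob, axis=1)
y_true = labels[test_indices]
print('Fold accuracy: {:.3f}'.format(np.mean(y_pred == y_true)))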
Example #2
	# Create the callbacks
	custom_verbose = CustomVerbose(epochs)
	early_stop = EarlyStopping(patience=100)

	callbacks = [custom_verbose, early_stop]

	lstm_sift, skf, histories = train_crossval(create_lstm_sift_model,
                                               vgg_sift_features,
                                               y,
                                               batch_size=batch_size,
                                               epochs=epochs,
                                               callbacks=callbacks,
                                               n_folds=n_folds,
                                               save_best_model=save_best_model,
                                               model_path=trained_model_path)

	print("\nTraining complete.")
	plot_histories(histories, 'VGG-SIFT-LSTM, {}-fold cross-validation'.format(n_folds))


## TESTING ##
model_path = trained_model_path if train else vgg_sift_lstm_model_path

y_pred, y_true = evaluate_model(vgg_sift_features, y, model_path, n_splits=5)

# Plot confusion matrix
cm = confusion_matrix(y_true, y_pred)
plot_confusion_matrix(cm, emotions, title='VGG-SIFT-LSTM  -  MUG', normalize=True)
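# Optionally, a quick numeric summary to go with the plot; this assumes the
# y_true and y_pred returned by evaluate_model are integer class indices aligned
# with the `emotions` label list:
from sklearn.metrics import accuracy_score, classification_report
print('Accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred)))
print(classification_report(y_true, y_pred, target_names=emotions))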

Example #3
tcnn_model_path = 'models/late_fusion/tcnn/tcnn2.h5'
# tcnn_model_path = 'models/late_fusion/tcnn/squeezenet_tcnn1.h5'
tcnn_features = 'vgg-tcnn'  # 'vgg-tcnn' or 'squeezenet-tcnn'

tcnn_top, hist_tcnn = train_leave_one_out(
    create_tcnn_top,  # use create_squeezenet_tcnn_top together with the 'squeezenet-tcnn' features
    data_path,
    frames_data_path,
    subjects,
    tcnn_features,
    epochs,
    callbacks,
    save_best_model=save_best_model,
    model_path=tcnn_model_path)

plot_histories(hist_phrnn, 'PHRNN Model - ADAS&ME')
plot_histories(hist_tcnn, 'TCNN Model - ADAS&ME')

#### TESTING ####

print('\nTesting model...')
merge_weight = 0.45
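# merge_weight sets the relative contribution of the TCNN stream in the late
# fusion; conceptually this amounts to a weighted average of the two streams'
# class probabilities, as in the purely illustrative lines below (the actual
# merging happens inside evaluate_tcnn_phrnn_model):
#   fused_probs = merge_weight * tcnn_probs + (1 - merge_weight) * phrnn_probs
#   y_pred = np.argmax(fused_probs, axis=1)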

# Set tcnn_model_path to None to test only the PHRNN, and vice versa
y_pred, y_true = evaluate_tcnn_phrnn_model(tcnn_model_path,
                                           phrnn_model_path,
                                           tcnn_features,
                                           phrnn_features,
                                           subjects,
                                           data_path,
                                           frames_data_path,
Example #4
features = 'vgg-sift'
annotations_path = '/Volumes/External/ProjectData/ADAS&ME/ADAS&ME_data/annotated'
files_list = FILES_LIST
class_weight = get_class_weight(files_list, annotations_path)
#class_weight = None
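# For reference, a comparable "balanced" weighting could be computed with
# sklearn (purely illustrative; `all_labels` is an assumed flat array of integer
# labels, whereas get_class_weight reads the annotation files itself):
#   from sklearn.utils.class_weight import compute_class_weight
#   w = compute_class_weight('balanced', classes=np.unique(all_labels), y=all_labels)
#   class_weight = dict(enumerate(w))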

# Start training. Two model factories are available:
#   create_lstm_finetuning(pretrained_model_path, optimizers.Adam())
#   create_lstm(optimizers.Adam())
lstm, histories = train_leave_one_out(create_lstm_finetuning(pretrained_model_path, optimizers.Adam()),
                                       data_path,
                                       features,
                                       annotations_path,
                                       files_list, 
                                       files_per_batch, 
                                       epochs, 
                                       callbacks, 
                                       class_weight,
                                       save_best_model=save_best_model, 
                                       model_path=model_path)

plot_histories(histories, 'Early Fusion Model - ADAS&ME')


#### TESTING ####

print('\nTesting model...')

y_pred, y_true = evaluate_model(model_path, data_path, annotations_path, files_list, files_per_batch)
print_model_eval_metrics(y_pred, y_true)
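
# A confusion matrix could be added here in the same way as in the
# cross-validated examples above (assumes an `emotions` label list and the same
# plotting helpers are available in this script):
#   cm = confusion_matrix(y_true, y_pred)
#   plot_confusion_matrix(cm, emotions, title='Early Fusion - ADAS&ME', normalize=True)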
Example #5
    custom_verbose = CustomVerbose(epochs)
    early_stop = EarlyStopping(patience=30)
    callbacks = [custom_verbose, early_stop]

    tcnn_top, skf, histories = train_tcnn_crossval(
        create_tcnn_top,
        vgg_features,
        y,
        batch_size=batch_size,
        epochs=epochs,
        callbacks=callbacks,
        n_splits=n_splits,
        save_best_model=save_best_model,
        model_path=model_path)
    print("\nTraining of TCNN complete.")
    plot_histories(histories,
                   'VGG-TCNN, {}-fold cross-validation'.format(n_splits))

    print("Training PHRNN model...")
    batch_size = 32
    epochs = 80
    n_splits = 5
    save_best_model = True
    model_path = 'models/phrnn/phrnn2.h5'

    # Create the callbacks
    custom_verbose = CustomVerbose(epochs)
    early_stop = EarlyStopping(patience=30)
    callbacks = [custom_verbose, early_stop]

    phrnn, skf, histories = train_phrnn_crossval(
        create_phrnn_model(features_per_lm=128),