Example 1
        model.fit(train_data, train_labels, batch_size=batchsize, epochs=10, verbose=2)
        test_predict = model.predict(test_data, batch_size=batchsize, verbose=0)
        fpr, tpr, thresholds = roc_curve(flatten_test_labels, test_predict[:, 1], pos_label=1)
        test_auc_value = auc(fpr, tpr)
        auc_values.append(test_auc_value)
plt.plot(range(0, 500, 10), auc_values)
plt.title('Test AUC for filtered EEG')
plt.ylabel('AUC')
plt.xlabel('epoch')
plt.show()
"""
# model.save('***.h5')
# generate prediction probabilities
train_predict = model.predict(train_data, batch_size=batchsize, verbose=0)
test_predict = model.predict(test_data, batch_size=batchsize, verbose=0)
print(model.evaluate(test_data, test_labels, batch_size=batchsize))

# Calculate AUC
fpr, tpr, thresholds = roc_curve(flatten_train_labels,
                                 train_predict[:, 1],
                                 pos_label=1)
print("Training auc:")
train_auc_value = auc(fpr, tpr)
print(train_auc_value)

fpr, tpr, thresholds = roc_curve(flatten_test_labels,
                                 test_predict[:, 1],
                                 pos_label=1)
print("Testing auc:")
test_auc_value = auc(fpr, tpr)
print(test_auc_value)
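All three examples rely on the same scikit-learn pattern: roc_curve on the flat binary labels and the positive-class probability column, then auc on the resulting (fpr, tpr) pair. Below is a minimal, self-contained sketch of that pattern; the arrays are illustrative stand-ins for flatten_test_labels and test_predict, not values from the examples.

import numpy as np
from sklearn.metrics import roc_curve, auc

# Illustrative stand-ins: flat binary labels and an (n_samples, 2) matrix of
# predicted class probabilities, mirroring flatten_test_labels / test_predict.
y_true = np.array([0, 0, 1, 1, 1, 0])
y_score = np.array([[0.8, 0.2],
                    [0.6, 0.4],
                    [0.3, 0.7],
                    [0.1, 0.9],
                    [0.4, 0.6],
                    [0.7, 0.3]])

# Column 1 holds the positive-class probability, as in test_predict[:, 1].
fpr, tpr, thresholds = roc_curve(y_true, y_score[:, 1], pos_label=1)
print("AUC:", auc(fpr, tpr))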
Example 2
                            validation_data=(X_train_valid[test],
                                             y_train_valid[test]),
                            callbacks=[checkpointer],
                            class_weight=class_weights)

    # Log the current fold
    print(
        '------------------------------------------------------------------------'
    )
    print(f'Training for fold {fold_no} ...')

    # Evaluate performance on the held-out fold
    probs = model.predict(X_train_valid[test])
    preds = probs.argmax(axis=-1)
    scores = model.evaluate(X_train_valid[test],
                            y_train_valid[test],
                            verbose=0)

    # roc_auc_score expects probability scores (here a matrix matching the
    # one-hot labels), not hard argmax predictions; also avoid naming the
    # result `auc`, which would shadow sklearn.metrics.auc.
    fold_auc = roc_auc_score(y_train_valid[test], probs)
    acc_per_fold.append(scores[1] * 100)
    auc_per_fold.append(fold_auc)

    # Move on to the next fold
    fold_no += 1

# Final evaluation on the test set

probs = model.predict(X_test)
preds = probs.argmax(axis=-1)
acc = np.mean(preds == y_test.argmax(axis=-1))
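Example 2 accumulates one accuracy and one AUC per fold in acc_per_fold and auc_per_fold, but the snippet does not show how those lists are summarized. A typical, purely illustrative follow-up is to report the mean and standard deviation across folds:

import numpy as np

# Hypothetical per-fold values standing in for the acc_per_fold / auc_per_fold
# lists filled in the loop above.
acc_per_fold = [72.5, 68.1, 75.0, 70.3, 69.8]
auc_per_fold = [0.78, 0.74, 0.81, 0.76, 0.75]

print(f"Accuracy: {np.mean(acc_per_fold):.2f}% (+/- {np.std(acc_per_fold):.2f})")
print(f"AUC:      {np.mean(auc_per_fold):.3f} (+/- {np.std(auc_per_fold):.3f})")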
Example 3
                      optimizer='adam',
                      metrics=['accuracy'])
        history = model.fit(train_data,
                            train_labels,
                            batch_size=batchsize,
                            epochs=300,
                            verbose=2)
    # model.save('***.h5')
    # generate prediction probabilities
    train_predict = model.predict(train_data, batch_size=batchsize, verbose=0)
    validation_predict = model.predict(validation_data,
                                       batch_size=batchsize,
                                       verbose=0)
    test_predict = model.predict(test_data, batch_size=batchsize, verbose=0)
    validation_accuracy = model.evaluate(validation_data,
                                         validation_labels,
                                         batch_size=batchsize)
    print(validation_accuracy)
    test_accuracy = model.evaluate(test_data,
                                   test_labels,
                                   batch_size=batchsize)
    print(test_accuracy)

    # Calculate AUC
    fpr, tpr, thresholds = roc_curve(flatten_train_labels,
                                     train_predict[:, 1],
                                     pos_label=1)
    print("Training auc:")
    train_auc_value = auc(fpr, tpr)
    print(train_auc_value)
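In Example 3 the model is compiled with metrics=['accuracy'], so the model.evaluate calls above return a [loss, accuracy] pair rather than a single accuracy value, which is why print(validation_accuracy) and print(test_accuracy) each print two numbers. A small self-contained sketch of that behaviour, using a synthetic dataset and a toy model:

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Toy data and model, purely illustrative.
x = np.random.rand(32, 8).astype("float32")
y = np.random.randint(0, 2, size=(32,))

model = Sequential([Dense(16, activation="relu", input_shape=(8,)),
                    Dense(2, activation="softmax")])
model.compile(loss="sparse_categorical_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])
model.fit(x, y, epochs=1, verbose=0)

# With one metric configured, evaluate() returns [loss, accuracy].
loss, accuracy = model.evaluate(x, y, verbose=0)
print(loss, accuracy)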