Exemplo n.º 1
0
def test_trialwise_predict_and_predict_proba():
    """Trialwise decoding: ``predict`` yields per-trial argmax labels and
    ``predict_proba`` returns the model outputs unchanged."""
    model_outputs = np.array([
        [0.125, 0.875],
        [1., 0.],
        [0.8, 0.2],
        [0.9, 0.1],
    ])
    classifier = EEGClassifier(
        MockModule(model_outputs), optimizer=optim.Adam, batch_size=32)
    classifier.initialize()
    expected_labels = model_outputs.argmax(1)
    np.testing.assert_array_equal(
        expected_labels, classifier.predict(MockDataset()))
    np.testing.assert_array_equal(
        model_outputs, classifier.predict_proba(MockDataset()))
Exemplo n.º 2
0
def test_cropped_predict_and_predict_proba_not_aggregate_predictions():
    """Cropped decoding with ``aggregate_predictions=False``: predictions are
    returned per crop (no averaging over the time dimension)."""
    model_outputs = np.array([
        [[0.2, 0.1, 0.1, 0.1], [0.8, 0.9, 0.9, 0.9]],
        [[1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
        [[1.0, 1.0, 1.0, 0.2], [0.0, 0.0, 0.0, 0.8]],
        [[0.9, 0.8, 0.9, 1.0], [0.1, 0.2, 0.1, 0.0]],
    ])
    classifier = EEGClassifier(
        MockModule(model_outputs),
        cropped=True,
        criterion=CroppedLoss,
        criterion__loss_function=nll_loss,
        optimizer=optim.Adam,
        batch_size=32,
        aggregate_predictions=False,
    )
    classifier.initialize()
    # One label per crop: argmax over the class axis, time axis kept.
    np.testing.assert_array_equal(
        model_outputs.argmax(1), classifier.predict(MockDataset()))
    # Raw per-crop outputs come back untouched.
    np.testing.assert_array_equal(
        model_outputs, classifier.predict_proba(MockDataset()))
Exemplo n.º 3
0
def test_cropped_predict_and_predict_proba():
    """Cropped decoding with default aggregation: crop-wise outputs are
    averaged over time before labels/probabilities are returned."""
    model_outputs = np.array([
        [[0.2, 0.1, 0.1, 0.1], [0.8, 0.9, 0.9, 0.9]],
        [[1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
        [[1.0, 1.0, 1.0, 0.2], [0.0, 0.0, 0.0, 0.8]],
        [[0.9, 0.8, 0.9, 1.0], [0.1, 0.2, 0.1, 0.0]],
    ])
    classifier = EEGClassifier(
        MockModule(model_outputs),
        cropped=True,
        criterion=CroppedLoss,
        criterion__loss_function=nll_loss,
        optimizer=optim.Adam,
        batch_size=32,
    )
    classifier.initialize()
    crop_averaged = model_outputs.mean(-1)
    # One label per trial: argmax of the crop-averaged outputs.
    np.testing.assert_array_equal(
        crop_averaged.argmax(1), classifier.predict(MockDataset()))
    # One probability vector per trial: mean over all crops.
    np.testing.assert_array_equal(
        crop_averaged, classifier.predict_proba(MockDataset()))
# Balanced accuracy curves on the second axis (loss is on ax1, set up above).
df[['train_bal_acc', 'valid_bal_acc']].plot(color=['r', 'b'], ax=ax2)
ax1.set_ylabel('Loss')
ax2.set_ylabel('Balanced accuracy')
# Both axes share the same train/valid legend.
for axis in (ax1, ax2):
    axis.legend(['Train', 'Valid'])
fig.tight_layout()
plt.show()

######################################################################
# Finally, we also display the confusion matrix and classification report:
#

from sklearn.metrics import confusion_matrix, classification_report

# Ground-truth label for each validation window; valid_set[[i]] presumably
# returns a (X, y, ...) batch of one window — confirm against the dataset class.
y_true = [valid_set[[i]][1][0] for i in range(len(valid_sampler))]
# Predicted labels for the whole validation set.
y_pred = clf.predict(valid_set)

print(confusion_matrix(y_true, y_pred))
print(classification_report(y_true, y_pred))

######################################################################
# The model was able to learn despite the low amount of data that was available
# (only two recordings in this example) and reached a balanced accuracy of
# about 43% in a 5-class classification task (chance-level = 20%) on held-out
# data over 10 epochs.
#
# .. note::
#    To further improve performance, the number of epochs should be increased.
#    It has been reduced here for faster run-time in document generation. In
#    testing, 10 epochs provided reasonable performance with around 89% balanced
#    accuracy on training data and around 43% on held out validation data.