Example #1
# assumed imports for this example: numpy/scikit-learn/Keras utilities plus the
# EEGNet reference implementation (Lawhern et al., 2018); EEGChannelScaler is a
# project-local helper, sketched further below
import numpy as np
from sklearn.model_selection import RepeatedStratifiedKFold, train_test_split
from sklearn.metrics import roc_auc_score
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import backend as K
from EEGModels import EEGNet

def init_model():
    global model
    # `chans`, `samples` and `eventDescription_offline_paradigm` are globals
    # defined elsewhere in the script
    classes_num = len(eventDescription_offline_paradigm)
    # configure the EEGNet-8,2,16 model with a kernel length of 128 samples
    # (other model configurations may do better, but this is a good starting point)
    model = EEGNet(nb_classes=classes_num,
                   Chans=chans,
                   Samples=samples,
                   dropoutRate=0.8,
                   kernLength=128,
                   F1=8,
                   D=2,
                   F2=16)
    model.summary()
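
init_model() leans on globals defined elsewhere in the script; the following is
a minimal, purely illustrative setup (these names and values are assumptions,
not taken from the source):

# hypothetical globals for the sketch above
eventDescription_offline_paradigm = ['target', 'non-target']  # assumed classes
chans = 8       # number of EEG channels (assumed)
samples = 256   # samples per epoch (assumed)

init_model()    # builds the global `model` and prints its summary
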
def evaluate_subject_models(data, labels, modelpath, subject):
    """
    Trains and evaluates EEGNet for a given subject in the P300 Speller database
    using repeated stratified K-fold cross validation.
    """
    n_sub = data.shape[0]
    n_ex_sub = data.shape[1]
    n_samples = data.shape[2]
    n_channels = data.shape[3]
    aucs = np.zeros(5 * 10)  # 5 folds x 10 repeats
    print("Training for subject {0}: ".format(subject))
    cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=10, random_state=123)
    for k, (t, v) in enumerate(cv.split(data[subject], labels[subject])):
        X_train, y_train, X_test, y_test = data[subject, t, :, :], labels[
            subject, t], data[subject, v, :, :], labels[subject, v]
        X_train, X_valid, y_train, y_valid = train_test_split(X_train,
                                                              y_train,
                                                              test_size=0.2,
                                                              shuffle=True,
                                                              random_state=456)
        print(
            'Partition {0}: X_train = {1}, X_valid = {2}, X_test = {3}'.format(
                k, X_train.shape, X_valid.shape, X_test.shape))

        # channel-wise feature standardization
        sc = EEGChannelScaler(n_channels=n_channels)
        X_train = np.swapaxes(
            sc.fit_transform(X_train)[:, np.newaxis, :], 2, 3)
        X_valid = np.swapaxes(sc.transform(X_valid)[:, np.newaxis, :], 2, 3)
        X_test = np.swapaxes(sc.transform(X_test)[:, np.newaxis, :], 2, 3)

        model = EEGNet(2, Chans=n_channels, Samples=n_samples)
        print(model.summary())
        model.compile(optimizer='adam', loss='categorical_crossentropy')

        # Early stopping setting also follows EEGNet (Lawhern et al., 2018)
        es = EarlyStopping(monitor='val_loss',
                           mode='min',
                           patience=50,
                           restore_best_weights=True)
        history = model.fit(X_train,
                            to_categorical(y_train),
                            batch_size=256,
                            epochs=200,
                            validation_data=(X_valid, to_categorical(y_valid)),
                            callbacks=[es])

        proba_test = model.predict(X_test)
        aucs[k] = roc_auc_score(y_test, proba_test[:, 1])
        print('S{0}, P{1} -- AUC: {2}'.format(subject, k, aucs[k]))
        K.clear_session()

    # note: np.savetxt writes a plain-text file, despite the .npy extension
    np.savetxt(modelpath + '/s' + str(subject) + '_aucs.npy', aucs)
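
EEGChannelScaler is not a scikit-learn or Keras class; it appears to be a
project-local helper. Below is a minimal sketch consistent with how it is
called above (fit_transform/transform over arrays of shape
(examples, samples, channels)), assuming plain per-channel standardization:

class EEGChannelScaler:
    """Standardize each EEG channel to zero mean and unit variance, with
    statistics estimated on the training partition only."""
    def __init__(self, n_channels):
        self.n_channels = n_channels
        self.mean_ = None
        self.std_ = None

    def fit(self, X):
        # X: (n_examples, n_samples, n_channels); pool over examples and
        # time so each channel gets a single mean/std
        self.mean_ = X.mean(axis=(0, 1), keepdims=True)
        self.std_ = X.std(axis=(0, 1), keepdims=True) + 1e-8
        return self

    def transform(self, X):
        return (X - self.mean_) / self.std_

    def fit_transform(self, X):
        return self.fit(X).transform(X)
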
# configure the EEGNet-4,2 model with a kernel length of 256 samples (other
# model configurations may do better, but this is a good starting point)
model = EEGNet(nb_classes=2,
               Chans=chans,
               Samples=samples,
               dropoutRate=0.5,
               kernLength=256,
               F1=4,
               D=2,
               F2=8,
               dropoutType='Dropout')

# compile the model and set the optimizers
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()
# count number of parameters in the model
numParams = model.count_params()

# set a valid path for your system to record model checkpoints
checkpointer = ModelCheckpoint(
    filepath='/content/gdrive/MyDrive/checkpoint.h5',
    verbose=1,
    save_best_only=True)

###############################################################################
# if the classification task were imbalanced (significantly more trials in one
# class versus the others), you could assign a weight to each class during
# optimization to balance it out. This dataset is approximately balanced, so we
# don't need to do this, but it is shown below for illustration/completeness.
###############################################################################
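
The block above refers to class weighting but the illustration itself was cut
off; here is a sketch of what it typically looks like in Keras (the weights and
the training arrays X_train/Y_train/X_validate/Y_validate are placeholders, not
values from the source):

# uniform weights for this balanced task; an imbalanced one might use
# something like {0: 1, 1: 5} to up-weight the rare class
class_weights = {0: 1, 1: 1}

fittedModel = model.fit(X_train, Y_train,
                        batch_size=16,
                        epochs=300,
                        verbose=2,
                        validation_data=(X_validate, Y_validate),
                        callbacks=[checkpointer],
                        class_weight=class_weights)
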
Example #4
# assumes the same imports as Example #1, plus sklearn's LeaveOneGroupOut
def evaluate_cross_subject_model(data, labels, modelpath):
    """
    Trains and evaluates EEGNet for each subject in the P300 Speller database
    using leave-one-subject-out (LeaveOneGroupOut) cross validation.
    """
    n_sub = data.shape[0]
    n_ex_sub = data.shape[1]
    n_samples = data.shape[2]
    n_channels = data.shape[3]

    aucs = np.zeros(n_sub)

    data = data.reshape((n_sub * n_ex_sub, n_samples, n_channels))
    labels = labels.reshape((n_sub * n_ex_sub))
    groups = np.array([i for i in range(n_sub) for j in range(n_ex_sub)])

    cv = LeaveOneGroupOut()
    for k, (t, v) in enumerate(cv.split(data, labels, groups)):
        X_train, y_train, X_test, y_test = data[t], labels[t], data[v], labels[
            v]

        # hold out one randomly chosen training subject as the validation set
        rg = np.random.choice(t, 1)
        sv = groups[t] == groups[rg]  # validation mask: trials of that subject
        st = np.logical_not(sv)       # training mask: all remaining subjects
        X_train, y_train, X_valid, y_valid = data[t][st], labels[t][st], data[
            t][sv], labels[t][sv]
        print("Partition {0}: train = {1}, valid = {2}, test = {3}".format(
            k, X_train.shape, X_valid.shape, X_test.shape))
        print("Groups train = {0}, valid = {1}, test = {2}".format(
            np.unique(groups[t][st]), np.unique(groups[t][sv]),
            np.unique(groups[v])))

        # channel-wise feature standardization
        sc = EEGChannelScaler(n_channels=n_channels)
        X_train = np.swapaxes(
            sc.fit_transform(X_train)[:, np.newaxis, :], 2, 3)
        X_valid = np.swapaxes(sc.transform(X_valid)[:, np.newaxis, :], 2, 3)
        X_test = np.swapaxes(sc.transform(X_test)[:, np.newaxis, :], 2, 3)

        model = EEGNet(2,
                       dropoutRate=0.25,
                       Chans=n_channels,
                       Samples=n_samples)
        print(model.summary())
        model.compile(optimizer='adam', loss='categorical_crossentropy')

        es = EarlyStopping(monitor='val_loss',
                           mode='min',
                           patience=50,
                           restore_best_weights=True)
        model.fit(X_train,
                  to_categorical(y_train),
                  batch_size=256,
                  epochs=200,
                  validation_data=(X_valid, to_categorical(y_valid)),
                  callbacks=[es])

        proba_test = model.predict(X_test)
        aucs[k] = roc_auc_score(y_test, proba_test[:, 1])
        print('P{0} -- AUC: {1}'.format(k, aucs[k]))
        K.clear_session()

    np.savetxt(modelpath + '/aucs.npy', aucs)
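
For reference, evaluate_cross_subject_model expects data shaped
(n_subjects, n_examples_per_subject, n_samples, n_channels); a hypothetical
call with synthetic inputs follows (shapes and path are illustrative only):

import numpy as np

# 10 subjects, 500 epochs each, 128 samples per epoch, 8 channels (assumed)
data = np.random.randn(10, 500, 128, 8).astype('float32')
labels = np.random.randint(0, 2, size=(10, 500))
evaluate_cross_subject_model(data, labels, modelpath='./results')  # dir must exist
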
Example #5
del y1_test, y2_test, y3_test, y4_test, y5_test

# X_train = X_train[:,4:9,:,50:150]
# X_test = X_test[:,4:9,:,50:150]

# format to match EEGNet's expected input

# X_train = EEGnetFormat(X_train)
# X_test = EEGnetFormat(X_test)

# nb_classes=1 with binary cross-entropy assumes an EEGNet variant whose final
# layer is a single sigmoid unit (the original softmax head would always
# output 1.0 for a single class)
model = EEGNet(nb_classes=1, Chans=35, Samples=100)
model.compile(optimizer='adam',
              loss=['binary_crossentropy'],
              metrics=['accuracy'])

print(model.summary())

# train the model (`out`, `epochs`, and the train/test arrays are assumed to be
# defined earlier; CSVLogger/TensorBoard/ModelCheckpoint/EarlyStopping come from
# tensorflow.keras.callbacks, and `time` from the standard library)
from time import time

csv_logger = CSVLogger(out + '.log')
filepath = out + ".hdf5"
tensorboard = TensorBoard(log_dir="../logs/{}_{}".format(out, time()))
checkpointer = ModelCheckpoint(monitor='val_loss',
                               filepath=filepath,
                               verbose=1,
                               save_best_only=True)
early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=10)
# the original snippet is truncated here; wiring in the callbacks defined above
model.fit(x=X_train,
          y=y_train,
          batch_size=128,
          epochs=epochs,
          validation_data=(X_test, y_test),
          callbacks=[csv_logger, tensorboard, checkpointer, early_stop])
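
Since the checkpoint uses save_best_only=True, the weights on disk may beat the
final in-memory weights; a short sketch of reloading them for a final
evaluation (assuming the compile settings above):

# reload the best checkpoint before the final test evaluation
model.load_weights(filepath)
loss, acc = model.evaluate(X_test, y_test, batch_size=128)
print('test loss = {0:.4f}, test accuracy = {1:.4f}'.format(loss, acc))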