Example #1
# count number of parameters in the model
numParams = model.count_params()

# set a valid path for your system to record model checkpoints
checkpointer = ModelCheckpoint(filepath='/tmp/checkpoint.h5', verbose=1,
                               save_best_only=True)
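
# Optionally, early stopping can be paired with the checkpointer so training
# halts once validation loss stops improving. This is only a sketch and not
# part of the original snippet; the patience value is an assumption, and the
# callback would need to be appended to the callbacks list passed to model.fit.
from tensorflow.keras.callbacks import EarlyStopping

early_stopper = EarlyStopping(monitor='val_loss', patience=10,
                              restore_best_weights=True, verbose=1)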

# Weighted loss
weight_0 = 1/(len([y for y in y_train_valid if y == 0]))
weight_1 = 1/(len([y for y in y_train_valid if y == 1]))
class_weights = {0: weight_0, 1: weight_1}
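
# As an alternative (a sketch, not part of the original snippet), scikit-learn
# can compute balanced class weights directly; this assumes y_train_valid is a
# 1-D array/list of integer labels (0/1), as implied by the comprehensions above.
import numpy as np
from sklearn.utils.class_weight import compute_class_weight

balanced = compute_class_weight(class_weight='balanced',
                                classes=np.unique(y_train_valid),
                                y=y_train_valid)
# class_weights = dict(enumerate(balanced))
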
# fit the model
fittedModel = model.fit(X_train, y_train, batch_size=34, epochs=100,
                        verbose=2, validation_data=(X_valid, y_valid),
                        callbacks=[checkpointer], class_weight=class_weights)

# load optimal weights
model.load_weights('/tmp/checkpoint.h5')

# Evaluate
y_probs = model.predict(X_test)
y_pred = y_probs.argmax(axis=-1)

# save score
csv = pd.read_csv('./data/benchmark.csv')
csv['Prediction'] = y_pred
csv.to_csv('./submission/submissionEEGNET.csv', index=False)
print('--------------------Submission file has been generated.--------------------------')

################################################################################
# fit the model. Due to very small sample sizes this can get
# pretty noisy run-to-run, but most runs should be comparable to xDAWN +
# Riemannian geometry classification (below)
################################################################################
hist = model.fit(X_train,
                 Y_train,
                 batch_size=16,
                 epochs=100,
                 verbose=2,
                 validation_split=0.33,
                 shuffle=True,
                 callbacks=[checkpointer],
                 class_weight=class_weights)
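
# The History object returned by fit stores per-epoch metrics, which is handy
# for eyeballing the run-to-run noise mentioned above. A minimal sketch,
# assuming matplotlib is available (not part of the original snippet).
import matplotlib.pyplot as plt

plt.plot(hist.history['loss'], label='train loss')
plt.plot(hist.history['val_loss'], label='val loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()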

# load optimal weights
model.load_weights('/content/gdrive/MyDrive/checkpoint.h5')

###############################################################################
# can alternatively use the weights provided in the repo. If so, it should get
# you 93% accuracy. Change the WEIGHTS_PATH variable to wherever it is on your
# system.
###############################################################################

# WEIGHTS_PATH = /path/to/EEGNet-8-2-weights.h5
# model.load_weights(WEIGHTS_PATH)

###############################################################################
# make prediction on test set.
###############################################################################

probs = model.predict(X_test)
Example #3
# Syntax is {class_1:weight_1, class_2:weight_2,...}.
 
# Weighted loss
weight_0 = 1/(len([y for y in y_train_valid if y == 0]))
weight_1 = 1/(len([y for y in y_train_valid if y == 1]))

class_weights = {0:weight_0, 1:weight_1}

################################################################################
# Fit the model. Due to very small sample sizes this can get
# pretty noisy run-to-run.
################################################################################
fittedModel = model.fit(X_train, Y_train, batch_size = 34, epochs = 100, 
                        verbose = 2, validation_data=(X_valid, Y_valid),
                        callbacks=[checkpointer], class_weight = class_weights)

# Load optimal weights
model.load_weights(filepath)

###############################################################################
# Make prediction on test set.
###############################################################################

probs       = model.predict(X_test)
preds       = probs.argmax(axis = -1)  
acc         = np.mean(preds == y_test.argmax(axis=-1))
# AUC is computed from the positive-class probability and integer labels
auc         = roc_auc_score(y_test.argmax(axis=-1), probs[:, 1])

print("Classification Accuracy: %f " % (acc))
print("Area Under Curve: %f" % (auc))
Example #4
# the syntax is {class_1:weight_1, class_2:weight_2,...}. Here just setting
# the weights all to be 1
class_weights = {0: 1, 1: 1}

################################################################################
# fit the model. Due to very small sample sizes this can get
# pretty noisy run-to-run, but most runs should be comparable to xDAWN +
# Riemannian geometry classification (below)
################################################################################
hist = model.fit(X_train, Y_train, batch_size = 16, epochs = 150,
                 verbose = 2, validation_split=0.33, shuffle=True,
                 callbacks=[checkpointer], class_weight = class_weights)

# load optimal weights
model.load_weights('C:\\Users\\USER\\PycharmProjects\\eegnet\\tmp\\checkpoint2.h5')

###############################################################################
# can alternatively use the weights provided in the repo. If so, it should get
# you 93% accuracy. Change the WEIGHTS_PATH variable to wherever it is on your
# system.
###############################################################################

# WEIGHTS_PATH = /path/to/EEGNet-8-2-weights.h5
# model.load_weights(WEIGHTS_PATH)

###############################################################################
# make prediction on test set.
###############################################################################

probs = model.predict(X_test)
Example #5
fittedModel = model.fit(X_train, Y_train,
                        verbose=2,
                        validation_data=(X_validate, Y_validate),
                        callbacks=[checkpointer],
                        class_weight=class_weights)

# load optimal weights
# model.load_weights('/tmp/checkpoint.h5')

###############################################################################
# can alternatively use the weights provided in the repo. If so, it should get
# you 93% accuracy. Change the WEIGHTS_PATH variable to wherever it is on your
# system.
###############################################################################

WEIGHTS_PATH = "EEGNet-8-2-weights.h5"
model.load_weights(WEIGHTS_PATH)

###############################################################################
# make prediction on test set.
###############################################################################

probs = model.predict(X_test)
preds = probs.argmax(axis=-1)
acc = np.mean(preds == Y_test.argmax(axis=-1))
print("Classification accuracy: %f " % (acc))

############################# PyRiemann Portion ##############################

# code is taken from PyRiemann's ERP sample script, which decodes in
# the tangent space with a logistic regression
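
# The referenced script is not reproduced in this snippet; the block below is
# only a sketch of that kind of pipeline (xDAWN covariances -> tangent-space
# projection -> logistic regression). It assumes X_train/X_test are arrays of
# shape (trials, channels, samples) (EEGNet-formatted data with a trailing
# kernel dimension is squeezed first) and that Y_train/Y_test are one-hot
# labels, as in the other examples here.
import numpy as np
from pyriemann.estimation import XdawnCovariances
from pyriemann.tangentspace import TangentSpace
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

clf = make_pipeline(XdawnCovariances(nfilter=2),
                    TangentSpace(metric='riemann'),
                    LogisticRegression())
clf.fit(np.squeeze(X_train), Y_train.argmax(axis=-1))
riemann_preds = clf.predict(np.squeeze(X_test))
print("xDAWN + RG accuracy: %f" % np.mean(riemann_preds == Y_test.argmax(axis=-1)))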
Example #6
# optimization to balance it out. This data is approximately balanced so we
# don't need to do this, but it is shown here for illustration/completeness.
##########################################################################
# the syntax is {class_1:weight_1, class_2:weight_2,...}. Here just setting
# the weights all to be 1
class_weights = {0:1, 1:1}

###########################################################################
# fit the model.
###########################################################################
fittedModel = model.fit(X_train, Y_train, batch_size = 16, epochs = 300,
                        verbose = 2, validation_data=(X_validate, Y_validate),
                        callbacks=[checkpointer], class_weight = class_weights)

# load optimal weights
model.load_weights('C:/Users/PUBLIC.DESKTOP-8KLP27O/Desktop/SSSEP/SSSEP_data/tmp/checkpoint.h5')

###############################################################################
# can alternatively use the weights provided in the repo. If so, it should get
# you 93% accuracy. Change the WEIGHTS_PATH variable to wherever it is on your
# system.
###############################################################################

# WEIGHTS_PATH = /path/to/EEGNet-8-2-weights.h5
# model.load_weights(WEIGHTS_PATH)

###############################################################################
# make prediction on test set.
###############################################################################

probs       = model.predict(X_test)
Example #7
    class_weights = {0: 1, 1: 1}

    ###########################################################################
    # fit the model.
    ###########################################################################
    fittedModel = model.fit(X_train,
                            Y_train,
                            batch_size=8,
                            epochs=20,
                            verbose=2,
                            validation_data=(X_validate, Y_validate),
                            callbacks=[checkpointer],
                            class_weight=class_weights)

    # load optimal weights
    model.load_weights(r'C:/Users/oo/Desktop/SSSEP/tmp/checkpoint.h5')

    ###############################################################################
    # can alternatively use the weights provided in the repo. If so, it should get
    # you 93% accuracy. Change the WEIGHTS_PATH variable to wherever it is on your
    # system.
    ###############################################################################

    # WEIGHTS_PATH = /path/to/EEGNet-8-2-weights.h5
    # model.load_weights(WEIGHTS_PATH)

    ###############################################################################
    # make prediction on test set.
    ###############################################################################

    probs = model.predict(X_test)
Example #8
################################################################################
# fit the model. Due to very small sample sizes this can get
# pretty noisy run-to-run, but most runs should be comparable to xDAWN +
# Riemannian geometry classification (below)
################################################################################
hist = model.fit(X_train,
                 Y_train,
                 batch_size=16,
                 epochs=500,
                 verbose=2,
                 validation_data=(X_validate, Y_validate),
                 callbacks=[checkpointer],
                 class_weight=class_weights)

# load optimal weights
model.load_weights(
    'C:/MATLAB/DataPreprocessing/DataAnalysis/eeglab_current/tmp/checkpoint.h5'
)

###############################################################################
# can alternatively use the weights provided in the repo. If so, it should get
# you 93% accuracy. Change the WEIGHTS_PATH variable to wherever it is on your
# system.
###############################################################################

# WEIGHTS_PATH = /path/to/EEGNet-8-2-weights.h5
# model.load_weights(WEIGHTS_PATH)

###############################################################################
# make prediction on test set.
###############################################################################
Example #9
# set a valid path for your system to record model checkpoints
checkpointer = ModelCheckpoint(filepath='./checkpoint.h5',
                               verbose=1,
                               save_best_only=True)

# the syntax is {class_1:weight_1, class_2:weight_2,...}. Here just setting
# the weights all to be 1
# unused
class_weights = {0: 1, 1: 1, 2: 1, 3: 1}
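
# Before pinning computation to GPU:0 it can help to confirm TensorFlow actually
# sees a GPU; a small sketch assuming the TensorFlow 2.x API (tf is already
# imported, since tf.device is used below).
gpus = tf.config.list_physical_devices('GPU')
print("GPUs visible to TensorFlow:", gpus)
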
with tf.device('/device:GPU:0'):
    fittedModel = model.fit(X_train,
                            Y_train,
                            batch_size=16,
                            epochs=150,
                            verbose=2,
                            validation_data=(X_validate, Y_validate),
                            callbacks=[checkpointer])

# load optimal weights
model.load_weights('./checkpoint.h5')

###############################################################################
# make prediction on test set.
###############################################################################

probs = model.predict(X_test)
preds = probs.argmax(axis=-1)
acc = np.mean(preds == Y_test.argmax(axis=-1))
print("Classification accuracy: %f " % (acc))