# Plot gamma, alpha and accuracy
# Whether to save the PNG figures and 3D data (0 = no, 1 = yes)
saveDataBool = 0
dir_n = directory + 'res_230319/'
plot_gamma_motor(subject_name, subjects_dir, dir_n, elec_up, mlabViewX,
                 mlabViewY, saveDataBool)
file_n = directory + 'res_touch.mat'
plot_gamma_touch(subject_name, subjects_dir, file_n, elec_up, mlabViewX,
                 mlabViewY, saveDataBool)
plot_alpha_motor(subject_name, subjects_dir, dir_n, elec_up, mlabViewX,
                 mlabViewY, saveDataBool)
plot_alpha_touch(subject_name, subjects_dir, file_n, elec_up, mlabViewX,
                 mlabViewY, saveDataBool)
dir_n = directory + 'pet67_paper/'
plot_accuracy(subject_name, subjects_dir, dir_n, elec_up, mlabViewX, mlabViewY,
              saveDataBool)

# TPS
# source control points
# construct the regular 8 x 8 electrode grid (21 mm span, in metres)
x, y = np.meshgrid(np.linspace(0, 0.021, 8), np.linspace(0, 0.021, 8))
xs = x.flatten()
ys = y.flatten()
control_points = np.vstack([xs, ys, np.zeros(64)]).T

# make T
T = make_T(control_points)
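# NOTE (assumption): make_T is not defined in this snippet. As a rough,
# hypothetical stand-in, a standard 2-D thin-plate-spline system matrix
# [[K, P], [P.T, 0]] built from the x/y coordinates (z is zero for the flat
# source grid) could look like the sketch below; the real helper may use a
# different kernel, padding or layout.
def make_T_sketch(control_points):
    n = len(control_points)
    xy = control_points[:, :2]
    # pairwise distances between control points
    d = np.linalg.norm(xy[:, None, :] - xy[None, :, :], axis=-1)
    # radial basis U(r) = r**2 * log(r), with U(0) defined as 0
    with np.errstate(divide='ignore', invalid='ignore'):
        K = np.where(d > 0, d ** 2 * np.log(d), 0.0)
    P = np.hstack([np.ones((n, 1)), xy])  # affine part [1, x, y]
    T_full = np.zeros((n + 3, n + 3))
    T_full[:n, :n] = K
    T_full[:n, n:] = P
    T_full[n:, :n] = P.T
    return T_full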

# target control points
points = deepcopy(elec_up) / 1000
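# NOTE (assumption): hypothetical continuation, not part of the original code.
# Assuming T is the standard (n + 3) x (n + 3) TPS system as in the sketch
# above, the warp coefficients mapping the flat 8 x 8 grid onto the measured
# electrode positions (in metres) could be solved for like this:
rhs = np.vstack([points, np.zeros((3, 3))])  # pad 64 x 3 targets with affine constraint rows
tps_coeffs = np.linalg.solve(T, rhs)         # (64 + 3) x 3 coefficient matrix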
# Print the model architecture summary (summary() already prints, so no print() wrapper is needed)
classifier.summary()

# Compiling the CNN
classifier.compile(adam(lr=.0001),
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])

cp_callback = create_callback(checkpoint_path, 1)
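# NOTE (assumption): create_callback is not shown here. A minimal stand-in
# built on Keras' ModelCheckpoint could look like the sketch below, where the
# second argument is assumed to be the save period in epochs.
from keras.callbacks import ModelCheckpoint

def create_callback_sketch(checkpoint_path, period):
    # Save the model weights every `period` epochs, printing a message each time.
    return ModelCheckpoint(checkpoint_path,
                           save_weights_only=True,
                           verbose=1,
                           period=period)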

hist = classifier.fit_generator(
    train_generator,
    epochs=num_epoch,
    steps_per_epoch=num_train_samples // batch_size,
    validation_data=validation_generator,
    validation_steps=num_validate_samples // batch_size,
    class_weight='auto',
    callbacks=[cp_callback])

# Plot training and validation accuracy
plot_accuracy(hist)

# Plot training and validation loss
plot_loss(hist)
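# NOTE (assumption): plot_accuracy and plot_loss are not defined in this
# snippet. Minimal matplotlib sketches over the Keras History object could
# look like the two helpers below (the metric key is 'acc' in older Keras and
# 'accuracy' in newer versions); the project's own functions may differ.
import matplotlib.pyplot as plt

def plot_history_accuracy_sketch(hist):
    key = 'acc' if 'acc' in hist.history else 'accuracy'
    plt.plot(hist.history[key], label='training')
    plt.plot(hist.history['val_' + key], label='validation')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.legend()
    plt.show()

def plot_history_loss_sketch(hist):
    plt.plot(hist.history['loss'], label='training')
    plt.plot(hist.history['val_loss'], label='validation')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.show()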

# Print test set accuracy and loss values
scores = classifier.evaluate_generator(test_generator,
                                       steps=num_test_samples // batch_size)
print("loss: {}, accuracy: {}".format(scores[0], scores[1]))
Example #3
    print('Highest accuracy:')
    print((evaluation.loc[(slice(None), slice(None), 'cross-validation',
                           'Classifier'),
                          ('accuracy MEAN',
                           'accuracy SD')].sort_values('accuracy MEAN',
                                                       ascending=False)).head())
    print()
    print('Lowest accuracy:')
    print((evaluation.loc[(slice(None), slice(None), 'cross-validation',
                           'Classifier'),
                          ('accuracy MEAN',
                           'accuracy SD')].sort_values('accuracy MEAN',
                                                       ascending=True)).head())

# %% Compare the different classifiers
filenames = ['knn', 'ridge', 'dt']

if True:
    # For cross-validation scatter-plot accuracy mean and standard deviation
    functions.plot_accuracy(path, filenames)
if True:
    # For cross-validation scatter-plot fit time mean and score time
    functions.plot_efficiency(path, filenames)
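# NOTE (assumption): functions.plot_accuracy is not part of this snippet.
# Given the per-classifier evaluation files read below, a minimal version of
# the accuracy comparison scatter could look like this sketch (the filtering
# to cross-validation / Classifier rows is omitted for brevity):
import matplotlib.pyplot as plt

def plot_cv_accuracy_sketch(path, filenames):
    for filename in filenames:
        ev = pd.read_hdf(os.path.join(path, filename + '_evaluation.h5'),
                         key='evaluation')
        plt.scatter(ev['accuracy MEAN'], ev['accuracy SD'],
                    label=filename, alpha=0.7)
    plt.xlabel('accuracy MEAN')
    plt.ylabel('accuracy SD')
    plt.legend()
    plt.show()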

if True:
    # List variants of each classifier with highest accuracy values
    all_evaluations = pd.DataFrame()
    for filename in filenames:
        evaluation = pd.read_hdf(os.path.join(path,
                                              filename + '_evaluation.h5'),
                                 key='evaluation')

        # Select only rows with cross-validation and only Classifier (no baselines)
        rows = ()
        for name in evaluation.index.names:
Example #4
# Print the model architecture summary (summary() already prints, so no print() wrapper is needed)
model.summary()

# Transfer Learning
print("\nPerforming Transfer Learning")

# Compiling the model
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Fit the Transfer Learning model to the data from the generators
history = model.fit_generator(train_generator,
                              epochs=num_epoch,
                              steps_per_epoch=num_train_samples // batch_size,
                              validation_data=validation_generator,
                              validation_steps=num_validate_samples //
                              batch_size,
                              class_weight='auto',
                              shuffle=True)

# Plot training and validation accuracy
plot_accuracy(history)

# Plot training and validation loss
plot_loss(history)

# Print test set accuracy and loss values
scores = model.evaluate_generator(test_generator,
                                  steps=num_test_samples // batch_size)
print("loss: {}, accuracy: {}".format(scores[0], scores[1]))
    print('Highest accuracy:')
    print((evaluation.loc[(slice(None), slice(None),
        'cross-validation', 'Classifier'), ('accuracy MEAN', 'accuracy SD')].
        sort_values('accuracy MEAN', ascending=False)).head())
    print()
    print('Lowest accuracy:')
    print((evaluation.loc[(slice(None), slice(None),
        'cross-validation', 'Classifier'), ('accuracy MEAN', 'accuracy SD')].
        sort_values('accuracy MEAN', ascending=True)).head())

# %% Compare the different classifiers
filenames = ['knn', 'ridge', 'dt']
if False:
    # For cross-validation scatter-plot accuracy mean and standard deviation
    functions.plot_accuracy(cfg.default.occupancy_figures, filenames)
if False:
    # For cross-validation scatter-plot fit time mean and score time
    functions.plot_efficiency(cfg.default.occupancy_figures, filenames)

if False:
    # List variants of each classifier with highest accuracy values
    all_evaluations = pd.DataFrame()
    for filename in filenames:
        path = cfg.default.occupancy_figures
        evaluation = pd.read_hdf(os.path.join(path, filename + '_evaluation.h5'),
            key='evaluation')

        # Select only rows with cross-validation and only Classifier (no baselines)
        rows = ()
        for name in evaluation.index.names: