Exemplo n.º 1
0
 def get_single_subj_dataset(self, subject=None, final_evaluation=False):
     """Return an OrderedDict with the train/valid/test sets for *subject*.

     The subject's splits are loaded on first access and cached in
     ``self.datasets``. When ``final_evaluation`` is true, the validation
     set is folded into the training set (train on train+valid, judge on
     test only).
     """
     split_names = ('train', 'valid', 'test')
     # Lazily fetch and cache this subject's splits on first request.
     if subject not in self.datasets['train']:
         loaded = get_train_val_test(global_vars.get('data_folder'), subject)
         for name, split in zip(split_names, loaded):
             self.datasets[name][subject] = split
     single_subj_dataset = OrderedDict(
         (name, self.datasets[name][subject]) for name in split_names)
     if final_evaluation:
         # Final run trains on the combined train+valid data.
         single_subj_dataset['train'] = concatenate_sets(
             [single_subj_dataset['train'], single_subj_dataset['valid']])
     return single_subj_dataset
Exemplo n.º 2
0
 def get_single_subj_dataset(self, subject=None, final_evaluation=False):
     """Return an OrderedDict of train/valid/test sets for *subject*.

     Splits are loaded and cached in ``self.datasets`` on first access.
     For a final evaluation with 'ensemble_iterations' enabled, train and
     valid are merged. If the 'time_frequency' option is set, the dataset
     is converted to a time-frequency representation in place.
     """
     split_names = ('train', 'valid', 'test')
     # Lazily fetch and cache this subject's splits on first request.
     if subject not in self.datasets['train']:
         loaded = get_train_val_test(global_vars.get('data_folder'), subject)
         for name, split in zip(split_names, loaded):
             self.datasets[name][subject] = split
     single_subj_dataset = OrderedDict(
         (name, self.datasets[name][subject]) for name in split_names)
     if final_evaluation and global_vars.get('ensemble_iterations'):
         # Final run trains on the combined train+valid data.
         single_subj_dataset['train'] = concatenate_sets(
             [single_subj_dataset['train'], single_subj_dataset['valid']])
     if global_vars.get('time_frequency'):
         # In-place time-frequency conversion; global parameters must be
         # refreshed to describe the transformed training data.
         EEG_to_TF_mne(single_subj_dataset)
         set_global_vars_by_dataset(single_subj_dataset['train'])
     return single_subj_dataset
Exemplo n.º 3
0
def target_exp(exp_name, csv_file, subjects, model_from_file=None, write_header=True):
    """Run the target-model experiment for every subject in *subjects*.

    Results are appended to *csv_file* (a header row is written first when
    *write_header* is true). Subjects that are not exclusively covered by
    *model_from_file* are skipped when 'per_subject_exclusive' is set.
    """
    stop_criterion, iterator, loss_function, monitors = get_settings()
    if write_header:
        # Append mode: the header is only written when the caller asks for it.
        with open(csv_file, 'a', newline='') as csvfile:
            csv.DictWriter(csvfile, fieldnames=FIELDNAMES).writeheader()
    for subject_id in subjects:
        skip_subject = (model_from_file is not None
                        and global_vars.get('per_subject_exclusive')
                        and not_exclusively_in(subject_id, model_from_file))
        if skip_subject:
            continue
        train_set, val_set, test_set = {}, {}, {}
        # NOTE(review): `data_folder` is not defined in this function —
        # presumably a module-level global; confirm before reuse.
        (train_set[subject_id],
         val_set[subject_id],
         test_set[subject_id]) = get_train_val_test(data_folder, subject_id)
        runner = EEGNAS_from_file(
            iterator=iterator,
            exp_folder=f"results/{exp_name}",
            exp_name=exp_name,
            train_set=train_set,
            val_set=val_set,
            test_set=test_set,
            stop_criterion=stop_criterion,
            monitors=monitors,
            loss_function=loss_function,
            subject_id=subject_id,
            fieldnames=FIELDNAMES,
            csv_file=csv_file,
            model_from_file=model_from_file)
        runner.run_target_model()
Exemplo n.º 4
0
# --- Experiment setup: load one subject's data and pick a model to evaluate ---
low_cut_hz = 0  # low-frequency cutoff in Hz (0 = no high-pass filtering)
fs = 250  # sampling frequency in Hz
valid_set_fraction = 0.2  # fraction of training data held out for validation
dataset = 'BCI_IV_2a'
data_folder = '../data/'
global_vars.set('dataset', dataset)
set_params_by_dataset()
global_vars.set('cuda', True)
# Choose which model to evaluate below: a saved evolved model or the Deep4 baseline.
model_select = 'deep4'
model_dir = '143_x_evolution_layers_cross_subject'
model_name = 'best_model_9_8_6_7_2_1_3_4_5.th'
train_set = {}
val_set = {}
test_set = {}
# NOTE(review): `subject_id` is not defined anywhere above — presumably set
# earlier in the full script; confirm before running this snippet standalone.
train_set[subject_id], val_set[subject_id], test_set[subject_id] =\
            get_train_val_test(data_folder, subject_id)

# Set if you want to use GPU
# You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
cuda = True
set_random_seeds(seed=20170629, cuda=cuda)

# This will determine how many crops are processed in parallel
input_time_length = 450
# final_conv_length determines the size of the receptive field of the ConvNet
# Both candidate models are built up front; only `model_select` decides which is used.
models = {
    'evolution': torch.load(f'../models/{model_dir}/{model_name}'),
    'deep4': target_model('deep')
}
model = models[model_select]
# NOTE(review): this overwrites the 450 assigned above — the value actually
# used downstream comes from global_vars; confirm which is intended.
input_time_length = global_vars.get('input_time_len')
Exemplo n.º 5
0
 # --- Setup: load subject 1's data and candidate models for a NaiveNAS run ---
 set_params_by_dataset()
 # Choose which model to inspect: a saved evolved model or the Deep4 baseline.
 model_selection = 'deep4'
 # Per-model indices of the convolutional layer and filter to visualize/probe.
 cnn_layer = {'evolution': 10, 'deep4': 25}
 filter_pos = {'evolution': 0, 'deep4': 0}
 model_dir = '91_x_BCI_IV_2b'
 model_name = 'best_model_5_1_8_7_9_2_3_4_6.th'
 # Both candidate models are built up front; `model_selection` picks one later.
 model = {
     'evolution': torch.load(f'../../models/{model_dir}/{model_name}'),
     'deep4': target_model('deep')
 }
 subject_id = 1
 # Datasets are keyed by subject id, matching the NaiveNAS interface below.
 train_set = {}
 val_set = {}
 test_set = {}
 train_set[subject_id], val_set[subject_id], test_set[subject_id] = \
     get_train_val_test(global_vars.get('data_folder'), subject_id)
 stop_criterion, iterator, loss_function, monitors = get_normal_settings()
 naiveNAS = NaiveNAS(iterator=iterator,
                     exp_folder=None,
                     exp_name=None,
                     train_set=train_set,
                     val_set=val_set,
                     test_set=test_set,
                     stop_criterion=stop_criterion,
                     monitors=monitors,
                     loss_function=loss_function,
                     config=global_vars.config,
                     subject_id=subject_id,
                     fieldnames=None,
                     strategy='cross_subject',
                     evolution_file=None,