def train(config):
    cuda = True
    model = config['model']
    if model == 'deep':
        model = Deep4Net(n_chans, n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=2, config=config).create_network()
    to_dense_prediction_model(model)
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    # discover the number of predictions per input window with a dummy forward pass
    dummy_input = np_to_var(train_set.X[:1, :, :, None])
    if cuda:
        dummy_input = dummy_input.cuda()
    out = model(dummy_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]

    optimizer = optim.Adam(model.parameters())
    iterator = CropsFromTrialsIterator(batch_size=60,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)
    stop_criterion = Or([MaxEpochs(20), NoDecrease('valid_misclass', 80)])
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]
    model_constraint = MaxNormDefaultConstraint()
    loss_function = lambda preds, targets: F.nll_loss(
        th.mean(preds, dim=2, keepdim=False), targets)
    exp = Experiment(model, train_set, valid_set, test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    print(exp.rememberer)
    return exp.rememberer.lowest_val
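# --- Aside: the cropped loss above in isolation -----------------------------
# A minimal, self-contained sketch of what the lambda loss does, assuming
# predictions of shape (batch, n_classes, n_preds_per_input); the shapes and
# numbers here are illustrative, not taken from the experiment above.
import torch as th
import torch.nn.functional as F

preds = th.randn(60, 4, 100).log_softmax(dim=1)  # per-crop log-probabilities
targets = th.randint(0, 4, (60,))                # one label per trial
trial_preds = th.mean(preds, dim=2)              # average crops into one prediction per trial
loss = F.nll_loss(trial_preds, targets)
print(loss.item())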
def setUp(self):
    args = parse_args(['-e', 'tests', '-c', '../configurations/config.ini'])
    init_config(args.config)
    configs = get_configurations(args.experiment)
    assert len(configs) == 1
    global_vars.set_config(configs[0])
    global_vars.set('eeg_chans', 22)
    global_vars.set('num_subjects', 9)
    global_vars.set('input_time_len', 1125)
    global_vars.set('n_classes', 4)
    set_params_by_dataset()
    input_shape = (50, global_vars.get('eeg_chans'),
                   global_vars.get('input_time_len'))

    class Dummy:
        def __init__(self, X, y):
            self.X = X
            self.y = y

    dummy_data = Dummy(X=np.ones(input_shape, dtype=np.float32),
                       y=np.ones(50, dtype=np.longlong))
    self.iterator = BalancedBatchSizeIterator(
        batch_size=global_vars.get('batch_size'))
    self.loss_function = F.nll_loss
    self.monitors = [
        LossMonitor(),
        MisclassMonitor(),
        GenericMonitor('accuracy', acc_func),
        RuntimeMonitor()
    ]
    self.stop_criterion = Or([
        MaxEpochs(global_vars.get('max_epochs')),
        NoDecrease('valid_misclass', global_vars.get('max_increase_epochs'))
    ])
    self.naiveNAS = NaiveNAS(iterator=self.iterator, exp_folder='../tests',
                             exp_name='', train_set=dummy_data,
                             val_set=dummy_data, test_set=dummy_data,
                             stop_criterion=self.stop_criterion,
                             monitors=self.monitors,
                             loss_function=self.loss_function,
                             config=global_vars.config, subject_id=1,
                             fieldnames=None, model_from_file=None)
def network_model(model, train_set, test_set, valid_set, n_chans,
                  input_time_length, cuda):
    max_epochs = 30
    max_increase_epochs = 10
    batch_size = 64
    init_block_size = 1000
    set_random_seeds(seed=20190629, cuda=cuda)
    n_classes = 2
    if model == 'deep':
        model = Deep4Net(n_chans, n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    elif model == 'shallow':
        model = ShallowFBCSPNet(n_chans, n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto').create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = AdamW(model.parameters(), lr=0.00625, weight_decay=0)
    iterator = BalancedBatchSizeIterator(batch_size=batch_size)
    stop_criterion = Or([MaxEpochs(max_epochs),
                         NoDecrease('valid_misclass', max_increase_epochs)])
    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]
    model_constraint = None
    print(train_set.X.shape[0])
    model_test = Experiment(model, train_set, valid_set, test_set,
                            iterator=iterator,
                            loss_function=F.nll_loss,
                            optimizer=optimizer,
                            model_constraint=model_constraint,
                            monitors=monitors,
                            stop_criterion=stop_criterion,
                            remember_best_column='valid_misclass',
                            run_after_early_stop=True,
                            cuda=cuda)
    model_test.run()
    return model_test
def run_exp(data_folder, session_id, subject_id, low_cut_hz, model, cuda):
    ival = [-500, 4000]
    max_epochs = 1600
    max_increase_epochs = 160
    batch_size = 10
    high_cut_hz = 38
    factor_new = 1e-3
    init_block_size = 1000
    valid_set_fraction = .2

    '''
    # BCI competition
    train_filename = 'A{:02d}T.gdf'.format(subject_id)
    test_filename = 'A{:02d}E.gdf'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace('.gdf', '.mat')
    test_label_filepath = test_filepath.replace('.gdf', '.mat')

    train_loader = BCICompetition4Set2A(
        train_filepath, labels_filename=train_label_filepath)
    test_loader = BCICompetition4Set2A(
        test_filepath, labels_filename=test_label_filepath)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()
    '''

    # GIGAscience
    filename = 'sess{:02d}_subj{:02d}_EEG_MI.mat'.format(session_id,
                                                         subject_id)
    filepath = os.path.join(data_folder, filename)
    train_variable = 'EEG_MI_train'
    test_variable = 'EEG_MI_test'

    train_loader = GIGAscience(filepath, train_variable)
    test_loader = GIGAscience(filepath, test_variable)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing
    '''
    channels ['Fp1', 'Fp2', 'F7', 'F3', 'Fz', 'F4', 'F8', 'FC5', 'FC1', 'FC2',
              'FC6', 'T7', 'C3', 'Cz', 'C4', 'T8', 'TP9', 'CP5', 'CP1', 'CP2',
              'CP6', 'TP10', 'P7', 'P3', 'Pz', 'P4', 'P8', 'PO9', 'O1', 'Oz',
              'O2', 'PO10', 'FC3', 'FC4', 'C5', 'C1', 'C2', 'C6', 'CP3', 'CPz',
              'CP4', 'P1', 'P2', 'POz', 'FT9', 'FTT9h', 'TTP7h', 'TP7',
              'TPP9h', 'FT10', 'FTT10h', 'TPP8h', 'TP8', 'TPP10h', 'F9',
              'F10', 'AF7', 'AF3', 'AF4', 'AF8', 'PO3', 'PO4']
    '''
    train_cnt = train_cnt.pick_channels([
        'FC5', 'FC3', 'FC1', 'Fz', 'FC2', 'FC4', 'FC6', 'C5', 'C3', 'C1',
        'Cz', 'C2', 'C4', 'C6', 'CP5', 'CP3', 'CP1', 'CPz', 'CP2', 'CP4',
        'CP6', 'Pz'
    ])
    train_cnt, train_cnt.info['events'] = train_cnt.copy().resample(
        250, npad='auto', events=train_cnt.info['events'])
    assert len(train_cnt.ch_names) == 22

    # convert to microvolts for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(a, low_cut_hz, high_cut_hz,
                               train_cnt.info['sfreq'], filt_order=3, axis=1),
        train_cnt)
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=factor_new, init_block_size=init_block_size,
            eps=1e-4).T,
        train_cnt)

    test_cnt = test_cnt.pick_channels([
        'FC5', 'FC3', 'FC1', 'Fz', 'FC2', 'FC4', 'FC6', 'C5', 'C3', 'C1',
        'Cz', 'C2', 'C4', 'C6', 'CP5', 'CP3', 'CP1', 'CPz', 'CP2', 'CP4',
        'CP6', 'Pz'
    ])
    test_cnt, test_cnt.info['events'] = test_cnt.copy().resample(
        250, npad='auto', events=test_cnt.info['events'])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(a, low_cut_hz, high_cut_hz,
                               test_cnt.info['sfreq'], filt_order=3, axis=1),
        test_cnt)
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=factor_new, init_block_size=init_block_size,
            eps=1e-4).T,
        test_cnt)

    marker_def = OrderedDict([('Right Hand', [1]), ('Left Hand', [2])])

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(
        train_set, first_set_fraction=1 - valid_set_fraction)

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 2
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model == 'shallow':
        model = ShallowFBCSPNet(n_chans, n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto').create_network()
    elif model == 'deep':
        model = Deep4Net(n_chans, n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())
    iterator = BalancedBatchSizeIterator(batch_size=batch_size)
    stop_criterion = Or([
        MaxEpochs(max_epochs),
        NoDecrease('valid_misclass', max_increase_epochs)
    ])
    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]
    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(model, train_set, valid_set, test_set,
                     iterator=iterator,
                     loss_function=F.nll_loss,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    return exp
def train_model(self, train_set, val_set, test_set, save_model):
    """
    :param train_set: EEG data (n_trials*n_channels*n_samples)
    :param val_set: EEG data (n_trials*n_channels*n_samples)
    :param test_set: EEG data (n_trials*n_channels*n_samples) - can be None
        when training on inner-fold
    :param save_model: Boolean: True if trained model is to be saved
    :return: Accuracy and loss scores for the model trained with a given
        set of hyper-parameters
    """
    predictions = None
    probabilities = None
    model = self.call_model()
    set_random_seeds(seed=20190629, cuda=self.cuda)
    if self.cuda:
        model.cuda()
        torch.backends.cudnn.deterministic = True
    log.info("Model: \n{:s}".format(str(model)))
    optimizer = optim.Adam(model.parameters(), lr=self.learning_rate,
                           weight_decay=0, eps=1e-8, amsgrad=False)
    stop_criterion = Or([
        MaxEpochs(self.epochs),
        NoDecrease('valid_misclass', self.max_increase_epochs)
    ])
    model_loss_function = None

    # Set up and run the selected model
    model_test = Experiment(model, train_set, val_set, test_set=test_set,
                            iterator=self.iterator,
                            loss_function=self.loss, optimizer=optimizer,
                            model_constraint=self.model_constraint,
                            monitors=self.monitors,
                            stop_criterion=stop_criterion,
                            remember_best_column='valid_misclass',
                            run_after_early_stop=True,
                            model_loss_function=model_loss_function,
                            cuda=self.cuda, data_type=self.data_type,
                            model_type=self.model_type,
                            subject_id=self.subject,
                            model_number=str(self.model_number),
                            save_model=save_model)
    model_test.run()
    model_acc = model_test.epochs_df['valid_misclass'].astype('float')
    model_loss = model_test.epochs_df['valid_loss'].astype('float')
    current_val_acc = 1 - current_acc(model_acc)
    current_val_loss = current_loss(model_loss)
    test_accuracy = None
    if test_set is not None:
        test_accuracy = round(
            (1 - model_test.epochs_df['test_misclass'].min()) * 100, 3)
        predictions = model_test.predictions
        probabilities = model_test.probabilites  # (sic) attribute name as defined on this Experiment variant
    return (current_val_acc, current_val_loss, test_accuracy, model_test,
            predictions, probabilities)
def build_exp(model_name, cuda, data, batch_size, max_epochs,
              max_increase_epochs):
    log.info("==============================")
    log.info("Loading Data...")
    log.info("==============================")
    train_set = data.train_set
    valid_set = data.validation_set
    test_set = data.test_set

    log.info("==============================")
    log.info("Setting Up Model...")
    log.info("==============================")
    set_random_seeds(seed=20190706, cuda=cuda)
    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model_name == "shallow":
        model = NewShallowNet(
            n_chans, n_classes, input_time_length, final_conv_length="auto"
        )
        # model = ShallowFBCSPNet(
        #     n_chans,
        #     n_classes,
        #     input_time_length=input_time_length,
        #     final_conv_length="auto",
        # ).create_network()
    elif model_name == "deep":
        model = NewDeep4Net(n_chans, n_classes, input_time_length, "auto")
        # model = Deep4Net(
        #     n_chans,
        #     n_classes,
        #     input_time_length=input_time_length,
        #     final_conv_length="auto",
        # ).create_network()
    elif model_name == "eegnet":
        # model = EEGNet(n_chans, n_classes,
        #                input_time_length=input_time_length)
        # model = EEGNetv4(n_chans, n_classes,
        #                  input_time_length=input_time_length).create_network()
        model = NewEEGNet(n_chans, n_classes,
                          input_time_length=input_time_length)
    if cuda:
        model.cuda()

    log.info("==============================")
    log.info("Logging Model Architecture:")
    log.info("==============================")
    log.info("Model: \n{:s}".format(str(model)))

    log.info("==============================")
    log.info("Building Experiment:")
    log.info("==============================")
    optimizer = optim.Adam(model.parameters())
    iterator = BalancedBatchSizeIterator(batch_size=batch_size)
    stop_criterion = Or(
        [MaxEpochs(max_epochs), NoDecrease("valid_misclass", max_increase_epochs)]
    )
    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]
    model_constraint = MaxNormDefaultConstraint()
    exp = Experiment(
        model,
        train_set,
        valid_set,
        test_set,
        iterator=iterator,
        loss_function=F.nll_loss,
        optimizer=optimizer,
        model_constraint=model_constraint,
        monitors=monitors,
        stop_criterion=stop_criterion,
        remember_best_column="valid_misclass",
        run_after_early_stop=True,
        cuda=cuda,
    )
    return exp
def run_exp(epoches, batch_size, subject_num, model_type, cuda,
            single_subject, single_subject_num):
    # ival = [-500, 4000]
    max_epochs = epoches  # was undefined in the original; use the epoch budget passed in
    max_increase_epochs = 160

    # Preprocessing
    X, y = loadSubjects(subject_num, single_subject, single_subject_num)
    X = X.astype(np.float32)
    y = y.astype(np.int64)
    X, y = shuffle(X, y)

    trial_length = X.shape[2]
    print("trial_length " + str(trial_length))
    print("trying to run with {} sec trials ".format((trial_length - 1) / 256))
    print("y")
    print(y)

    # 60/20/20 train/validation/test split
    trainingSampleSize = int(len(X) * 0.6)
    validationSampleSize = int(len(X) * 0.2)
    testSampleSize = int(len(X) * 0.2)
    print("INFO : Training sample size: {}".format(trainingSampleSize))
    print("INFO : Validation sample size: {}".format(validationSampleSize))
    print("INFO : Test sample size: {}".format(testSampleSize))

    train_set = SignalAndTarget(X[:trainingSampleSize],
                                y=y[:trainingSampleSize])
    valid_set = SignalAndTarget(
        X[trainingSampleSize:(trainingSampleSize + validationSampleSize)],
        y=y[trainingSampleSize:(trainingSampleSize + validationSampleSize)])
    test_set = SignalAndTarget(
        X[(trainingSampleSize + validationSampleSize):],
        y=y[(trainingSampleSize + validationSampleSize):])

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 3
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model_type == 'shallow':
        model = ShallowFBCSPNet(n_chans, n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto').create_network()
    elif model_type == 'deep':
        model = Deep4Net(n_chans, n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    elif model_type == 'eegnet':
        model = EEGNetv4(n_chans, n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())
    iterator = BalancedBatchSizeIterator(batch_size=batch_size)
    stop_criterion = Or([
        MaxEpochs(max_epochs),
        NoDecrease('valid_misclass', max_increase_epochs)
    ])
    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]
    model_constraint = MaxNormDefaultConstraint()
    exp = Experiment(model, train_set, valid_set, test_set,
                     iterator=iterator,
                     loss_function=F.nll_loss,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    # th.save(model, "models\{}-cropped-singleSubjectNum{}-{}sec-{}epoches-torch_model".format(model_type, single_subject_num, ((trial_length - 1) / 256), epoches))
    return exp
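# --- Aside: the 60/20/20 split arithmetic above ------------------------------
# A quick check of the slicing logic, with an illustrative trial count (not
# from the source): int() truncation shrinks the first two slices, and the
# open-ended test slice absorbs the remainder, so no trials are dropped.
n = 103
train_n, valid_n = int(n * 0.6), int(n * 0.2)
print(train_n, valid_n, n - train_n - valid_n)  # 61 20 22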
def run_exp(data_folder, subject_id, low_cut_hz, model, cuda):
    train_filename = 'A{:02d}T.gdf'.format(subject_id)
    test_filename = 'A{:02d}E.gdf'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace('.gdf', '.mat')
    test_label_filepath = test_filepath.replace('.gdf', '.mat')

    train_loader = BCICompetition4Set2A(train_filepath,
                                        labels_filename=train_label_filepath)
    test_loader = BCICompetition4Set2A(test_filepath,
                                       labels_filename=test_label_filepath)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing
    train_cnt = train_cnt.drop_channels(
        ['STI 014', 'EOG-left', 'EOG-central', 'EOG-right'])
    assert len(train_cnt.ch_names) == 22
    # convert to microvolts for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a, low_cut_hz, 38, train_cnt.info['sfreq'], filt_order=3, axis=1),
        train_cnt)
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T,
        train_cnt)

    test_cnt = test_cnt.drop_channels(
        ['STI 014', 'EOG-left', 'EOG-central', 'EOG-right'])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a, low_cut_hz, 38, test_cnt.info['sfreq'], filt_order=3, axis=1),
        test_cnt)
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T,
        test_cnt)

    marker_def = OrderedDict([('Left Hand', [1]), ('Right Hand', [2]),
                              ('Foot', [3]), ('Tongue', [4])])
    ival = [-500, 4000]

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(train_set,
                                               first_set_fraction=0.8)

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model == 'shallow':
        model = ShallowFBCSPNet(n_chans, n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto').create_network()
    elif model == 'deep':
        model = Deep4Net(n_chans, n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())
    iterator = BalancedBatchSizeIterator(batch_size=60)
    stop_criterion = Or([MaxEpochs(1600), NoDecrease('valid_misclass', 160)])
    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]
    model_constraint = MaxNormDefaultConstraint()
    exp = Experiment(model, train_set, valid_set, test_set,
                     iterator=iterator,
                     loss_function=F.nll_loss,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    return exp
def run_exp(data_folder, subject_id, low_cut_hz, model, cuda):
    ival = [-500, 4000]
    input_time_length = 1000
    max_epochs = 800
    max_increase_epochs = 80
    batch_size = 60
    high_cut_hz = 38
    factor_new = 1e-3
    init_block_size = 1000
    valid_set_fraction = 0.2

    train_filename = 'A{:02d}T.gdf'.format(subject_id)
    test_filename = 'A{:02d}E.gdf'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace('.gdf', '.mat')
    test_label_filepath = test_filepath.replace('.gdf', '.mat')

    train_loader = BCICompetition4Set2A(train_filepath,
                                        labels_filename=train_label_filepath)
    test_loader = BCICompetition4Set2A(test_filepath,
                                       labels_filename=test_label_filepath)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing
    train_cnt = train_cnt.drop_channels(
        ['STI 014', 'EOG-left', 'EOG-central', 'EOG-right'])
    assert len(train_cnt.ch_names) == 22
    # convert to microvolts for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(a, low_cut_hz, high_cut_hz,
                               train_cnt.info['sfreq'], filt_order=3, axis=1),
        train_cnt)
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=factor_new, init_block_size=init_block_size,
            eps=1e-4).T,
        train_cnt)

    test_cnt = test_cnt.drop_channels(
        ['STI 014', 'EOG-left', 'EOG-central', 'EOG-right'])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(a, low_cut_hz, high_cut_hz,
                               test_cnt.info['sfreq'], filt_order=3, axis=1),
        test_cnt)
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=factor_new, init_block_size=init_block_size,
            eps=1e-4).T,
        test_cnt)

    marker_def = OrderedDict([('Left Hand', [1]), ('Right Hand', [2]),
                              ('Foot', [3]), ('Tongue', [4])])

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(
        train_set, first_set_fraction=1 - valid_set_fraction)

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    if model == 'shallow':
        model = ShallowFBCSPNet(n_chans, n_classes,
                                input_time_length=input_time_length,
                                final_conv_length=30).create_network()
    elif model == 'deep':
        model = Deep4Net(n_chans, n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=2).create_network()

    to_dense_prediction_model(model)
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    # discover the number of predictions per input window with a dummy forward pass
    dummy_input = np_to_var(train_set.X[:1, :, :, None])
    if cuda:
        dummy_input = dummy_input.cuda()
    out = model(dummy_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]

    optimizer = optim.Adam(model.parameters())
    iterator = CropsFromTrialsIterator(batch_size=batch_size,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)
    stop_criterion = Or([
        MaxEpochs(max_epochs),
        NoDecrease('valid_misclass', max_increase_epochs)
    ])
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]
    model_constraint = MaxNormDefaultConstraint()
    loss_function = lambda preds, targets: F.nll_loss(
        th.mean(preds, dim=2, keepdim=False), targets)
    exp = Experiment(model, train_set, valid_set, test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    return exp
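# --- Aside: inferring n_preds_per_input with a dummy forward pass ------------
# The experiment above discovers how many crop predictions the densified
# model emits per window by pushing a dummy input through it. The same idea
# as a standalone sketch, with a toy Conv1d standing in for the real network
# (model and shapes are placeholders, not from the source):
import torch

toy_model = torch.nn.Conv1d(in_channels=22, out_channels=4, kernel_size=11)
dummy = torch.zeros(1, 22, 1000)        # (batch, channels, input_time_length)
n_preds_per_input = toy_model(dummy).shape[2]
print(n_preds_per_input)                # 990: one prediction per valid position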
def run_exp_on_high_gamma_dataset(train_filename, test_filename, low_cut_hz,
                                  model_name, max_epochs, max_increase_epochs,
                                  np_th_seed, debug):
    train_set, valid_set, test_set = load_train_valid_test(
        train_filename=train_filename, test_filename=test_filename,
        low_cut_hz=low_cut_hz, debug=debug)
    if debug:
        max_epochs = 4

    set_random_seeds(np_th_seed, cuda=True)
    # torch.backends.cudnn.benchmark = True  # sometimes crashes?
    n_classes = int(np.max(train_set.y) + 1)
    n_chans = int(train_set.X.shape[1])
    input_time_length = 1000
    if model_name == 'deep':
        model = Deep4Net(n_chans, n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=2).create_network()
    elif model_name == 'shallow':
        model = ShallowFBCSPNet(n_chans, n_classes,
                                input_time_length=input_time_length,
                                final_conv_length=30).create_network()

    to_dense_prediction_model(model)
    model.cuda()
    model.eval()

    out = model(np_to_var(train_set.X[:1, :, :input_time_length, None]).cuda())
    n_preds_per_input = out.cpu().data.numpy().shape[2]

    optimizer = optim.Adam(model.parameters(), weight_decay=0, lr=1e-3)
    iterator = CropsFromTrialsIterator(batch_size=60,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input,
                                       seed=np_th_seed)
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]
    model_constraint = MaxNormDefaultConstraint()
    loss_function = lambda preds, targets: F.nll_loss(th.mean(preds, dim=2),
                                                      targets)
    run_after_early_stop = True
    do_early_stop = True
    remember_best_column = 'valid_misclass'
    stop_criterion = Or([
        MaxEpochs(max_epochs),
        NoDecrease('valid_misclass', max_increase_epochs)
    ])

    exp = Experiment(model, train_set, valid_set, test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column=remember_best_column,
                     run_after_early_stop=run_after_early_stop,
                     cuda=True,
                     do_early_stop=do_early_stop)
    exp.run()
    return exp
max_epochs = 20000
max_increase_epochs = 360
model = ShallowFBCSPNet(in_chan, db.n_classes, input_time_length=time_steps,
                        final_conv_length="auto").create_network()
log.info("Model: \n{:s}".format(str(model)))
optimizer = optim.Adam(model.parameters())
iterator = BalancedBatchSizeIterator(batch_size=batch_size)
stop_criterion = Or([
    MaxEpochs(max_epochs),
    NoDecrease("valid_misclass", max_increase_epochs),
])
monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]
model_constraint = MaxNormDefaultConstraint()
exp = Experiment(
    model,
    train_set,
    valid_set,
    test_set,
    iterator=iterator,
    loss_function=F.nll_loss,
    optimizer=optimizer,
    model_constraint=model_constraint,
    # the original snippet breaks off after this argument; the remaining
    # keyword arguments follow the pattern of the other experiments here
    monitors=monitors,
    stop_criterion=stop_criterion,
    remember_best_column="valid_misclass",
    run_after_early_stop=True,
    cuda=cuda,
)
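# --- Aside: the stopping rule shared by these experiments --------------------
# Every snippet in this collection stops when either the epoch budget runs
# out or 'valid_misclass' has not reached a new low for a given number of
# epochs. A sketch of that semantics, assuming braindecode 0.x stop criteria
# with their should_stop(epochs_df) interface (the toy DataFrame is
# illustrative):
import pandas as pd
from braindecode.experiments.stopcriteria import Or, MaxEpochs, NoDecrease

criterion = Or([MaxEpochs(20000), NoDecrease("valid_misclass", 360)])
epochs_df = pd.DataFrame({"valid_misclass": [0.75, 0.50, 0.48, 0.48, 0.48]})
print(criterion.should_stop(epochs_df))  # False: well within both budgets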
def run_exp(data_folder, subject_id, low_cut_hz, model, cuda):
    ival = [-500, 4000]
    max_epochs = 1600
    max_increase_epochs = 160
    batch_size = 60
    high_cut_hz = 38
    factor_new = 1e-3
    init_block_size = 1000
    valid_set_fraction = 0.2

    train_filename = "A{:02d}T.gdf".format(subject_id)
    test_filename = "A{:02d}E.gdf".format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace(".gdf", ".mat")
    test_label_filepath = test_filepath.replace(".gdf", ".mat")

    train_loader = BCICompetition4Set2A(
        train_filepath, labels_filename=train_label_filepath
    )
    test_loader = BCICompetition4Set2A(
        test_filepath, labels_filename=test_label_filepath
    )
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing
    train_cnt = train_cnt.drop_channels(
        ["EOG-left", "EOG-central", "EOG-right"]
    )
    assert len(train_cnt.ch_names) == 22
    # convert to microvolts for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a,
            low_cut_hz,
            high_cut_hz,
            train_cnt.info["sfreq"],
            filt_order=3,
            axis=1,
        ),
        train_cnt,
    )
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T,
            factor_new=factor_new,
            init_block_size=init_block_size,
            eps=1e-4,
        ).T,
        train_cnt,
    )

    test_cnt = test_cnt.drop_channels(["EOG-left", "EOG-central", "EOG-right"])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a,
            low_cut_hz,
            high_cut_hz,
            test_cnt.info["sfreq"],
            filt_order=3,
            axis=1,
        ),
        test_cnt,
    )
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T,
            factor_new=factor_new,
            init_block_size=init_block_size,
            eps=1e-4,
        ).T,
        test_cnt,
    )

    marker_def = OrderedDict(
        [
            ("Left Hand", [1]),
            ("Right Hand", [2]),
            ("Foot", [3]),
            ("Tongue", [4]),
        ]
    )

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(
        train_set, first_set_fraction=1 - valid_set_fraction
    )

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model == "shallow":
        model = ShallowFBCSPNet(
            n_chans,
            n_classes,
            input_time_length=input_time_length,
            final_conv_length="auto",
        ).create_network()
    elif model == "deep":
        model = Deep4Net(
            n_chans,
            n_classes,
            input_time_length=input_time_length,
            final_conv_length="auto",
        ).create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())
    iterator = BalancedBatchSizeIterator(batch_size=batch_size)
    stop_criterion = Or(
        [
            MaxEpochs(max_epochs),
            NoDecrease("valid_misclass", max_increase_epochs),
        ]
    )
    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]
    model_constraint = MaxNormDefaultConstraint()
    exp = Experiment(
        model,
        train_set,
        valid_set,
        test_set,
        iterator=iterator,
        loss_function=F.nll_loss,
        optimizer=optimizer,
        model_constraint=model_constraint,
        monitors=monitors,
        stop_criterion=stop_criterion,
        remember_best_column="valid_misclass",
        run_after_early_stop=True,
        cuda=cuda,
    )
    exp.run()
    return exp
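# --- Aside: hypothetical invocation of run_exp above -------------------------
# The folder path and subject id below are placeholders, not from the source.
import torch

exp = run_exp(data_folder="./data/BCICIV_2a_gdf/", subject_id=1,
              low_cut_hz=4, model="shallow",
              cuda=torch.cuda.is_available())
print(exp.epochs_df.tail())  # last epochs of train/valid/test metrics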
def network_model(subject_id, model_type, data_type, cropped, cuda,
                  parameters, hyp_params):
    best_params = dict()  # dictionary to store hyper-parameter values

    # Parameters passed to the function
    max_epochs = parameters['max_epochs']
    max_increase_epochs = parameters['max_increase_epochs']
    batch_size = parameters['batch_size']

    # Constant parameters
    best_loss = 100.0  # instantiate starting point for loss
    iterator = BalancedBatchSizeIterator(batch_size=batch_size)
    stop_criterion = Or([MaxEpochs(max_epochs),
                         NoDecrease('valid_misclass', max_increase_epochs)])
    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]
    model_constraint = MaxNormDefaultConstraint()
    epoch = 4096

    # Collect and format data
    if data_type == 'words':
        data, labels = format_data(data_type, subject_id, epoch)
        data = data[:, :, 768:1280]  # within-trial window selected for classification
    elif data_type == 'vowels':
        data, labels = format_data(data_type, subject_id, epoch)
        data = data[:, :, 512:1024]
    elif data_type == 'all_classes':
        data, labels = format_data(data_type, subject_id, epoch)
        data = data[:, :, 768:1280]

    x = lambda a: a * 1e6  # improves numerical stability
    data = x(data)
    data = normalize(data)
    data, labels = balanced_subsample(data, labels)  # downsample to ensure equal classes
    data, _, labels, _ = train_test_split(data, labels, test_size=0,
                                          random_state=42)  # redundant shuffle of data/labels

    # Model inputs
    unique, counts = np.unique(labels, return_counts=True)
    n_classes = len(unique)
    n_chans = int(data.shape[1])
    input_time_length = data.shape[2]

    # k-fold nested cross-validation
    num_folds = 4
    skf = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=10)
    out_fold_num = 0  # outer-fold number
    cv_scores = []  # list for storing cross-validated scores

    # Outer fold
    for inner_ind, outer_index in skf.split(data, labels):
        inner_fold, outer_fold = data[inner_ind], data[outer_index]
        inner_labels, outer_labels = labels[inner_ind], labels[outer_index]
        out_fold_num += 1
        loss_with_params = dict()  # for storing param values and losses
        in_fold_num = 0  # inner-fold number

        # Inner fold
        for train_idx, valid_idx in skf.split(inner_fold, inner_labels):
            X_train, X_val = inner_fold[train_idx], inner_fold[valid_idx]
            y_train, y_val = inner_labels[train_idx], inner_labels[valid_idx]
            in_fold_num += 1
            train_set = SignalAndTarget(X_train, y_train)
            valid_set = SignalAndTarget(X_val, y_val)
            loss_with_params[f"Fold_{in_fold_num}"] = dict()

            # Nested cross-validation: hyper-parameter grid search
            for drop_prob in hyp_params['drop_prob']:
                for loss_function in hyp_params['loss']:
                    for i in range(len(hyp_params['lr_adam'])):
                        model = None  # ensure no duplication of models
                        # model, learning rate and optimizer set up per model_type
                        if model_type == 'shallow':
                            model = ShallowFBCSPNet(
                                in_chans=n_chans, n_classes=n_classes,
                                input_time_length=input_time_length,
                                n_filters_time=80, filter_time_length=40,
                                n_filters_spat=80, pool_time_length=75,
                                pool_time_stride=25, final_conv_length='auto',
                                conv_nonlin=square, pool_mode='max',
                                pool_nonlin=safe_log, split_first_layer=True,
                                batch_norm=True, batch_norm_alpha=0.1,
                                drop_prob=drop_prob).create_network()
                            lr = hyp_params['lr_ada'][i]
                            optimizer = optim.Adadelta(model.parameters(),
                                                       lr=lr, rho=0.9,
                                                       weight_decay=0.1,
                                                       eps=1e-8)
                        elif model_type == 'deep':
                            model = Deep4Net(
                                in_chans=n_chans, n_classes=n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto', n_filters_time=20,
                                n_filters_spat=20, filter_time_length=10,
                                pool_time_length=3, pool_time_stride=3,
                                n_filters_2=50, filter_length_2=15,
                                n_filters_3=100, filter_length_3=15,
                                n_filters_4=400,
                                filter_length_4=10,  # changed from 15 to 10
                                first_nonlin=leaky_relu,
                                first_pool_mode='max',
                                first_pool_nonlin=safe_log,
                                later_nonlin=leaky_relu,
                                later_pool_mode='max',
                                later_pool_nonlin=safe_log,
                                drop_prob=drop_prob, double_time_convs=False,
                                split_first_layer=False, batch_norm=True,
                                batch_norm_alpha=0.1,
                                stride_before_pool=False).create_network()
                            lr = hyp_params['lr_ada'][i]
                            optimizer = optim.Adadelta(model.parameters(),
                                                       lr=lr,
                                                       weight_decay=0.1,
                                                       eps=1e-8)
                        elif model_type == 'eegnet':
                            model = EEGNetv4(
                                in_chans=n_chans, n_classes=n_classes,
                                final_conv_length='auto',
                                input_time_length=input_time_length,
                                pool_mode='mean', F1=16, D=2, F2=32,
                                kernel_length=64, third_kernel_size=(8, 4),
                                drop_prob=drop_prob).create_network()
                            lr = hyp_params['lr_adam'][i]
                            optimizer = optim.Adam(model.parameters(), lr=lr,
                                                   weight_decay=0, eps=1e-8,
                                                   amsgrad=False)

                        set_random_seeds(seed=20190629, cuda=cuda)
                        if cuda:
                            model.cuda()
                            torch.backends.cudnn.deterministic = True
                            model = torch.nn.DataParallel(model)
                        log.info("Model: \n{:s}".format(str(model)))
                        model_loss_function = None

                        # Set up and run the selected model
                        model_test = Experiment(
                            model, train_set, valid_set, test_set=None,
                            iterator=iterator, loss_function=loss_function,
                            optimizer=optimizer,
                            model_constraint=model_constraint,
                            monitors=monitors, stop_criterion=stop_criterion,
                            remember_best_column='valid_misclass',
                            run_after_early_stop=True,
                            model_loss_function=model_loss_function,
                            cuda=cuda, data_type=data_type,
                            subject_id=subject_id, model_type=model_type,
                            cropped=cropped, model_number=str(out_fold_num))
                        model_test.run()
                        model_loss = model_test.epochs_df['valid_loss'].astype('float')
                        current_val_loss = current_loss(model_loss)
                        loss_with_params[f"Fold_{in_fold_num}"][
                            f"{drop_prob}/{loss_function}/{lr}"] = current_val_loss

        # Select and train the optimized model
        df = pd.DataFrame(loss_with_params)
        df['mean'] = df.mean(axis=1)  # compute mean loss across k folds
        writer_df = f"results_folder\\results\\S{subject_id}\\{model_type}_parameters.xlsx"
        df.to_excel(writer_df)

        # extract best param values from the row label with the lowest mean loss
        best_dp, best_loss, best_lr = df.loc[df['mean'].idxmin()].name.split("/")
        # the loss key is a function repr like '<function nll_loss ...>',
        # so characters 10:13 are 'nll' or 'cro'
        if best_loss[10:13] == 'nll':
            best_loss = F.nll_loss
        elif best_loss[10:13] == 'cro':
            best_loss = F.cross_entropy
        print(f"Best parameters: dropout: {best_dp}, "
              f"loss: {str(best_loss)[10:13]}, lr: {best_lr}")

        # Train the model on the entire inner-fold set
        torch.backends.cudnn.deterministic = True
        model = None

        # Create outer-fold validation and test sets
        X_valid, X_test, y_valid, y_test = train_test_split(
            outer_fold, outer_labels, test_size=0.5, random_state=42,
            stratify=outer_labels)
        train_set = SignalAndTarget(inner_fold, inner_labels)
        valid_set = SignalAndTarget(X_valid, y_valid)
        test_set = SignalAndTarget(X_test, y_test)

        if model_type == 'shallow':
            model = ShallowFBCSPNet(
                in_chans=n_chans, n_classes=n_classes,
                input_time_length=input_time_length,
                n_filters_time=60, filter_time_length=5, n_filters_spat=40,
                pool_time_length=50,  # 50 works better than 75
                pool_time_stride=15, final_conv_length='auto',
                conv_nonlin=relu6, pool_mode='mean', pool_nonlin=safe_log,
                split_first_layer=True, batch_norm=True,
                batch_norm_alpha=0.1, drop_prob=0.1).create_network()
            optimizer = optim.Adadelta(model.parameters(), lr=2.0, rho=0.9,
                                       weight_decay=0.1, eps=1e-8)
        elif model_type == 'deep':
            model = Deep4Net(
                in_chans=n_chans, n_classes=n_classes,
                input_time_length=input_time_length,
                final_conv_length='auto', n_filters_time=20,
                n_filters_spat=20, filter_time_length=5, pool_time_length=3,
                pool_time_stride=3, n_filters_2=20, filter_length_2=5,
                n_filters_3=40, filter_length_3=5, n_filters_4=1500,
                filter_length_4=10, first_nonlin=leaky_relu,
                first_pool_mode='mean', first_pool_nonlin=safe_log,
                later_nonlin=leaky_relu, later_pool_mode='mean',
                later_pool_nonlin=safe_log, drop_prob=0.1,
                double_time_convs=False, split_first_layer=True,
                batch_norm=True, batch_norm_alpha=0.1,
                stride_before_pool=False).create_network()
            optimizer = AdamW(model.parameters(), lr=0.1, weight_decay=0)
        elif model_type == 'eegnet':
            model = EEGNetv4(
                in_chans=n_chans, n_classes=n_classes,
                final_conv_length='auto',
                input_time_length=input_time_length, pool_mode='mean',
                F1=16, D=2, F2=32, kernel_length=64, third_kernel_size=(8, 4),
                drop_prob=0.1).create_network()
            optimizer = optim.Adam(model.parameters(), lr=0.1,
                                   weight_decay=0, eps=1e-8, amsgrad=False)

        if cuda:
            model.cuda()
            torch.backends.cudnn.deterministic = True
            # model = torch.nn.DataParallel(model)
        log.info("Optimized model")
        model_loss_function = None

        # Set up and run the optimized model
        optimized_model = op_exp(
            model, train_set, valid_set, test_set=test_set,
            iterator=iterator, loss_function=best_loss, optimizer=optimizer,
            model_constraint=model_constraint, monitors=monitors,
            stop_criterion=stop_criterion,
            remember_best_column='valid_misclass',
            run_after_early_stop=True,
            model_loss_function=model_loss_function, cuda=cuda,
            data_type=data_type, subject_id=subject_id,
            model_type=model_type, cropped=cropped,
            model_number=str(out_fold_num))
        optimized_model.run()

        log.info("Last 5 epochs")
        log.info("\n" + str(optimized_model.epochs_df.iloc[-5:]))

        writer = f"results_folder\\results\\S{subject_id}\\{data_type}_{model_type}_{str(out_fold_num)}.xlsx"
        optimized_model.epochs_df.iloc[-30:].to_excel(writer)

        accuracy = 1 - np.min(np.array(optimized_model.class_acc))
        cv_scores.append(accuracy)  # k accuracy scores for this param set

    # Print and store fold accuracies and mean accuracy
    print(f"Class Accuracy: {np.mean(np.array(cv_scores))}")
    results_df = pd.DataFrame(dict(cv_scores=cv_scores,
                                   cv_mean=np.mean(np.array(cv_scores))))
    writer2 = f"results_folder\\results\\S{subject_id}\\{data_type}_{model_type}_cvscores.xlsx"
    results_df.to_excel(writer2)
    return optimized_model, np.mean(np.array(cv_scores))
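# --- Aside: the nested cross-validation index structure above ----------------
# The outer split reserves a held-out fold; hyper-parameters are selected
# only on inner splits of the remaining data. A minimal sketch with toy
# sizes (not from the source):
import numpy as np
from sklearn.model_selection import StratifiedKFold

X = np.zeros((40, 2))
y = np.tile([0, 1], 20)
skf = StratifiedKFold(n_splits=4, shuffle=True, random_state=10)
for inner_ind, outer_ind in skf.split(X, y):
    # the hyper-parameter search sees only X[inner_ind]; X[outer_ind] stays
    # untouched until the final per-fold model is evaluated
    print(len(inner_ind), len(outer_ind))  # 30 10 for each outer fold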
def run_experiment(train_set, valid_set, test_set, model_name, optimizer_name,
                   init_lr, scheduler_name, use_norm_constraint, weight_decay,
                   schedule_weight_decay, restarts, max_epochs,
                   max_increase_epochs, np_th_seed):
    set_random_seeds(np_th_seed, cuda=True)
    # torch.backends.cudnn.benchmark = True  # sometimes crashes?
    if valid_set is not None:
        assert max_increase_epochs is not None
    assert (max_epochs is None) != (restarts is None)
    if max_epochs is None:
        max_epochs = np.sum(restarts)
    n_classes = int(np.max(train_set.y) + 1)
    n_chans = int(train_set.X.shape[1])
    input_time_length = 1000
    if model_name == 'deep':
        model = Deep4Net(n_chans, n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=2).create_network()
    elif model_name == 'shallow':
        model = ShallowFBCSPNet(n_chans, n_classes,
                                input_time_length=input_time_length,
                                final_conv_length=30).create_network()
    elif model_name in ['resnet-he-uniform', 'resnet-he-normal',
                        'resnet-xavier-normal', 'resnet-xavier-uniform']:
        # note: str.lstrip strips a character set, not a prefix, so slice instead
        init_name = model_name[len('resnet-'):]
        from torch.nn import init
        init_fn = {
            'he-uniform': lambda w: init.kaiming_uniform(w, a=0),
            'he-normal': lambda w: init.kaiming_normal(w, a=0),
            'xavier-uniform': lambda w: init.xavier_uniform(w, gain=1),
            'xavier-normal': lambda w: init.xavier_normal(w, gain=1)
        }[init_name]
        model = EEGResNet(in_chans=n_chans,
                          n_classes=n_classes,
                          input_time_length=input_time_length,
                          final_pool_length=10,
                          n_first_filters=48,
                          conv_weight_init_fn=init_fn).create_network()
    else:
        raise ValueError("Unknown model name {:s}".format(model_name))

    if 'resnet' not in model_name:
        to_dense_prediction_model(model)
    model.cuda()
    model.eval()

    out = model(np_to_var(train_set.X[:1, :, :input_time_length, None]).cuda())
    n_preds_per_input = out.cpu().data.numpy().shape[2]

    if optimizer_name == 'adam':
        optimizer = optim.Adam(model.parameters(), weight_decay=weight_decay,
                               lr=init_lr)
    elif optimizer_name == 'adamw':
        optimizer = AdamW(model.parameters(), weight_decay=weight_decay,
                          lr=init_lr)

    iterator = CropsFromTrialsIterator(batch_size=60,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input,
                                       seed=np_th_seed)

    if scheduler_name is not None:
        assert schedule_weight_decay == (optimizer_name == 'adamw')
        if scheduler_name == 'cosine':
            n_updates_per_epoch = sum(
                [1 for _ in iterator.get_batches(train_set, shuffle=True)])
            if restarts is None:
                n_updates_per_period = n_updates_per_epoch * max_epochs
            else:
                n_updates_per_period = np.array(restarts) * n_updates_per_epoch
            scheduler = CosineAnnealing(n_updates_per_period)
            optimizer = ScheduledOptimizer(
                scheduler, optimizer,
                schedule_weight_decay=schedule_weight_decay)
        elif scheduler_name == 'cut_cosine':
            # TODO: integrate with the clause above; kept separate for now
            # to avoid messing with the code
            n_updates_per_epoch = sum(
                [1 for _ in iterator.get_batches(train_set, shuffle=True)])
            if restarts is None:
                n_updates_per_period = n_updates_per_epoch * max_epochs
            else:
                n_updates_per_period = np.array(restarts) * n_updates_per_epoch
            scheduler = CutCosineAnnealing(n_updates_per_period)
            optimizer = ScheduledOptimizer(
                scheduler, optimizer,
                schedule_weight_decay=schedule_weight_decay)
        else:
            raise ValueError("Unknown scheduler")

    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]

    if use_norm_constraint:
        model_constraint = MaxNormDefaultConstraint()
    else:
        model_constraint = None

    loss_function = lambda preds, targets: F.nll_loss(th.mean(preds, dim=2),
                                                      targets)

    if valid_set is not None:
        run_after_early_stop = True
        do_early_stop = True
        remember_best_column = 'valid_misclass'
        stop_criterion = Or([
            MaxEpochs(max_epochs),
            NoDecrease('valid_misclass', max_increase_epochs)
        ])
    else:
        run_after_early_stop = False
        do_early_stop = False
        remember_best_column = None
        stop_criterion = MaxEpochs(max_epochs)

    exp = Experiment(model, train_set, valid_set, test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column=remember_best_column,
                     run_after_early_stop=run_after_early_stop,
                     cuda=True,
                     do_early_stop=do_early_stop)
    exp.run()
    return exp
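# --- Aside: restart periods in updates, not epochs ---------------------------
# With restarts given, each cosine period above is its epoch budget times the
# number of optimizer updates per epoch. Illustrative numbers (not from the
# source):
import numpy as np

n_updates_per_epoch = 25              # batches the iterator yields per epoch
restarts = [100, 200, 400]            # epoch budget of each cosine period
print(np.array(restarts) * n_updates_per_epoch)  # [ 2500  5000 10000]
print(int(np.sum(restarts)))          # 700, the max_epochs fallback above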
def train_model(self, train_set_1, val_set_1, test_set_1, train_set_2,
                val_set_2, test_set_2, save_model):
    """
    :param train_set_1: (np.array) n_trials*n_channels*n_samples
    :param val_set_1: (np.array) n_trials*n_channels*n_samples
    :param test_set_1: (np.array) n_trials*n_channels*n_samples - can be None
        when training on inner-fold
    :param train_set_2: (np.array) n_trials*n_channels*n_samples
    :param val_set_2: (np.array) n_trials*n_channels*n_samples
    :param test_set_2: (np.array) n_trials*n_channels*n_samples - can be None
        when training on inner-fold
    :param save_model: (Bool) True if trained model is to be saved
    :return: Accuracy and loss scores for the model trained with a given
        set of hyper-parameters
    """
    model = self.call_model()
    predictions = None
    probabilities = None
    set_random_seeds(seed=20190629, cuda=self.cuda)
    if self.cuda:
        model.cuda()
        torch.backends.cudnn.deterministic = True
        model = torch.nn.DataParallel(model)
        log.info("Cuda in use")
    log.info("Model: \n{:s}".format(str(model)))
    optimizer = optim.Adam(model.parameters(), lr=self.learning_rate,
                           weight_decay=0.01, eps=1e-8, amsgrad=False)
    stop_criterion = Or([
        MaxEpochs(self.epochs),
        NoDecrease('valid_loss', self.max_increase_epochs)
    ])
    model_loss_function = None

    # Set up and run the selected model
    model_test = Experiment(model, train_set_1, val_set_1, train_set_2,
                            val_set_2, test_set_1=test_set_1,
                            test_set_2=test_set_2, iterator=self.iterator,
                            loss_function=self.loss, optimizer=optimizer,
                            lr_scheduler=self.lr_scheduler(
                                optimizer, step_size=self.lr_step,
                                gamma=self.lr_gamma),
                            model_constraint=self.model_constraint,
                            monitors=self.monitors,
                            stop_criterion=stop_criterion,
                            remember_best_column='valid_misclass',
                            run_after_early_stop=True,
                            model_loss_function=model_loss_function,
                            cuda=self.cuda, save_file=self.model_save_path,
                            tag=self.tag, save_model=save_model)
    model_test.run()
    model_acc = model_test.epochs_df['valid_misclass'].astype('float')
    model_loss = model_test.epochs_df['valid_loss'].astype('float')
    current_val_acc = 1 - current_acc(model_acc)
    current_val_loss = current_loss(model_loss)
    test_accuracy = None
    if test_set_1 is not None and test_set_2 is not None:
        val_metric_index = self.get_model_index(model_test.epochs_df)
        test_accuracy = round(
            (1 - model_test.epochs_df['test_misclass'].iloc[val_metric_index])
            * 100, 3)
        predictions = model_test.model_predictions
        probabilities = model_test.model_probabilities
    return (current_val_acc, current_val_loss, test_accuracy, model_test,
            predictions, probabilities)