Example No. 1
def test_experiment_sample_windows():
    data_rng = RandomState(398765905)
    rand_topo = data_rng.rand(200, 10, 10, 3).astype(np.float32)
    rand_y = np.int32(data_rng.rand(200) > 0.5)
    rand_topo[rand_y == 1] += 0.1
    rand_set = DenseDesignMatrixWrapper(topo_view=rand_topo, y=rand_y)

    lasagne.random.set_rng(RandomState(9859295))
    in_layer = InputLayer(shape=[None, 10, 5, 3])
    network = DenseLayer(incoming=in_layer, name='softmax',
                         num_units=2, nonlinearity=lasagne.nonlinearities.softmax)
    updates_modifier = MaxNormConstraint({'softmax': 0.5})

    dataset = rand_set

    dataset_iterator = WindowsIterator(n_samples_per_window=5, batch_size=60)

    preprocessor = OnlineAxiswiseStandardize(axis=['c', 1])
    dataset_splitter = FixedTrialSplitter(n_train_trials=150,
                                          valid_set_fraction=0.1)
    updates_var_func = lasagne.updates.adam
    loss_var_func = lasagne.objectives.categorical_crossentropy
    monitors = [braindecode.veganlasagne.monitors.LossMonitor(),
                braindecode.veganlasagne.monitors.WindowMisclassMonitor(),
                braindecode.veganlasagne.monitors.RuntimeMonitor()]
    stop_criterion = braindecode.veganlasagne.stopping.MaxEpochs(num_epochs=5)
    
    
    exp = Experiment(network, dataset, dataset_splitter, preprocessor,
              dataset_iterator, loss_var_func, updates_var_func, 
              updates_modifier, monitors, stop_criterion,
              remember_best_chan='valid_misclass',
              run_after_early_stop=True)
    exp.setup()
    exp.run()
    
    assert np.allclose(
        [0.629630,0.140741,0.029630,0.022222,0.000000,0.000000,0.000000],
        exp.monitor_chans['train_misclass'],
        rtol=1e-4, atol=1e-4)
    assert np.allclose(
        [0.400000,0.133333,0.066667,0.000000,0.000000,0.000000,0.000000],
        exp.monitor_chans['valid_misclass'],
        rtol=1e-4, atol=1e-4)
    assert np.allclose(
        [0.560000,0.060000,0.000000,0.000000,0.000000,0.000000,0.000000],
        exp.monitor_chans['test_misclass'],
        rtol=1e-4, atol=1e-4)
    assert np.allclose(
        [1.180485, 0.574264, 0.420023, 0.330909, 0.278569, 0.245692, 0.242845],
        exp.monitor_chans['train_loss'],
        rtol=1e-4, atol=1e-4)
    assert np.allclose(
        [1.016782, 0.514049, 0.370485, 0.288948, 0.240913, 0.211189, 0.215967],
        exp.monitor_chans['valid_loss'],
        rtol=1e-4, atol=1e-4)
    assert np.allclose(
        [1.031832, 0.504570, 0.352317, 0.269810, 0.223904, 0.196681, 0.197899],
        exp.monitor_chans['test_loss'],
        rtol=1e-4, atol=1e-4)
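For reference, exp.monitor_chans used in the assertions above is a plain dict mapping channel names to per-epoch lists of values, so a finished run can be inspected directly; a minimal sketch, assuming exp is the experiment run in this test:

# Minimal inspection sketch: each channel, e.g. 'train_misclass' or
# 'valid_loss', holds one value per monitored epoch.
for name, values in exp.monitor_chans.items():
    print("{:s}: last value {:.4f}".format(name, values[-1]))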
Example No. 2
    def evaluate(self, X, y):
        """
        Evaluate, i.e., compute metrics on given inputs and targets.

        Parameters
        ----------
        X: ndarray
            Input data.
        y: 1darray
            Targets.

        Returns
        -------
        result: dict
            Dictionary with result metrics.

        """
        X = _ensure_float32(X)
        stop_criterion = MaxEpochs(0)
        train_set = SignalAndTarget(X, y)
        model_constraint = None
        valid_set = None
        test_set = None
        loss_function = self.loss
        if self.cropped:
            loss_function = lambda outputs, targets: self.loss(
                th.mean(outputs, dim=2), targets
            )

        # reset runtime monitor if it exists...
        for monitor in self.monitors:
            if hasattr(monitor, "last_call_time"):
                monitor.last_call_time = time.time()
        exp = Experiment(
            self.network,
            train_set,
            valid_set,
            test_set,
            iterator=self.iterator,
            loss_function=loss_function,
            optimizer=self.optimizer,
            model_constraint=model_constraint,
            monitors=self.monitors,
            stop_criterion=stop_criterion,
            remember_best_column=None,
            run_after_early_stop=False,
            cuda=self.is_cuda,
            log_0_epoch=True,
            do_early_stop=False,
        )

        exp.monitor_epoch({"train": train_set})

        result_dict = dict(
            [
                (key.replace("train_", ""), val)
                for key, val in dict(exp.epochs_df.iloc[0]).items()
            ]
        )
        return result_dict
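A hypothetical usage sketch for this evaluate method; clf stands for an already-fitted wrapper object exposing it, and the arrays as well as the exact keys of the returned dict are illustrative only:

import numpy as np

# placeholder evaluation data: trials x channels x time samples
X_eval = np.random.randn(40, 22, 500).astype(np.float32)
y_eval = np.random.randint(0, 2, size=40).astype(np.int64)

metrics = clf.evaluate(X_eval, y_eval)  # e.g. {'loss': ..., 'misclass': ..., 'runtime': ...}
print(metrics)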
Example No. 3
def train(config):
    cuda = True
    model = config['model']
    if model == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=2,
                         config=config).create_network()

    to_dense_prediction_model(model)
    if cuda:
        model.cuda()

    log.info("Model: \n{:s}".format(str(model)))
    dummy_input = np_to_var(train_set.X[:1, :, :, None])
    if cuda:
        dummy_input = dummy_input.cuda()
    out = model(dummy_input)

    n_preds_per_input = out.cpu().data.numpy().shape[2]

    optimizer = optim.Adam(model.parameters())

    iterator = CropsFromTrialsIterator(batch_size=60,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)

    stop_criterion = Or([MaxEpochs(20), NoDecrease('valid_misclass', 80)])

    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]

    model_constraint = MaxNormDefaultConstraint()

    loss_function = lambda preds, targets: F.nll_loss(
        th.mean(preds, dim=2, keepdim=False), targets)

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    print(exp.rememberer)
    return exp.rememberer.lowest_val
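A hypothetical call sketch for the function above; it reads n_chans, n_classes, input_time_length, train_set, valid_set, test_set and log from module-level globals, and only the 'deep' branch of config['model'] is handled:

# Hypothetical invocation; everything except train itself is assumed to be
# prepared earlier in the script (data loading, logging, etc.).
config = {'model': 'deep'}
lowest_valid_misclass = train(config)
print(lowest_valid_misclass)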
Example No. 4
def network_model(model, train_set, test_set, valid_set, n_chans, input_time_length, cuda):
	
	max_epochs = 30 
	max_increase_epochs = 10 
	batch_size = 64 
	init_block_size = 1000

	set_random_seeds(seed=20190629, cuda=cuda)

	n_classes = 2 
	n_chans = n_chans
	input_time_length = input_time_length

	if model == 'deep':
		model = Deep4Net(n_chans, n_classes, input_time_length=input_time_length,
						 final_conv_length='auto').create_network()

	elif model == 'shallow':
		model = ShallowFBCSPNet(n_chans, n_classes, input_time_length=input_time_length,
								final_conv_length='auto').create_network()

	if cuda:
		model.cuda()

	log.info("%s model: ".format(str(model))) 

	optimizer = AdamW(model.parameters(), lr=0.00625, weight_decay=0)

	iterator = BalancedBatchSizeIterator(batch_size=batch_size) 

	stop_criterion = Or([MaxEpochs(max_epochs),
						 NoDecrease('valid_misclass', max_increase_epochs)])

	monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

	model_constraint = None
	print(train_set.X.shape[0]) 

	model_test = Experiment(model, train_set, valid_set, test_set, iterator=iterator,
							loss_function=F.nll_loss, optimizer=optimizer,
							model_constraint=model_constraint, monitors=monitors,
							stop_criterion=stop_criterion, remember_best_column='valid_misclass',
							run_after_early_stop=True, cuda=cuda)

	model_test.run()
	return model_test 
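An illustrative call, assuming train_set, valid_set and test_set are braindecode SignalAndTarget objects prepared beforehand; the channel count and trial length below are placeholders:

exp = network_model('shallow', train_set, test_set, valid_set,
                    n_chans=22, input_time_length=1125, cuda=False)
print(exp.epochs_df)  # per-epoch losses and misclass rates collected by the monitors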
Example No. 5
def train(train_set, test_set, model, iterator, monitors, loss_function,
          max_epochs, cuda):
    if cuda:
        model.cuda()

    optimizer = AdamW(model.parameters(), lr=1 * 0.01,
                      weight_decay=0.5 * 0.001)  # these are good values for the deep model

    stop_criterion = MaxEpochs(max_epochs)
    model_constraint = MaxNormDefaultConstraint()

    n_updates_per_epoch = sum(
        [1 for _ in iterator.get_batches(train_set, shuffle=True)])
    n_updates_per_period = n_updates_per_epoch * max_epochs
    scheduler = CosineAnnealing(n_updates_per_period)
    optimizer = ScheduledOptimizer(scheduler,
                                   optimizer,
                                   schedule_weight_decay=True)

    exp = Experiment(model,
                     train_set,
                     None,
                     test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     remember_best_column=None,
                     stop_criterion=stop_criterion,
                     cuda=cuda,
                     run_after_early_stop=False,
                     do_early_stop=False)
    exp.run()
    return exp
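Note that the cosine annealing period is the total number of updates (batches per epoch times max_epochs), and wrapping AdamW in ScheduledOptimizer with schedule_weight_decay=True lets the schedule decay the weight decay along with the learning rate. A hypothetical call, reusing the kind of objects built in the surrounding examples:

# Illustrative call; model, train_set, test_set and the monitors are assumed
# to be created as in the other examples in this file.
iterator = BalancedBatchSizeIterator(batch_size=64)
monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]
exp = train(train_set, test_set, model, iterator, monitors,
            loss_function=F.nll_loss, max_epochs=30, cuda=False)
print(exp.epochs_df.tail())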
Example No. 6
    def evaluate(self, X, y, batch_size=32):
        # Create a dummy experiment for the evaluation
        iterator = BalancedBatchSizeIterator(batch_size=batch_size,
                                             seed=0)  # seed irrelevant
        stop_criterion = MaxEpochs(0)
        train_set = SignalAndTarget(X, y)
        model_constraint = None
        valid_set = None
        test_set = None
        loss_function = self.loss
        if self.cropped:
            loss_function = lambda outputs, targets: \
                self.loss(th.mean(outputs, dim=2), targets)
        exp = Experiment(self.network,
                         train_set,
                         valid_set,
                         test_set,
                         iterator=iterator,
                         loss_function=loss_function,
                         optimizer=self.optimizer,
                         model_constraint=model_constraint,
                         monitors=self.monitors,
                         stop_criterion=stop_criterion,
                         remember_best_column=None,
                         run_after_early_stop=False,
                         cuda=self.cuda,
                         print_0_epoch=False,
                         do_early_stop=False)

        exp.monitor_epoch({'train': train_set})

        result_dict = dict([
            (key.replace('train_', ''), val)
            for key, val in dict(exp.epochs_df.iloc[0]).items()
        ])
        return result_dict
Example No. 7
def run_exp(data_folder, subject_id, low_cut_hz, model, cuda):
    train_filename = 'A{:02d}T.gdf'.format(subject_id)
    test_filename = 'A{:02d}E.gdf'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace('.gdf', '.mat')
    test_label_filepath = test_filepath.replace('.gdf', '.mat')

    train_loader = BCICompetition4Set2A(train_filepath,
                                        labels_filename=train_label_filepath)
    test_loader = BCICompetition4Set2A(test_filepath,
                                       labels_filename=test_label_filepath)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing

    train_cnt = train_cnt.drop_channels(
        ['STI 014', 'EOG-left', 'EOG-central', 'EOG-right'])
    assert len(train_cnt.ch_names) == 22
    # let's convert to microvolts for numerical stability of next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a, low_cut_hz, 38, train_cnt.info['sfreq'], filt_order=3, axis=1),
        train_cnt)
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T, train_cnt)

    test_cnt = test_cnt.drop_channels(
        ['STI 014', 'EOG-left', 'EOG-central', 'EOG-right'])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a, low_cut_hz, 38, test_cnt.info['sfreq'], filt_order=3, axis=1),
        test_cnt)
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T, test_cnt)

    marker_def = OrderedDict([('Left Hand', [1]), (
        'Right Hand',
        [2],
    ), ('Foot', [3]), ('Tongue', [4])])
    ival = [-500, 4000]

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(train_set,
                                               first_set_fraction=0.8)

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto').create_network()
    elif model == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())

    iterator = BalancedBatchSizeIterator(batch_size=60)

    stop_criterion = Or([MaxEpochs(1600), NoDecrease('valid_misclass', 160)])

    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=F.nll_loss,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    return exp
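A hypothetical invocation of run_exp; the data folder path is a placeholder for wherever the BCI Competition IV 2a .gdf/.mat files are stored locally:

exp = run_exp(data_folder='./BCICIV_2a_gdf/', subject_id=1,
              low_cut_hz=4, model='shallow', cuda=False)
print(exp.epochs_df.tail())  # per-epoch training history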
Example No. 8
    def _run_experiments_with_string(self, experiment_index, train_str):
        assert experiment_index >= self._get_start_id()
        assert experiment_index < self._get_stop_id()
        lasagne.random.set_rng(RandomState(9859295))
        # Save train string now, will be overwritten later after 
        # input dimensions determined, save now for debug in
        # case of crash
        if not self._dry_run:
            self._save_train_string(train_str, experiment_index)
        starttime = time.time()
        
        train_dict = self._load_without_layers(train_str)
        log.info("With params...")
        if not self._quiet:
            pprint(train_dict['original_params'])
        if self._dry_run:
            # Do not do the loading or training...
            # Only go until here to show the train params
            return
        
        if self._batch_test:
            # TODO: put into function
            # load layers, load data with dimensions of the layer
            # create experiment with max epochs 2, run
            from braindecode.datasets.random import RandomSet
            train_str = train_str.replace('in_cols', '1')
            train_str = train_str.replace('in_sensors', '32')
            train_dict = yaml_parse.load(train_str)
            layers = load_layers_from_dict(train_dict)
            final_layer = layers[-1]
            n_chans = layers[0].shape[1]
            n_classes = final_layer.output_shape[1]
            n_samples = 500000
            # set n_sample_preds in case of cnt model
            if (np.any([hasattr(l, 'n_stride') for l in layers])):
                n_sample_preds =  get_n_sample_preds(final_layer)
                log.info("Setting n_sample preds automatically to {:d}".format(
                    n_sample_preds))
                for monitor in train_dict['exp_args']['monitors']:
                    if hasattr(monitor, 'n_sample_preds'):
                        monitor.n_sample_preds = n_sample_preds
                train_dict['exp_args']['iterator'].n_sample_preds = n_sample_preds
                log.info("Input window length is {:d}".format(
                    get_model_input_window(final_layer)))
                # make at least batches
                n_samples = int(n_sample_preds * 1.5 * 200)
            dataset = RandomSet(topo_shape=[n_samples, n_chans, 1, 1], 
                y_shape=[n_samples, n_classes])
            dataset.load()
            splitter = FixedTrialSplitter(n_train_trials=int(n_samples*0.8), 
                valid_set_fraction=0.1)
            train_dict['exp_args']['preprocessor'] = None
            train_dict['exp_args']['stop_criterion'] = MaxEpochs(1)
            train_dict['exp_args']['iterator'].batch_size = 1
            # TODO: set stop criterion to max epochs =1
            #  change batch_size in iterator
            exp = Experiment(final_layer, dataset, splitter,
                **train_dict['exp_args'])
            exp.setup()
            exp.run_until_early_stop()
            datasets = exp.dataset_provider.get_train_valid_test(exp.dataset)
            for batch_size in range(32,200,5):
                train_dict['exp_args']['stop_criterion'].num_epochs += 2
                log.info("Running with batch size {:d}".format(batch_size))
                train_dict['exp_args']['iterator'].batch_size = batch_size
                exp.run_until_stop(datasets, remember_best=False)
            return
            
            
        dataset = train_dict['dataset'] 
        dataset.load()
        iterator = train_dict['exp_args']['iterator']
        splitter = train_dict['dataset_splitter']
        if dataset.__class__.__name__ == 'EpilepsySet':
            log.info("Reducing to float16 for epilepsy set...")
            dataset.seizure_topo = np.float16(dataset.seizure_topo)
            dataset.non_seizure_topo = np.float16(dataset.non_seizure_topo)
        else:
            # todo: remove this?
            log.info("Determining dataset dimensions to set possible model params...")
            train_set = splitter.split_into_train_valid_test(dataset)['train']
            batch_gen = iterator.get_batches(train_set, shuffle=True)
            dummy_batch_topo = batch_gen.next()[0]
            del train_set
            # not for ultrasound: assert 'in_sensors' in train_str
            # not for cnt net assert 'in_rows' in train_str
            # not for resnet: assert 'in_cols' in train_str
            train_str = train_str.replace('in_sensors',
                str(dummy_batch_topo.shape[1]))
            train_str = train_str.replace('in_rows',
                str(dummy_batch_topo.shape[2]))
            train_str = train_str.replace('in_cols', 
                str(dummy_batch_topo.shape[3]))
        
        self._save_train_string(train_str, experiment_index)
        
        
        # reset rng for actual loading of layers, so you can reproduce it 
        # when you load the file later
        lasagne.random.set_rng(RandomState(9859295))
        train_dict =  yaml_parse.load(train_str)
            
        layers = load_layers_from_dict(train_dict)
        final_layer = layers[-1]
        assert len(np.setdiff1d(layers, 
            lasagne.layers.get_all_layers(final_layer))) == 0, ("All layers "
            "should be used, unused {:s}".format(str(np.setdiff1d(layers, 
            lasagne.layers.get_all_layers(final_layer)))))
        # Set n sample preds in case of cnt model
        if (np.any([hasattr(l, 'n_stride') for l in layers])):
            # Can this be moved up and the duplication in the batch-test
            # if-clause above be removed?
            n_sample_preds =  get_n_sample_preds(final_layer)
            log.info("Setting n_sample preds automatically to {:d}".format(
                n_sample_preds))
            for monitor in train_dict['exp_args']['monitors']:
                if hasattr(monitor, 'n_sample_preds'):
                    monitor.n_sample_preds = n_sample_preds
            train_dict['exp_args']['iterator'].n_sample_preds = n_sample_preds
            log.info("Input window length is {:d}".format(
                get_model_input_window(final_layer)))
        
        if not self._cross_validation:
            # for now lets not do that, current models seem fine again.
#             if (dataset.__class__.__name__ == 'EpilepsySet') and self._pred_loss_hack:
#                 from braindecode.epilepsy.experiment import EpilepsyExperiment
#                 log.info("Creating epilepsy experiment with the pred loss hack")
#                 exp = EpilepsyExperiment(final_layer, dataset, splitter,
#                     **train_dict['exp_args'])
#             else:
            exp = Experiment(final_layer, dataset, splitter,
                    **train_dict['exp_args'])
            exp.setup()
            exp.run()
            endtime = time.time()
            
            
            model = exp.final_layer
                
            # dummy predictions targets
            predictions = [0,3,1,2,3,4]
            targets = [3,4,1,2,3,4]
                
            result_or_results = Result(parameters=train_dict['original_params'],
                templates={}, 
                training_time=endtime - starttime, 
                monitor_channels=exp.monitor_chans, 
                predictions=predictions,
                targets=targets)
               
                
        else: # cross validation
            assert False, ("cross validation not used in long time, not up to date"
                " for example targets predictions not added")
            # default 5 folds for now
            n_folds = train_dict['num_cv_folds']
            exp_cv = ExperimentCrossValidation(final_layer, 
                dataset, exp_args=train_dict['exp_args'], n_folds=n_folds,
                shuffle=self._shuffle)
            exp_cv.run()
            endtime = time.time()
            result_or_results = []
            for i_fold in xrange(n_folds):
                res = Result(parameters=train_dict['original_params'],
                templates={}, 
                training_time=endtime - starttime, 
                monitor_channels=exp_cv.all_monitor_chans[i_fold], 
                predictions=[0,3,1,2,3,4],
                targets=[3,4,1,2,3,4])
                result_or_results.append(res)
            model = exp_cv.all_layers
            
        if not os.path.exists(self._folder_paths[experiment_index]):
            os.makedirs(self._folder_paths[experiment_index])
        
        result_file_name = self._get_result_save_path(experiment_index)
        
        log.info("Saving result...")
        with open(result_file_name, 'w') as resultfile:
            pickle.dump(result_or_results, resultfile)
        
        model_file_name = self._get_model_save_path(experiment_index)
        param_file_name = model_file_name.replace('.pkl', '.npy')
        np.save(param_file_name, lasagne.layers.get_all_param_values(model))
        
        # Possibly make kaggle submission file
        if isinstance(dataset, KaggleGraspLiftSet) and splitter.use_test_as_valid:
            experiment_save_id = int(
                self._base_save_paths[experiment_index].split("/")[-1])
            create_submission_csv_for_one_subject(self._folder_paths[experiment_index],
                exp.dataset, iterator,
                train_dict['exp_args']['preprocessor'], 
                final_layer, experiment_save_id)
        elif isinstance(dataset, AllSubjectsKaggleGraspLiftSet) and splitter.use_test_as_valid:
            experiment_save_id = int(
                self._base_save_paths[experiment_index].split("/")[-1])
            create_submission_csv_for_all_subject_model(
                self._folder_paths[experiment_index],
                exp.dataset, exp.dataset_provider, iterator,
                final_layer, experiment_save_id)
        elif isinstance(splitter, SeveralSetsSplitter):
            pass # nothing to do in this case

        # very hacky create predictions targets :)
        # Not done earlier as there were weird theano crashes
        if exp.monitors[2].__class__.__name__ == 'CntTrialMisclassMonitor':
            del dataset
            del exp
            add_labels_to_cnt_exp_result(self._base_save_paths[experiment_index])
Example No. 9
def test_experiment_class():
    import mne
    from mne.io import concatenate_raws

    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id, event_codes)

    # Load each of the files
    parts = [mne.io.read_raw_edf(path, preload=True, stim_channel='auto',
                                 verbose='WARNING')
             for path in physionet_paths]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info, meg=False, eeg=True, stim=False,
                                      eog=False,
                                      exclude='bads')

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(raw, events, dict(hands=2, feet=3), tmin=1, tmax=4.1,
                         proj=False, picks=eeg_channel_inds,
                         baseline=None, preload=True)
    import numpy as np
    from braindecode.datautil.signal_target import SignalAndTarget
    from braindecode.datautil.splitters import split_into_two_sets
    # Convert data from volt to millivolt
    # Pytorch expects float32 for input and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    train_set = SignalAndTarget(X[:60], y=y[:60])
    test_set = SignalAndTarget(X[60:], y=y[60:])

    train_set, valid_set = split_into_two_sets(train_set,
                                               first_set_fraction=0.8)
    from braindecode.models.shallow_fbcsp import ShallowFBCSPNet
    from torch import nn
    from braindecode.torch_ext.util import set_random_seeds
    from braindecode.models.util import to_dense_prediction_model

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = train_set.X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(in_chans=in_chans, n_classes=n_classes,
                            input_time_length=input_time_length,
                            final_conv_length=12).create_network()
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    from torch import optim

    optimizer = optim.Adam(model.parameters())

    from braindecode.torch_ext.util import np_to_var
    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    print("{:d} predictions per input/trial".format(n_preds_per_input))

    from braindecode.experiments.experiment import Experiment
    from braindecode.datautil.iterators import CropsFromTrialsIterator
    from braindecode.experiments.monitors import RuntimeMonitor, LossMonitor, \
        CroppedTrialMisclassMonitor, MisclassMonitor
    from braindecode.experiments.stopcriteria import MaxEpochs
    import torch.nn.functional as F
    import torch as th
    from braindecode.torch_ext.modules import Expression
    # Iterator is used to iterate over datasets both for training
    # and evaluation
    iterator = CropsFromTrialsIterator(batch_size=32,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)

    # Loss function takes predictions as they come out of the network and the targets
    # and returns a loss
    loss_function = lambda preds, targets: F.nll_loss(
        th.mean(preds, dim=2, keepdim=False), targets)

    # Could be used to apply some constraint on the models, then should be object
    # with apply method that accepts a module
    model_constraint = None
    # Monitors log the training progress
    monitors = [LossMonitor(), MisclassMonitor(col_suffix='sample_misclass'),
                CroppedTrialMisclassMonitor(input_time_length),
                RuntimeMonitor(), ]
    # Stop criterion determines when the first stop happens
    stop_criterion = MaxEpochs(4)
    exp = Experiment(model, train_set, valid_set, test_set, iterator,
                     loss_function, optimizer, model_constraint,
                     monitors, stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True, batch_modifier=None, cuda=cuda)

    # need to set up Python logging beforehand to be able to see the output
    import logging
    import sys
    logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                        level=logging.DEBUG, stream=sys.stdout)
    exp.run()

    import pandas as pd
    from io import StringIO
    compare_df = pd.read_csv(StringIO(
        'train_loss,valid_loss,test_loss,train_sample_misclass,valid_sample_misclass,'
        'test_sample_misclass,train_misclass,valid_misclass,test_misclass\n'
        '14.167170524597168,13.910758018493652,15.945781707763672,0.5,0.5,'
        '0.5333333333333333,0.5,0.5,0.5333333333333333\n'
        '1.1735659837722778,1.4342904090881348,1.8664429187774658,0.4629567736185384,'
        '0.5120320855614973,0.5336007130124778,0.5,0.5,0.5333333333333333\n'
        '1.3168460130691528,1.60431969165802,1.9181344509124756,0.49298128342245995,'
        '0.5109180035650625,0.531729055258467,0.5,0.5,0.5333333333333333\n'
        '0.8465543389320374,1.280307412147522,1.439755916595459,0.4413435828877005,'
        '0.5461229946524064,0.5283422459893048,0.47916666666666663,0.5,'
        '0.5333333333333333\n0.6977059841156006,1.1762590408325195,1.2779350280761719,'
        '0.40290775401069523,0.588903743315508,0.5307486631016043,0.5,0.5,0.5\n'
        '0.7934166193008423,1.1762590408325195,1.2779350280761719,0.4401069518716577,'
        '0.588903743315508,0.5307486631016043,0.5,0.5,0.5\n0.5982189178466797,'
        '0.8581563830375671,0.9598925113677979,0.32032085561497325,0.47660427807486627,'
        '0.4672905525846702,0.31666666666666665,0.5,0.4666666666666667\n0.5044312477111816,'
        '0.7133197784423828,0.8164243102073669,0.2591354723707665,0.45699643493761144,'
        '0.4393048128342246,0.16666666666666663,0.41666666666666663,0.43333333333333335\n'
        '0.4815250039100647,0.6736412644386292,0.8016976714134216,0.23413547237076648,'
        '0.39505347593582885,0.42932263814616756,0.15000000000000002,0.41666666666666663,0.5\n'))

    for col in compare_df:
        np.testing.assert_allclose(np.array(compare_df[col]),
                                   exp.epochs_df[col],
                                   rtol=1e-3, atol=1e-4)
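If the reference values in compare_df ever need to be regenerated after an intentional change, they can be reproduced from the run itself; a small sketch (float formatting may differ slightly from the literal string above):

# Dump the monitored epochs in the same column layout as compare_df.
print(exp.epochs_df.to_csv(index=False))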
Example No. 10
def run_exp(epoches, batch_size, subject_num, model_type, cuda, single_subject,
            single_subject_num):
    # ival = [-500, 4000]
    max_increase_epochs = 160

    # Preprocessing
    X, y = loadSubjects(subject_num, single_subject, single_subject_num)
    X = X.astype(np.float32)
    y = y.astype(np.int64)
    X, y = shuffle(X, y)

    trial_length = X.shape[2]
    print("trial_length " + str(trial_length))
    print("trying to run with {} sec trials ".format((trial_length - 1) / 256))
    print("y")
    print(y)
    trainingSampleSize = int(len(X) * 0.6)
    valudationSampleSize = int(len(X) * 0.2)
    testSampleSize = int(len(X) * 0.2)
    print("INFO : Training sample size: {}".format(trainingSampleSize))
    print("INFO : Validation sample size: {}".format(valudationSampleSize))
    print("INFO : Test sample size: {}".format(testSampleSize))

    train_set = SignalAndTarget(X[:trainingSampleSize],
                                y=y[:trainingSampleSize])
    valid_set = SignalAndTarget(
        X[trainingSampleSize:(trainingSampleSize + valudationSampleSize)],
        y=y[trainingSampleSize:(trainingSampleSize + valudationSampleSize)])
    test_set = SignalAndTarget(X[(trainingSampleSize + valudationSampleSize):],
                               y=y[(trainingSampleSize +
                                    valudationSampleSize):])

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 3
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model_type == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto').create_network()
    elif model_type == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    elif model_type == 'eegnet':
        model = EEGNetv4(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())

    iterator = BalancedBatchSizeIterator(batch_size=batch_size)

    stop_criterion = Or([
        MaxEpochs(epoches),
        NoDecrease('valid_misclass', max_increase_epochs)
    ])

    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=F.nll_loss,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    # th.save(model, "models\{}-cropped-singleSubjectNum{}-{}sec-{}epoches-torch_model".format(model_type, single_subject_num, ((trial_length - 1) / 256), epoches))
    return exp
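A hypothetical call sketch; loadSubjects and the subject numbering are defined elsewhere in the original script, so the argument values below are placeholders:

exp = run_exp(epoches=30, batch_size=64, subject_num=9, model_type='shallow',
              cuda=False, single_subject=True, single_subject_num=1)
print(exp.epochs_df.tail())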
Example No. 11
    def fit(
        self,
        train_X,
        train_y,
        epochs,
        batch_size,
        input_time_length=None,
        validation_data=None,
        model_constraint=None,
        remember_best_column=None,
        scheduler=None,
        log_0_epoch=True,
    ):
        """
        Fit the model using the given training data.
        
        Will set the `epochs_df` attribute to a pandas DataFrame holding the
        history of the training process.
        
        Parameters
        ----------
        train_X: ndarray
            Training input data
        train_y: 1darray
            Training labels
        epochs: int
            Number of epochs to train
        batch_size: int
        input_time_length: int, optional
            Super crop size, i.e. the temporal window that is pushed through
            the network in one forward pass; see the cropped decoding tutorial.
        validation_data: (ndarray, 1darray), optional
            X and y for validation set if wanted
        model_constraint: object, optional
            You can supply :class:`.MaxNormDefaultConstraint` if wanted.
        remember_best_column: string, optional
            In case you want to do an early stopping/reset parameters to some
            "best" epoch, define here the monitored value whose minimum
            determines the best epoch.
        scheduler: 'cosine' or None, optional
            Whether to use cosine annealing (:class:`.CosineAnnealing`).
        log_0_epoch: bool
            Whether to compute the metrics once before training as well.

        Returns
        -------
        exp: Experiment
            Underlying braindecode :class:`.Experiment`
        """
        if (not hasattr(self, "compiled")) or (not self.compiled):
            raise ValueError(
                "Compile the model first by calling model.compile(loss, optimizer, metrics)"
            )

        if self.cropped and input_time_length is None:
            raise ValueError(
                "In cropped mode, need to specify input_time_length,"
                "which is the number of timesteps that will be pushed through"
                "the network in a single pass.")

        train_X = _ensure_float32(train_X)
        if self.cropped:
            self.network.eval()
            test_input = np_to_var(
                np.ones(
                    (1, train_X[0].shape[0], input_time_length) +
                    train_X[0].shape[2:],
                    dtype=np.float32,
                ))
            while len(test_input.size()) < 4:
                test_input = test_input.unsqueeze(-1)
            if self.is_cuda:
                test_input = test_input.cuda()
            out = self.network(test_input)
            n_preds_per_input = out.cpu().data.numpy().shape[2]
            self.iterator = CropsFromTrialsIterator(
                batch_size=batch_size,
                input_time_length=input_time_length,
                n_preds_per_input=n_preds_per_input,
                seed=self.seed_rng.randint(0,
                                           np.iinfo(np.int32).max - 1),
            )
        else:
            self.iterator = BalancedBatchSizeIterator(
                batch_size=batch_size,
                seed=self.seed_rng.randint(0,
                                           np.iinfo(np.int32).max - 1),
            )
        if log_0_epoch:
            stop_criterion = MaxEpochs(epochs)
        else:
            stop_criterion = MaxEpochs(epochs - 1)
        train_set = SignalAndTarget(train_X, train_y)
        optimizer = self.optimizer
        if scheduler is not None:
            assert (scheduler == "cosine"
                    ), "Supply either 'cosine' or None as scheduler."
            n_updates_per_epoch = sum([
                1 for _ in self.iterator.get_batches(train_set, shuffle=True)
            ])
            n_updates_per_period = n_updates_per_epoch * epochs
            if scheduler == "cosine":
                scheduler = CosineAnnealing(n_updates_per_period)
            schedule_weight_decay = False
            if optimizer.__class__.__name__ == "AdamW":
                schedule_weight_decay = True
            optimizer = ScheduledOptimizer(
                scheduler,
                self.optimizer,
                schedule_weight_decay=schedule_weight_decay,
            )
        loss_function = self.loss
        if self.cropped:
            loss_function = lambda outputs, targets: self.loss(
                th.mean(outputs, dim=2), targets)
        if validation_data is not None:
            valid_X = _ensure_float32(validation_data[0])
            valid_y = validation_data[1]
            valid_set = SignalAndTarget(valid_X, valid_y)
        else:
            valid_set = None
        test_set = None
        self.monitors = [LossMonitor()]
        if self.cropped:
            self.monitors.append(
                CroppedTrialMisclassMonitor(input_time_length))
        else:
            self.monitors.append(MisclassMonitor())
        if self.extra_monitors is not None:
            self.monitors.extend(self.extra_monitors)
        self.monitors.append(RuntimeMonitor())
        exp = Experiment(
            self.network,
            train_set,
            valid_set,
            test_set,
            iterator=self.iterator,
            loss_function=loss_function,
            optimizer=optimizer,
            model_constraint=model_constraint,
            monitors=self.monitors,
            stop_criterion=stop_criterion,
            remember_best_column=remember_best_column,
            run_after_early_stop=False,
            cuda=self.is_cuda,
            log_0_epoch=log_0_epoch,
            do_early_stop=(remember_best_column is not None),
        )
        exp.run()
        self.epochs_df = exp.epochs_df
        return exp
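A hypothetical end-to-end sketch of using this fit method, assuming clf is an already-compiled wrapper exposing it and that the numpy arrays hold trials and labels:

exp = clf.fit(train_X, train_y, epochs=30, batch_size=64,
              validation_data=(valid_X, valid_y),
              model_constraint=MaxNormDefaultConstraint(),
              remember_best_column='valid_misclass',
              scheduler='cosine')
print(clf.epochs_df.tail())  # same history as exp.epochs_df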
Example No. 12
def train_hyperopt(params):
    """ Runs one fold with given parameters and returns test misclass."""
    lasagne.random.set_rng(RandomState(9859295))

    template_name = params.pop('template_name')
    params = adjust_params_for_hyperopt(params)

    config_strings = create_config_strings(template_name)
    config_objects = create_config_objects(config_strings)
    templates, _ = create_templates_variants_from_config_objects(
        config_objects)

    processed_templates, params_without_template_params = process_templates(
        templates, params)
    final_params = process_parameters_by_templates(
        params_without_template_params, processed_templates)

    # go to directory above this source-file
    main_template_filename = os.path.dirname(
        os.path.abspath(os.path.dirname(__file__)))
    # then complete path to config
    main_template_filename = os.path.join(main_template_filename, "configs",
                                          "eegnet_template.yaml")

    with (open(main_template_filename, 'r')) as main_template_file:
        main_template_str = main_template_file.read()

    final_params['original_params'] = 'dummy'
    train_str = Template(main_template_str).substitute(final_params)

    def do_not_load_constructor(loader, node):
        return None

    yaml.add_constructor(u'!DoNotLoad', do_not_load_constructor)
    modified_train_str = train_str.replace('layers: ', 'layers: !DoNotLoad ')
    train_dict = yaml_parse.load(modified_train_str)
    dataset = train_dict['dataset']
    dataset.load()
    dataset_provider = train_dict['dataset_provider']

    assert 'in_sensors' in train_str
    assert 'in_rows' in train_str
    assert 'in_cols' in train_str

    train_str = train_str.replace('in_sensors',
                                  str(dataset.get_topological_view().shape[1]))
    train_str = train_str.replace('in_rows',
                                  str(dataset.get_topological_view().shape[2]))
    train_str = train_str.replace('in_cols',
                                  str(dataset.get_topological_view().shape[3]))

    train_dict = yaml_parse.load(train_str)
    layers = train_dict['layers']
    final_layer = layers[-1]

    # turn off debug/info logging
    logging.getLogger("pylearn2").setLevel(logging.WARN)
    logging.getLogger("braindecode").setLevel(logging.WARN)
    exp = Experiment()
    exp.setup(final_layer, dataset_provider, **train_dict['exp_args'])
    exp.run()
    final_misclass = exp.monitor_chans['test_misclass'][-1]
    print("Result for")
    pprint(params)
    print("Final Test misclass: {:5.4f}".format(float(final_misclass)))
    return final_misclass
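The two-pass YAML load above first neutralizes the layers entry with a !DoNotLoad tag so the dataset dimensions can be read, then substitutes in_sensors/in_rows/in_cols and reloads the full config. A stand-alone sketch of the same pattern with plain PyYAML; the config string and names are illustrative, not the original template:

import yaml

config_str = "n_chans: in_sensors\nlayers: !DoNotLoad [conv, pool]"

# first pass: ignore the layers entry, just read the rest of the config
yaml.SafeLoader.add_constructor('!DoNotLoad', lambda loader, node: None)
first_pass = yaml.safe_load(config_str)   # {'n_chans': 'in_sensors', 'layers': None}

# second pass: fill in the real dimension and load everything
full_str = config_str.replace('in_sensors', '22').replace(' !DoNotLoad', '')
second_pass = yaml.safe_load(full_str)    # {'n_chans': 22, 'layers': ['conv', 'pool']}
print(first_pass, second_pass)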
Example No. 13
optimizer = optim.Adam(model.parameters())

iterator = BalancedBatchSizeIterator(batch_size=batch_size)

stop_criterion = Or([
    MaxEpochs(max_epochs),
    NoDecrease("valid_misclass", max_increase_epochs),
])

monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

model_constraint = MaxNormDefaultConstraint()

exp = Experiment(
    model,
    train_set,
    valid_set,
    test_set,
    iterator=iterator,
    loss_function=F.nll_loss,
    optimizer=optimizer,
    model_constraint=model_constraint,
    monitors=monitors,
    stop_criterion=stop_criterion,
    remember_best_column="valid_misclass",
    run_after_early_stop=True,
    cuda=cuda,
)
exp.run()
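This snippet is module-level code; a sketch of the names it expects to exist beforehand (the values are placeholders, not taken from the original script):

batch_size = 60
max_epochs = 1600
max_increase_epochs = 160
cuda = True  # or torch.cuda.is_available()
# plus model, train_set, valid_set and test_set, e.g. built with
# ShallowFBCSPNet(...).create_network() and SignalAndTarget(X, y) splits
# as in the other examples in this file.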
Example No. 14
def run_exp(data_folder, subject_id, low_cut_hz, model, cuda):
    ival = [-500, 4000]
    max_epochs = 1600
    max_increase_epochs = 160
    batch_size = 60
    high_cut_hz = 38
    factor_new = 1e-3
    init_block_size = 1000
    valid_set_fraction = 0.2

    train_filename = "A{:02d}T.gdf".format(subject_id)
    test_filename = "A{:02d}E.gdf".format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace(".gdf", ".mat")
    test_label_filepath = test_filepath.replace(".gdf", ".mat")

    train_loader = BCICompetition4Set2A(
        train_filepath, labels_filename=train_label_filepath
    )
    test_loader = BCICompetition4Set2A(
        test_filepath, labels_filename=test_label_filepath
    )
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing

    train_cnt = train_cnt.drop_channels(
        ["EOG-left", "EOG-central", "EOG-right"]
    )
    assert len(train_cnt.ch_names) == 22
    # let's convert to microvolts for numerical stability of next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a,
            low_cut_hz,
            high_cut_hz,
            train_cnt.info["sfreq"],
            filt_order=3,
            axis=1,
        ),
        train_cnt,
    )
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T,
            factor_new=factor_new,
            init_block_size=init_block_size,
            eps=1e-4,
        ).T,
        train_cnt,
    )

    test_cnt = test_cnt.drop_channels(["EOG-left", "EOG-central", "EOG-right"])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a,
            low_cut_hz,
            high_cut_hz,
            test_cnt.info["sfreq"],
            filt_order=3,
            axis=1,
        ),
        test_cnt,
    )
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T,
            factor_new=factor_new,
            init_block_size=init_block_size,
            eps=1e-4,
        ).T,
        test_cnt,
    )

    marker_def = OrderedDict(
        [
            ("Left Hand", [1]),
            ("Right Hand", [2]),
            ("Foot", [3]),
            ("Tongue", [4]),
        ]
    )

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(
        train_set, first_set_fraction=1 - valid_set_fraction
    )

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model == "shallow":
        model = ShallowFBCSPNet(
            n_chans,
            n_classes,
            input_time_length=input_time_length,
            final_conv_length="auto",
        ).create_network()
    elif model == "deep":
        model = Deep4Net(
            n_chans,
            n_classes,
            input_time_length=input_time_length,
            final_conv_length="auto",
        ).create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())

    iterator = BalancedBatchSizeIterator(batch_size=batch_size)

    stop_criterion = Or(
        [
            MaxEpochs(max_epochs),
            NoDecrease("valid_misclass", max_increase_epochs),
        ]
    )

    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(
        model,
        train_set,
        valid_set,
        test_set,
        iterator=iterator,
        loss_function=F.nll_loss,
        optimizer=optimizer,
        model_constraint=model_constraint,
        monitors=monitors,
        stop_criterion=stop_criterion,
        remember_best_column="valid_misclass",
        run_after_early_stop=True,
        cuda=cuda,
    )
    exp.run()
    return exp
Example No. 15
def run(ex, data_folder, subject_id, n_chans, clean_train,
        low_cut_hz, train_start_ms, kappa_mode, loss_expression,
        network, filt_order, only_return_exp):
    start_time = time.time()
    assert (only_return_exp is False) or (n_chans is not None) 
    ex.info['finished'] = False

    valid_subject_id = subject_id
    other_subject_ids = range(1, subject_id) + range(subject_id + 1, 10)


    other_sets = [create_dataset(
        data_folder, other_sid, train_start_ms, low_cut_hz,
        filt_order, clean_train) for other_sid in other_subject_ids]
    test_set = create_dataset(
        data_folder, subject_id, train_start_ms, low_cut_hz,
        filt_order, clean_train)

    combined_set = other_sets + [test_set]

    def merge_train_test(single_combined_set):
        return concatenate_sets(single_combined_set.train_set,
                                    single_combined_set.test_set)

    if not only_return_exp:
        for i_set, this_set in enumerate(combined_set):
            log.info("Loading {:d} of {:d}".format(i_set + 1,
                                                   len(combined_set)))
            this_set.load()

        merged_sets = [merge_train_test(s) for s in combined_set]

        combined_set = CombinedSet(merged_sets)
        in_chans = merged_sets[0].get_topological_view().shape[1]
    else:
        in_chans = n_chans
    input_time_length = 1000 # implies how many crops are processed in parallel, does _not_ determine receptive field size
    # receptive field size is determined by model architecture

    # ensure reproducibility by resetting lasagne/theano random generator
    lasagne.random.set_rng(RandomState(34734))
    if network == 'deep':
        final_layer = create_deep_net(in_chans, input_time_length)
    else:
        assert network == 'shallow'
        final_layer = create_shallow_net(in_chans, input_time_length)

    print_layers(final_layer)
    
    dataset_splitter = SeveralSetsSplitter(valid_set_fraction=0.1, use_test_as_valid=False)
    iterator = CntWindowTrialIterator(batch_size=45,input_time_length=input_time_length,
                                     n_sample_preds=get_n_sample_preds(final_layer))
        
    monitors = [LossMonitor(),
        CntTrialMisclassMonitor(input_time_length=input_time_length),
        KappaMonitor(input_time_length=iterator.input_time_length, mode=kappa_mode),
        RuntimeMonitor(),]
    
    
    
    
    #debug: n_no_decrease_max_epochs = 2
    #debug: n_max_epochs = 4
    n_no_decrease_max_epochs = 80
    n_max_epochs = 800#100
    # real values for paper were 80 and 800
    stop_criterion = Or([NoDecrease('valid_misclass', num_epochs=n_no_decrease_max_epochs),
                         MaxEpochs(num_epochs=n_max_epochs)])
    
    dataset = combined_set
    splitter = dataset_splitter
    updates_expression = adam
    updates_modifier = MaxNormConstraintWithDefaults({})
    remember_best_chan = 'valid_misclass'
    run_after_early_stop=True
    exp = Experiment(final_layer, dataset, splitter, None, iterator,
                     loss_expression, updates_expression, updates_modifier,
                     monitors, stop_criterion, remember_best_chan,
                     run_after_early_stop, batch_modifier=None)

    if only_return_exp:
        return exp
    
    exp.setup()
    exp.run()
    end_time = time.time()
    run_time = end_time - start_time
    
    ex.info['finished'] = True
    for key in exp.monitor_chans:
        ex.info[key] = exp.monitor_chans[key][-1]
    ex.info['runtime'] = run_time
    save_pkl_artifact(ex, exp.monitor_chans, 'monitor_chans.pkl')
    save_npy_artifact(ex, lasagne.layers.get_all_param_values(exp.final_layer),
        'model_params.npy')
Example No. 16
def test_experiment_fixed_split():
    """ Regression test, checking that values have not changed from original run"""
    data_rng = RandomState(398765905)
    rand_topo = data_rng.rand(200, 10, 10, 3).astype(np.float32)
    rand_y = np.int32(data_rng.rand(200) > 0.5)
    rand_topo[rand_y == 1] += 0.01
    rand_set = DenseDesignMatrixWrapper(topo_view=rand_topo, y=rand_y)

    lasagne.random.set_rng(RandomState(9859295))
    in_layer = InputLayer(shape=[None, 10, 10, 3])
    network = DenseLayer(incoming=in_layer,
                         name="softmax",
                         num_units=2,
                         nonlinearity=lasagne.nonlinearities.softmax)

    updates_modifier = MaxNormConstraint({'softmax': 0.5})
    dataset = rand_set

    dataset_iterator = BalancedBatchIterator(batch_size=60)

    preprocessor = OnlineAxiswiseStandardize(axis=['c', 1])
    dataset_splitter = FixedTrialSplitter(n_train_trials=150,
                                          valid_set_fraction=0.1)
    updates_var_func = lasagne.updates.adam
    loss_var_func = lasagne.objectives.categorical_crossentropy
    monitors = [
        braindecode.veganlasagne.monitors.LossMonitor(),
        braindecode.veganlasagne.monitors.MisclassMonitor(),
        braindecode.veganlasagne.monitors.RuntimeMonitor()
    ]
    stop_criterion = braindecode.veganlasagne.stopping.MaxEpochs(num_epochs=30)

    exp = Experiment(network,
                     dataset,
                     dataset_splitter,
                     preprocessor,
                     dataset_iterator,
                     loss_var_func,
                     updates_var_func,
                     updates_modifier,
                     monitors,
                     stop_criterion,
                     remember_best_chan='valid_misclass',
                     run_after_early_stop=True)
    exp.setup()
    exp.run()
    assert np.allclose([
        0.548148, 0.540741, 0.503704, 0.451852, 0.392593, 0.370370, 0.340741,
        0.281481, 0.237037, 0.207407, 0.192593, 0.177778, 0.133333, 0.111111,
        0.111111, 0.103704, 0.096296, 0.088889, 0.088889, 0.081481, 0.074074,
        0.066667, 0.066667, 0.059259, 0.059259, 0.051852, 0.037037, 0.037037,
        0.029630, 0.029630, 0.029630, 0.053333, 0.053333, 0.053333, 0.053333,
        0.040000, 0.040000, 0.026667, 0.026667, 0.026667, 0.026667, 0.033333,
        0.033333, 0.033333, 0.033333, 0.026667, 0.020000, 0.020000, 0.020000
    ],
                       exp.monitor_chans['train_misclass'],
                       rtol=1e-4,
                       atol=1e-4)
    assert np.allclose([
        0.400000, 0.400000, 0.400000, 0.400000, 0.400000, 0.400000, 0.400000,
        0.400000, 0.333333, 0.333333, 0.333333, 0.266667, 0.266667, 0.266667,
        0.266667, 0.266667, 0.266667, 0.266667, 0.266667, 0.266667, 0.266667,
        0.266667, 0.266667, 0.333333, 0.333333, 0.333333, 0.333333, 0.266667,
        0.266667, 0.266667, 0.266667, 0.266667, 0.266667, 0.266667, 0.266667,
        0.200000, 0.200000, 0.133333, 0.133333, 0.133333, 0.133333, 0.133333,
        0.133333, 0.133333, 0.133333, 0.066667, 0.000000, 0.000000, 0.000000
    ],
                       exp.monitor_chans['valid_misclass'],
                       rtol=1e-4,
                       atol=1e-4)
    assert np.allclose([
        0.460000, 0.420000, 0.420000, 0.420000, 0.420000, 0.440000, 0.420000,
        0.420000, 0.400000, 0.400000, 0.380000, 0.400000, 0.400000, 0.400000,
        0.400000, 0.400000, 0.420000, 0.420000, 0.420000, 0.400000, 0.400000,
        0.400000, 0.380000, 0.380000, 0.380000, 0.380000, 0.400000, 0.400000,
        0.420000, 0.420000, 0.420000, 0.420000, 0.420000, 0.420000, 0.420000,
        0.420000, 0.400000, 0.400000, 0.380000, 0.400000, 0.400000, 0.400000,
        0.400000, 0.400000, 0.360000, 0.360000, 0.380000, 0.380000, 0.380000
    ],
                       exp.monitor_chans['test_misclass'],
                       rtol=1e-4,
                       atol=1e-4)
    assert np.allclose([
        1.200389, 0.777420, 0.740212, 0.705151, 0.672329, 0.641764, 0.613245,
        0.586423, 0.561397, 0.538399, 0.517073, 0.497741, 0.479949, 0.463601,
        0.448505, 0.434583, 0.421652, 0.409739, 0.398721, 0.388490, 0.378988,
        0.370121, 0.361965, 0.354295, 0.347159, 0.340496, 0.334237, 0.328328,
        0.322803, 0.317624, 0.312765, 0.340091, 0.335658, 0.330868, 0.325923,
        0.320895, 0.316027, 0.311290, 0.306683, 0.302364, 0.298264, 0.294475,
        0.290957, 0.287673, 0.284664, 0.281860, 0.279309, 0.276918, 0.274709
    ],
                       exp.monitor_chans['train_loss'],
                       rtol=1e-4,
                       atol=1e-4)
    assert np.allclose([
        0.766092, 0.642237, 0.636960, 0.629884, 0.623676, 0.618789, 0.613821,
        0.609264, 0.605430, 0.601499, 0.598178, 0.594579, 0.591720, 0.589461,
        0.587571, 0.585673, 0.583782, 0.581606, 0.580687, 0.579677, 0.579276,
        0.578903, 0.578918, 0.578901, 0.579020, 0.579575, 0.580291, 0.581120,
        0.581591, 0.582552, 0.583647, 0.585879, 0.582269, 0.571548, 0.555956,
        0.536982, 0.517474, 0.496652, 0.474400, 0.453094, 0.432208, 0.412533,
        0.394271, 0.377036, 0.361311, 0.346461, 0.333406, 0.321266, 0.310158
    ],
                       exp.monitor_chans['valid_loss'],
                       rtol=1e-4,
                       atol=1e-4)
    assert np.allclose([
        1.069603, 0.751982, 0.746711, 0.742126, 0.738055, 0.734703, 0.731921,
        0.729251, 0.727241, 0.724931, 0.723189, 0.721885, 0.720605, 0.719565,
        0.718930, 0.718664, 0.718671, 0.718747, 0.719004, 0.718935, 0.719153,
        0.719381, 0.719815, 0.720419, 0.721205, 0.721993, 0.722759, 0.723534,
        0.724298, 0.724908, 0.725497, 0.725097, 0.725950, 0.726615, 0.726953,
        0.727603, 0.728247, 0.728787, 0.729323, 0.729945, 0.730434, 0.731245,
        0.732168, 0.732949, 0.734086, 0.735250, 0.736381, 0.737502, 0.738444
    ],
                       exp.monitor_chans['test_loss'],
                       rtol=1e-4,
                       atol=1e-4)
Exemplo n.º 17
0
def test_experiment_sample_windows():
    data_rng = RandomState(398765905)
    rand_topo = data_rng.rand(200, 10, 10, 3).astype(np.float32)
    rand_y = np.int32(data_rng.rand(200) > 0.5)
    rand_topo[rand_y == 1] += 0.1
    rand_set = DenseDesignMatrixWrapper(topo_view=rand_topo, y=rand_y)

    lasagne.random.set_rng(RandomState(9859295))
    in_layer = InputLayer(shape=[None, 10, 5, 3])
    network = DenseLayer(incoming=in_layer,
                         name='softmax',
                         num_units=2,
                         nonlinearity=lasagne.nonlinearities.softmax)
    updates_modifier = MaxNormConstraint({'softmax': 0.5})

    dataset = rand_set

    dataset_iterator = WindowsIterator(n_samples_per_window=5, batch_size=60)

    preprocessor = OnlineAxiswiseStandardize(axis=['c', 1])
    dataset_splitter = FixedTrialSplitter(n_train_trials=150,
                                          valid_set_fraction=0.1)
    updates_var_func = lasagne.updates.adam
    loss_var_func = lasagne.objectives.categorical_crossentropy
    monitors = [
        braindecode.veganlasagne.monitors.LossMonitor(),
        braindecode.veganlasagne.monitors.WindowMisclassMonitor(),
        braindecode.veganlasagne.monitors.RuntimeMonitor()
    ]
    stop_criterion = braindecode.veganlasagne.stopping.MaxEpochs(num_epochs=5)

    exp = Experiment(network,
                     dataset,
                     dataset_splitter,
                     preprocessor,
                     dataset_iterator,
                     loss_var_func,
                     updates_var_func,
                     updates_modifier,
                     monitors,
                     stop_criterion,
                     remember_best_chan='valid_misclass',
                     run_after_early_stop=True)
    exp.setup()
    exp.run()

    assert np.allclose(
        [0.629630, 0.140741, 0.029630, 0.022222, 0.000000, 0.000000, 0.000000],
        exp.monitor_chans['train_misclass'],
        rtol=1e-4,
        atol=1e-4)
    assert np.allclose(
        [0.400000, 0.133333, 0.066667, 0.000000, 0.000000, 0.000000, 0.000000],
        exp.monitor_chans['valid_misclass'],
        rtol=1e-4,
        atol=1e-4)
    assert np.allclose(
        [0.560000, 0.060000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000],
        exp.monitor_chans['test_misclass'],
        rtol=1e-4,
        atol=1e-4)
    assert np.allclose(
        [1.180485, 0.574264, 0.420023, 0.330909, 0.278569, 0.245692, 0.242845],
        exp.monitor_chans['train_loss'],
        rtol=1e-4,
        atol=1e-4)
    assert np.allclose(
        [1.016782, 0.514049, 0.370485, 0.288948, 0.240913, 0.211189, 0.215967],
        exp.monitor_chans['valid_loss'],
        rtol=1e-4,
        atol=1e-4)
    assert np.allclose(
        [1.031832, 0.504570, 0.352317, 0.269810, 0.223904, 0.196681, 0.197899],
        exp.monitor_chans['test_loss'],
        rtol=1e-4,
        atol=1e-4)
Exemplo n.º 18
0
def run(
    ex,
    data_folder,
    subject_id,
    n_chans,
    clean_train,
    low_cut_hz,
    train_start_ms,
    kappa_mode,
    loss_expression,
    filt_order,
    only_return_exp,
):
    start_time = time.time()
    assert (only_return_exp is False) or (n_chans is not None)
    ex.info['finished'] = False
    load_sensor_names = None
    train_filename = 'A{:02d}T.mat'.format(subject_id)
    test_filename = 'A{:02d}E.mat'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)

    # trial ival in milliseconds
    # these are the samples that will be predicted, so for a
    # network with 2000ms receptive field
    # 1500 means the first receptive field goes from -500 to 1500
    train_segment_ival = [train_start_ms, 4000]
    test_segment_ival = [0, 4000]
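
    # Added sketch (the ~2000 ms receptive field is an assumption used only
    # for illustration): the first predicted sample at train_start_ms needs an
    # input window of roughly [train_start_ms - receptive_field_ms, train_start_ms],
    # e.g. [1500 - 2000, 1500] = [-500, 1500] ms, as described above.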

    train_loader = BCICompetition4Set2A(train_filepath,
                                        load_sensor_names=load_sensor_names)
    test_loader = BCICompetition4Set2A(test_filepath,
                                       load_sensor_names=load_sensor_names)

    # Preprocessing pipeline in [(function, {args: values})] logic
    cnt_preprocessors = [
        (resample_cnt, {'newfs': 250.0}),
        (bandpass_cnt, {'low_cut_hz': low_cut_hz,
                        'high_cut_hz': 38,
                        'filt_order': filt_order}),
        (exponential_standardize_cnt, {})
    ]

    marker_def = {
        '1- Right Hand': [1],
        '2 - Left Hand': [2],
        '3 - Rest': [3],
        '4 - Feet': [4]
    }

    train_signal_proc = SignalProcessor(set_loader=train_loader,
                                        segment_ival=train_segment_ival,
                                        cnt_preprocessors=cnt_preprocessors,
                                        marker_def=marker_def)
    train_set = CntSignalMatrix(signal_processor=train_signal_proc,
                                sensor_names='all')

    test_signal_proc = SignalProcessor(set_loader=test_loader,
                                       segment_ival=test_segment_ival,
                                       cnt_preprocessors=cnt_preprocessors,
                                       marker_def=marker_def)
    test_set = CntSignalMatrix(signal_processor=test_signal_proc,
                               sensor_names='all')

    if clean_train:
        train_cleaner = BCICompetitionIV2ABArtefactMaskCleaner(
            marker_def=marker_def)
    else:
        train_cleaner = NoCleaner()
    test_cleaner = BCICompetitionIV2ABArtefactMaskCleaner(
        marker_def=marker_def)
    combined_set = CombinedCleanedSet(train_set, test_set, train_cleaner,
                                      test_cleaner)
    if not only_return_exp:
        combined_set.load()
        in_chans = train_set.get_topological_view().shape[1]
    else:
        in_chans = n_chans
    input_time_length = 1000  # implies how many crops are processed in parallel, does _not_ determine receptive field size
    # receptive field size is determined by model architecture

    # ensure reproducibility by resetting lasagne/theano random generator
    lasagne.random.set_rng(RandomState(34734))
    final_layer = create_deep_net(in_chans, input_time_length)
    print_layers(final_layer)

    dataset_splitter = SeveralSetsSplitter(valid_set_fraction=0.2,
                                           use_test_as_valid=False)
    iterator = CntWindowTrialIterator(
        batch_size=45,
        input_time_length=input_time_length,
        n_sample_preds=get_n_sample_preds(final_layer))
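    # Note (a sketch of the relationship, not taken from the original code):
    # for a dense-prediction model the number of predicted samples per crop is
    # roughly
    #     n_sample_preds = input_time_length - receptive_field_samples + 1
    # so input_time_length only sets how many predictions are computed in
    # parallel; get_n_sample_preds(final_layer) derives the exact value from
    # the architecture.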

    monitors = [
        LossMonitor(),
        CntTrialMisclassMonitor(input_time_length=input_time_length),
        KappaMonitor(input_time_length=iterator.input_time_length,
                     mode=kappa_mode),
        RuntimeMonitor(),
    ]

    #debug: n_no_decrease_max_epochs = 2
    #debug: n_max_epochs = 4
    n_no_decrease_max_epochs = 80
    n_max_epochs = 800  #100
    # real values for paper were 80 and 800
    stop_criterion = Or([
        NoDecrease('valid_misclass', num_epochs=n_no_decrease_max_epochs),
        MaxEpochs(num_epochs=n_max_epochs)
    ])

    dataset = combined_set
    splitter = dataset_splitter
    updates_expression = adam
    updates_modifier = MaxNormConstraintWithDefaults({})
    remember_best_chan = 'valid_misclass'
    run_after_early_stop = True
    exp = Experiment(final_layer,
                     dataset,
                     splitter,
                     None,
                     iterator,
                     loss_expression,
                     updates_expression,
                     updates_modifier,
                     monitors,
                     stop_criterion,
                     remember_best_chan,
                     run_after_early_stop,
                     batch_modifier=None)

    if only_return_exp:
        return exp

    exp.setup()
    exp.run()
    end_time = time.time()
    run_time = end_time - start_time

    ex.info['finished'] = True
    for key in exp.monitor_chans:
        ex.info[key] = exp.monitor_chans[key][-1]
    ex.info['runtime'] = run_time
    save_pkl_artifact(ex, exp.monitor_chans, 'monitor_chans.pkl')
    save_npy_artifact(ex, lasagne.layers.get_all_param_values(exp.final_layer),
                      'model_params.npy')
Exemplo n.º 19
0
    def run_exp(i_fold):
        # ensure reproducibility by resetting lasagne/theano random generator
        lasagne.random.set_rng(RandomState(34734))

        d5net = Deep5Net(in_chans=in_chans,
                         input_time_length=input_time_length,
                         num_filters_time=num_filters_time,
                         filter_time_length=filter_time_length,
                         num_filters_spat=num_filters_spat,
                         pool_time_length=pool_time_length,
                         pool_time_stride=pool_time_stride,
                         num_filters_2=num_filters_2,
                         filter_length_2=filter_length_2,
                         num_filters_3=num_filters_3,
                         filter_length_3=filter_length_3,
                         num_filters_4=num_filters_4,
                         filter_length_4=filter_length_4,
                         final_dense_length=final_dense_length,
                         n_classes=n_classes,
                         final_nonlin=final_nonlin,
                         first_nonlin=first_nonlin,
                         first_pool_mode=first_pool_mode,
                         first_pool_nonlin=first_pool_nonlin,
                         later_nonlin=later_nonlin,
                         later_pool_mode=later_pool_mode,
                         later_pool_nonlin=later_pool_nonlin,
                         drop_in_prob=drop_in_prob,
                         drop_prob=drop_prob,
                         batch_norm_alpha=batch_norm_alpha,
                         double_time_convs=double_time_convs,
                         split_first_layer=split_first_layer,
                         batch_norm=batch_norm)
        final_layer = d5net.get_layers()[-1]
        final_layer = ClipLayer(final_layer, 1e-4, 1 - 1e-4)
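        # Clipping the softmax outputs to [1e-4, 1 - 1e-4] keeps the
        # categorical cross-entropy finite (avoids log(0) for saturated
        # predictions).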
        dataset_splitter = CntTrialSingleFoldSplitter(n_folds=10,
                                                      i_test_fold=i_fold,
                                                      shuffle=True)
        iterator = CntWindowTrialIterator(
            batch_size=45,
            input_time_length=input_time_length,
            n_sample_preds=get_n_sample_preds(final_layer))

        monitors = [
            LossMonitor(),
            CntTrialMisclassMonitor(input_time_length=input_time_length),
            KappaMonitor(input_time_length=iterator.input_time_length,
                         mode='max'),
            RuntimeMonitor()
        ]

        #n_no_decrease_max_epochs = 2
        #n_max_epochs = 4
        n_no_decrease_max_epochs = 80
        n_max_epochs = 800
        # real values for paper were 80 and 800
        remember_best_chan = 'valid_' + stop_chan
        stop_criterion = Or([
            NoDecrease(remember_best_chan,
                       num_epochs=n_no_decrease_max_epochs),
            MaxEpochs(num_epochs=n_max_epochs)
        ])

        dataset = combined_set
        splitter = dataset_splitter
        updates_expression = adam
        updates_modifier = MaxNormConstraintWithDefaults({})
        preproc = None
        exp = Experiment(final_layer,
                         dataset,
                         splitter,
                         preproc,
                         iterator,
                         loss_expression,
                         updates_expression,
                         updates_modifier,
                         monitors,
                         stop_criterion,
                         remember_best_chan,
                         run_after_early_stop,
                         batch_modifier=None)

        if only_return_exp:
            return exp

        exp.setup()
        exp.run()
        return exp
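
    # Hypothetical usage sketch (the enclosing script that defines
    # combined_set, loss_expression etc. is not shown): the folds could be run
    # in sequence and their final monitor values collected, e.g.
    #     fold_exps = [run_exp(i_fold) for i_fold in range(10)]
    #     test_misclasses = [exp.monitor_chans['test_misclass'][-1]
    #                        for exp in fold_exps]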
Exemplo n.º 20
0
def test_experiment_fixed_split():
    """ Regression test, checking that values have not changed from original run"""
    data_rng = RandomState(398765905)
    rand_topo = data_rng.rand(200, 10, 10, 3).astype(np.float32)
    rand_y = np.int32(data_rng.rand(200) > 0.5)
    rand_topo[rand_y == 1] += 0.01
    rand_set = DenseDesignMatrixWrapper(topo_view=rand_topo, y=rand_y)

    lasagne.random.set_rng(RandomState(9859295))
    in_layer = InputLayer(shape=[None, 10, 10, 3])
    network = DenseLayer(incoming=in_layer,
                         name='softmax',
                         num_units=2,
                         nonlinearity=lasagne.nonlinearities.softmax)

    updates_modifier = MaxNormConstraint({'softmax': 0.5})
    dataset = rand_set

    dataset_iterator = BalancedBatchIterator(batch_size=60)

    preprocessor = OnlineAxiswiseStandardize(axis=['c', 1])
    dataset_splitter = FixedTrialSplitter(n_train_trials=150,
                                          valid_set_fraction=0.1)
    updates_var_func = lasagne.updates.adam
    loss_var_func = lasagne.objectives.categorical_crossentropy
    monitors = [
        braindecode.veganlasagne.monitors.LossMonitor(),
        braindecode.veganlasagne.monitors.MisclassMonitor(),
        braindecode.veganlasagne.monitors.RuntimeMonitor()
    ]
    stop_criterion = braindecode.veganlasagne.stopping.MaxEpochs(num_epochs=30)

    exp = Experiment(network,
                     dataset,
                     dataset_splitter,
                     preprocessor,
                     dataset_iterator,
                     loss_var_func,
                     updates_var_func,
                     updates_modifier,
                     monitors,
                     stop_criterion,
                     remember_best_chan='valid_misclass',
                     run_after_early_stop=True)
    exp.setup()
    exp.run()
    assert np.allclose(
    [0.548148, 0.540741, 0.503704, 0.451852, 0.392593, 0.370370, 
        0.340741, 0.281481, 0.237037, 0.207407, 0.192593, 0.177778, 
        0.133333, 0.111111, 0.111111, 0.103704, 0.096296, 0.088889, 
        0.088889, 0.081481, 0.074074, 0.066667, 0.066667, 0.059259, 
        0.059259, 0.051852, 0.037037, 0.037037, 0.029630, 0.029630, 
        0.029630, 0.053333, 0.053333, 0.053333, 0.053333, 0.040000, 
        0.040000, 0.026667, 0.026667, 0.026667, 0.026667, 0.033333, 
        0.033333, 0.033333, 0.033333, 0.026667, 0.020000, 0.020000, 
        0.020000],
        exp.monitor_chans['train_misclass'],
        rtol=1e-4, atol=1e-4)
    assert np.allclose(
        [0.400000, 0.400000, 0.400000, 0.400000, 0.400000, 0.400000, 
        0.400000, 0.400000, 0.333333, 0.333333, 0.333333, 0.266667, 
        0.266667, 0.266667, 0.266667, 0.266667, 0.266667, 0.266667, 
        0.266667, 0.266667, 0.266667, 0.266667, 0.266667, 0.333333, 
        0.333333, 0.333333, 0.333333, 0.266667, 0.266667, 0.266667, 
        0.266667, 0.266667, 0.266667, 0.266667, 0.266667, 0.200000, 
        0.200000, 0.133333, 0.133333, 0.133333, 0.133333, 0.133333, 
        0.133333, 0.133333, 0.133333, 0.066667, 0.000000, 0.000000, 
        0.000000],
        exp.monitor_chans['valid_misclass'],
        rtol=1e-4, atol=1e-4)
    assert np.allclose(
        [0.460000, 0.420000, 0.420000, 0.420000, 0.420000, 0.440000, 
        0.420000, 0.420000, 0.400000, 0.400000, 0.380000, 0.400000, 
        0.400000, 0.400000, 0.400000, 0.400000, 0.420000, 0.420000, 
        0.420000, 0.400000, 0.400000, 0.400000, 0.380000, 0.380000, 
        0.380000, 0.380000, 0.400000, 0.400000, 0.420000, 0.420000, 
        0.420000, 0.420000, 0.420000, 0.420000, 0.420000, 0.420000, 
        0.400000, 0.400000, 0.380000, 0.400000, 0.400000, 0.400000, 
        0.400000, 0.400000, 0.360000, 0.360000, 0.380000, 0.380000, 
        0.380000],
        exp.monitor_chans['test_misclass'],
        rtol=1e-4, atol=1e-4)
    assert np.allclose(
        [1.200389, 0.777420, 0.740212, 0.705151, 0.672329, 0.641764, 
        0.613245, 0.586423, 0.561397, 0.538399, 0.517073, 0.497741, 
        0.479949, 0.463601, 0.448505, 0.434583, 0.421652, 0.409739, 
        0.398721, 0.388490, 0.378988, 0.370121, 0.361965, 0.354295, 
        0.347159, 0.340496, 0.334237, 0.328328, 0.322803, 0.317624, 
        0.312765, 0.340091, 0.335658, 0.330868, 0.325923, 0.320895, 
        0.316027, 0.311290, 0.306683, 0.302364, 0.298264, 0.294475, 
        0.290957, 0.287673, 0.284664, 0.281860, 0.279309, 0.276918, 
        0.274709],
        exp.monitor_chans['train_loss'],
        rtol=1e-4, atol=1e-4)
    assert np.allclose(
        [0.766092, 0.642237, 0.636960, 0.629884, 0.623676, 0.618789, 
        0.613821, 0.609264, 0.605430, 0.601499, 0.598178, 0.594579, 
        0.591720, 0.589461, 0.587571, 0.585673, 0.583782, 0.581606, 
        0.580687, 0.579677, 0.579276, 0.578903, 0.578918, 0.578901, 
        0.579020, 0.579575, 0.580291, 0.581120, 0.581591, 0.582552, 
        0.583647, 0.585879, 0.582269, 0.571548, 0.555956, 0.536982, 
        0.517474, 0.496652, 0.474400, 0.453094, 0.432208, 0.412533, 
        0.394271, 0.377036, 0.361311, 0.346461, 0.333406, 0.321266, 
        0.310158],
        exp.monitor_chans['valid_loss'],
        rtol=1e-4, atol=1e-4)
    assert np.allclose(
        [1.069603, 0.751982, 0.746711, 0.742126, 0.738055, 0.734703, 
        0.731921, 0.729251, 0.727241, 0.724931, 0.723189, 0.721885, 
        0.720605, 0.719565, 0.718930, 0.718664, 0.718671, 0.718747, 
        0.719004, 0.718935, 0.719153, 0.719381, 0.719815, 0.720419, 
        0.721205, 0.721993, 0.722759, 0.723534, 0.724298, 0.724908, 
        0.725497, 0.725097, 0.725950, 0.726615, 0.726953, 0.727603, 
        0.728247, 0.728787, 0.729323, 0.729945, 0.730434, 0.731245, 
        0.732168, 0.732949, 0.734086, 0.735250, 0.736381, 0.737502, 
        0.738444],
        exp.monitor_chans['test_loss'],
        rtol=1e-4, atol=1e-4)
Exemplo n.º 21
0
def run_exp_on_high_gamma_dataset(train_filename, test_filename, low_cut_hz,
                                  model_name, max_epochs, max_increase_epochs,
                                  np_th_seed, debug):
    train_set, valid_set, test_set = load_train_valid_test(
        train_filename=train_filename,
        test_filename=test_filename,
        low_cut_hz=low_cut_hz,
        debug=debug)
    if debug:
        max_epochs = 4

    set_random_seeds(np_th_seed, cuda=True)
    # torch.backends.cudnn.benchmark = True  # sometimes crashes?
    n_classes = int(np.max(train_set.y) + 1)
    n_chans = int(train_set.X.shape[1])
    input_time_length = 1000
    if model_name == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=2).create_network()
    elif model_name == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length=30).create_network()

    to_dense_prediction_model(model)
    model.cuda()
    model.eval()

    out = model(np_to_var(train_set.X[:1, :, :input_time_length, None]).cuda())

    n_preds_per_input = out.cpu().data.numpy().shape[2]
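    # Note: the forward pass above is only used to read off the output shape,
    # which for a dense-prediction model is expected to be
    # (batch, n_classes, n_preds_per_input[, 1]); the time axis tells the
    # iterator how many labelled samples each crop predicts.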
    optimizer = optim.Adam(model.parameters(), weight_decay=0, lr=1e-3)

    iterator = CropsFromTrialsIterator(batch_size=60,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input,
                                       seed=np_th_seed)

    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]

    model_constraint = MaxNormDefaultConstraint()

    loss_function = lambda preds, targets: F.nll_loss(th.mean(preds, dim=2),
                                                      targets)
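    # Cropped training: predictions arrive as (batch, n_classes, n_preds), so
    # averaging the (log-)outputs over the time dimension before nll_loss ties
    # every prediction of a crop to the same trial target.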

    run_after_early_stop = True
    do_early_stop = True
    remember_best_column = 'valid_misclass'
    stop_criterion = Or([
        MaxEpochs(max_epochs),
        NoDecrease('valid_misclass', max_increase_epochs)
    ])

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column=remember_best_column,
                     run_after_early_stop=run_after_early_stop,
                     cuda=True,
                     do_early_stop=do_early_stop)
    exp.run()
    return exp
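
# Hypothetical usage sketch (file paths and seed below are assumptions, not
# from the original code):
#     exp = run_exp_on_high_gamma_dataset('train/4.mat', 'test/4.mat',
#                                         low_cut_hz=4, model_name='deep',
#                                         max_epochs=800, max_increase_epochs=80,
#                                         np_th_seed=0, debug=False)
#     print(exp.epochs_df.tail())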
Exemplo n.º 22
0
def run(
    ex,
    data_folder,
    subject_id,
    n_chans,
    only_return_exp,
):
    start_time = time.time()
    assert (only_return_exp is False) or (n_chans is not None)
    ex.info['finished'] = False
    load_sensor_names = None
    train_filename = 'A{:02d}T.mat'.format(subject_id)
    test_filename = 'A{:02d}E.mat'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)

    # trial ival in milliseconds
    # these are the samples that will be predicted, so for a
    # network with 2000ms receptive field
    # 1500 means the first receptive field goes from -500 to 1500
    segment_ival = [1500, 4000]

    train_loader = BCICompetition4Set2A(train_filepath,
                                        load_sensor_names=load_sensor_names)
    test_loader = BCICompetition4Set2A(test_filepath,
                                       load_sensor_names=load_sensor_names)

    # Preprocessing pipeline in [(function, {args: values})] logic
    cnt_preprocessors = [
        (resample_cnt, {'newfs': 250.0}),
        (bandpass_cnt, {'low_cut_hz': 0,
                        'high_cut_hz': 38}),
        (exponential_standardize_cnt, {})
    ]

    marker_def = {
        '1- Right Hand': [1],
        '2 - Left Hand': [2],
        '3 - Rest': [3],
        '4 - Feet': [4]
    }

    train_signal_proc = SignalProcessor(set_loader=train_loader,
                                        segment_ival=segment_ival,
                                        cnt_preprocessors=cnt_preprocessors,
                                        marker_def=marker_def)
    train_set = CntSignalMatrix(signal_processor=train_signal_proc,
                                sensor_names='all')

    test_signal_proc = SignalProcessor(set_loader=test_loader,
                                       segment_ival=segment_ival,
                                       cnt_preprocessors=cnt_preprocessors,
                                       marker_def=marker_def)
    test_set = CntSignalMatrix(signal_processor=test_signal_proc,
                               sensor_names='all')

    from braindecode.mywyrm.clean import MaxAbsCleaner
    train_cleaner = MaxAbsCleaner(segment_ival=[0, 4000],
                                  threshold=800,
                                  marker_def=marker_def)
    test_cleaner = MaxAbsCleaner(segment_ival=[0, 4000],
                                 threshold=800,
                                 marker_def=marker_def)
    combined_set = CombinedCleanedSet(train_set, test_set, train_cleaner,
                                      test_cleaner)
    if not only_return_exp:
        combined_set.load()

    in_chans = train_set.get_topological_view().shape[1]
    input_time_length = 1000  # implies how many crops are processed in parallel, does _not_ determine receptive field size
    # receptive field size is determined by model architecture
    num_filters_time = 25
    filter_time_length = 10
    num_filters_spat = 25
    pool_time_length = 3
    pool_time_stride = 3
    num_filters_2 = 50
    filter_length_2 = 10
    num_filters_3 = 100
    filter_length_3 = 10
    num_filters_4 = 200
    filter_length_4 = 10
    final_dense_length = 2
    n_classes = 4
    final_nonlin = softmax
    first_nonlin = elu
    first_pool_mode = 'max'
    first_pool_nonlin = identity
    later_nonlin = elu
    later_pool_mode = 'max'
    later_pool_nonlin = identity
    drop_in_prob = 0.0
    drop_prob = 0.5
    batch_norm_alpha = 0.1
    double_time_convs = False
    split_first_layer = True
    batch_norm = True
    # ensure reproducibility by resetting lasagne/theano random generator
    lasagne.random.set_rng(RandomState(34734))

    d5net = Deep5Net(in_chans=in_chans,
                     input_time_length=input_time_length,
                     num_filters_time=num_filters_time,
                     filter_time_length=filter_time_length,
                     num_filters_spat=num_filters_spat,
                     pool_time_length=pool_time_length,
                     pool_time_stride=pool_time_stride,
                     num_filters_2=num_filters_2,
                     filter_length_2=filter_length_2,
                     num_filters_3=num_filters_3,
                     filter_length_3=filter_length_3,
                     num_filters_4=num_filters_4,
                     filter_length_4=filter_length_4,
                     final_dense_length=final_dense_length,
                     n_classes=n_classes,
                     final_nonlin=final_nonlin,
                     first_nonlin=first_nonlin,
                     first_pool_mode=first_pool_mode,
                     first_pool_nonlin=first_pool_nonlin,
                     later_nonlin=later_nonlin,
                     later_pool_mode=later_pool_mode,
                     later_pool_nonlin=later_pool_nonlin,
                     drop_in_prob=drop_in_prob,
                     drop_prob=drop_prob,
                     batch_norm_alpha=batch_norm_alpha,
                     double_time_convs=double_time_convs,
                     split_first_layer=split_first_layer,
                     batch_norm=batch_norm)
    final_layer = d5net.get_layers()[-1]
    print_layers(final_layer)

    dataset_splitter = SeveralSetsSplitter(valid_set_fraction=0.2,
                                           use_test_as_valid=False)
    iterator = CntWindowTrialIterator(
        batch_size=45,
        input_time_length=input_time_length,
        n_sample_preds=get_n_sample_preds(final_layer))

    monitors = [
        LossMonitor(),
        CntTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]

    #debug: n_no_decrease_max_epochs = 2
    #debug: n_max_epochs = 4
    n_no_decrease_max_epochs = 80
    n_max_epochs = 800  #100
    # real values for paper were 80 and 800
    stop_criterion = Or([
        NoDecrease('valid_misclass', num_epochs=n_no_decrease_max_epochs),
        MaxEpochs(num_epochs=n_max_epochs)
    ])

    dataset = combined_set
    splitter = dataset_splitter
    loss_expression = categorical_crossentropy
    updates_expression = adam
    updates_modifier = MaxNormConstraintWithDefaults({})
    remember_best_chan = 'valid_misclass'
    run_after_early_stop = True
    exp = Experiment(final_layer,
                     dataset,
                     splitter,
                     None,
                     iterator,
                     loss_expression,
                     updates_expression,
                     updates_modifier,
                     monitors,
                     stop_criterion,
                     remember_best_chan,
                     run_after_early_stop,
                     batch_modifier=None)

    if only_return_exp:
        return exp

    exp.setup()
    exp.run()
    end_time = time.time()
    run_time = end_time - start_time

    ex.info['finished'] = True
    ex.info['runtime'] = run_time
    for key in exp.monitor_chans:
        ex.info[key] = exp.monitor_chans[key][-1]
    save_pkl_artifact(ex, exp.monitor_chans, 'monitor_chans.pkl')
Exemplo n.º 23
0
    def _run_experiments_with_string(self, experiment_index, train_str):
        assert experiment_index >= self._get_start_id()
        assert experiment_index < self._get_stop_id()
        lasagne.random.set_rng(RandomState(9859295))
        # Save train string now, will be overwritten later after
        # input dimensions determined, save now for debug in
        # case of crash
        if not self._dry_run:
            self._save_train_string(train_str, experiment_index)
        starttime = time.time()

        train_dict = self._load_without_layers(train_str)
        log.info("With params...")
        if not self._quiet:
            pprint(train_dict['original_params'])
        if self._dry_run:
            # Do not do the loading or training...
            # Only go until here to show the train params
            return

        if self._batch_test:
            # TODO: put into function
            # load layers, load data with dimensions of the layer
            # create experiment with max epochs 2, run
            from braindecode.datasets.random import RandomSet
            train_str = train_str.replace('in_cols', '1')
            train_str = train_str.replace('in_sensors', '32')
            train_dict = yaml_parse.load(train_str)
            layers = load_layers_from_dict(train_dict)
            final_layer = layers[-1]
            n_chans = layers[0].shape[1]
            n_classes = final_layer.output_shape[1]
            n_samples = 500000
            # set n sample preds in case of cnt model
            if (np.any([hasattr(l, 'n_stride') for l in layers])):
                n_sample_preds = get_n_sample_preds(final_layer)
                log.info("Setting n_sample preds automatically to {:d}".format(
                    n_sample_preds))
                for monitor in train_dict['exp_args']['monitors']:
                    if hasattr(monitor, 'n_sample_preds'):
                        monitor.n_sample_preds = n_sample_preds
                train_dict['exp_args'][
                    'iterator'].n_sample_preds = n_sample_preds
                log.info("Input window length is {:d}".format(
                    get_model_input_window(final_layer)))
                # make at least batches
                n_samples = int(n_sample_preds * 1.5 * 200)
            dataset = RandomSet(topo_shape=[n_samples, n_chans, 1, 1],
                                y_shape=[n_samples, n_classes])
            dataset.load()
            splitter = FixedTrialSplitter(n_train_trials=int(n_samples * 0.8),
                                          valid_set_fraction=0.1)
            train_dict['exp_args']['preprocessor'] = None
            train_dict['exp_args']['stop_criterion'] = MaxEpochs(1)
            train_dict['exp_args']['iterator'].batch_size = 1
            # TODO: set stop criterion to max epochs =1
            #  change batch_size in iterator
            exp = Experiment(final_layer, dataset, splitter,
                             **train_dict['exp_args'])
            exp.setup()
            exp.run_until_early_stop()
            datasets = exp.dataset_provider.get_train_valid_test(exp.dataset)
            for batch_size in range(32, 200, 5):
                train_dict['exp_args']['stop_criterion'].num_epochs += 2
                log.info("Running with batch size {:d}".format(batch_size))
                train_dict['exp_args']['iterator'].batch_size = batch_size
                exp.run_until_stop(datasets, remember_best=False)
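            # The loop above bumps the MaxEpochs target by two on each pass,
            # so run_until_stop trains two further epochs at each new batch
            # size (in effect, a quick smoke test of memory use across batch
            # sizes).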
            return

        dataset = train_dict['dataset']
        dataset.load()
        iterator = train_dict['exp_args']['iterator']
        splitter = train_dict['dataset_splitter']
        if dataset.__class__.__name__ == 'EpilepsySet':
            log.info("Reducing to float16 for epilepsy set...")
            dataset.seizure_topo = np.float16(dataset.seizure_topo)
            dataset.non_seizure_topo = np.float16(dataset.non_seizure_topo)
        else:
            # todo: remove this?
            log.info(
                "Determining dataset dimensions to set possible model params..."
            )
            train_set = splitter.split_into_train_valid_test(dataset)['train']
            batch_gen = iterator.get_batches(train_set, shuffle=True)
            dummy_batch_topo = batch_gen.next()[0]
            del train_set
            # not for ultrasound: assert 'in_sensors' in train_str
            # not for cnt net assert 'in_rows' in train_str
            # not for resnet: assert 'in_cols' in train_str
            train_str = train_str.replace('in_sensors',
                                          str(dummy_batch_topo.shape[1]))
            train_str = train_str.replace('in_rows',
                                          str(dummy_batch_topo.shape[2]))
            train_str = train_str.replace('in_cols',
                                          str(dummy_batch_topo.shape[3]))

        self._save_train_string(train_str, experiment_index)

        # reset rng for actual loading of layers, so you can reproduce it
        # when you load the file later
        lasagne.random.set_rng(RandomState(9859295))
        train_dict = yaml_parse.load(train_str)

        layers = load_layers_from_dict(train_dict)
        final_layer = layers[-1]
        assert len(
            np.setdiff1d(
                layers, lasagne.layers.get_all_layers(final_layer))) == 0, (
                    "All layers "
                    "should be used, unused {:s}".format(
                        str(
                            np.setdiff1d(
                                layers,
                                lasagne.layers.get_all_layers(final_layer)))))
        # Set n sample preds in case of cnt model
        if (np.any([hasattr(l, 'n_stride') for l in layers])):
            # Can this be moved up and duplication in if clause( batch test,
            # more above) be removed?
            n_sample_preds = get_n_sample_preds(final_layer)
            log.info("Setting n_sample preds automatically to {:d}".format(
                n_sample_preds))
            for monitor in train_dict['exp_args']['monitors']:
                if hasattr(monitor, 'n_sample_preds'):
                    monitor.n_sample_preds = n_sample_preds
            train_dict['exp_args']['iterator'].n_sample_preds = n_sample_preds
            log.info("Input window length is {:d}".format(
                get_model_input_window(final_layer)))

        if not self._cross_validation:
            # for now lets not do that, current models seem fine again.
            #             if (dataset.__class__.__name__ == 'EpilepsySet') and self._pred_loss_hack:
            #                 from braindecode.epilepsy.experiment import EpilepsyExperiment
            #                 log.info("Creating epilepsy experiment with the pred loss hack")
            #                 exp = EpilepsyExperiment(final_layer, dataset, splitter,
            #                     **train_dict['exp_args'])
            #             else:
            exp = Experiment(final_layer, dataset, splitter,
                             **train_dict['exp_args'])
            exp.setup()
            exp.run()
            endtime = time.time()

            model = exp.final_layer

            # dummy predictions targets
            predictions = [0, 3, 1, 2, 3, 4]
            targets = [3, 4, 1, 2, 3, 4]

            result_or_results = Result(
                parameters=train_dict['original_params'],
                templates={},
                training_time=endtime - starttime,
                monitor_channels=exp.monitor_chans,
                predictions=predictions,
                targets=targets)

        else:  # cross validation
            assert False, (
                "cross validation not used in long time, not up to date"
                " for example targets predictions not added")
            # default 5 folds for now
            n_folds = train_dict['num_cv_folds']
            exp_cv = ExperimentCrossValidation(final_layer,
                                               dataset,
                                               exp_args=train_dict['exp_args'],
                                               n_folds=n_folds,
                                               shuffle=self._shuffle)
            exp_cv.run()
            endtime = time.time()
            result_or_results = []
            for i_fold in xrange(n_folds):
                res = Result(parameters=train_dict['original_params'],
                             templates={},
                             training_time=endtime - starttime,
                             monitor_channels=exp_cv.all_monitor_chans[i_fold],
                             predictions=[0, 3, 1, 2, 3, 4],
                             targets=[3, 4, 1, 2, 3, 4])
                result_or_results.append(res)
            model = exp_cv.all_layers

        if not os.path.exists(self._folder_paths[experiment_index]):
            os.makedirs(self._folder_paths[experiment_index])

        result_file_name = self._get_result_save_path(experiment_index)

        log.info("Saving result to {:s}...".format(result_file_name))
        with open(result_file_name, 'w') as resultfile:
            pickle.dump(result_or_results, resultfile)

        model_file_name = self._get_model_save_path(experiment_index)
        param_file_name = model_file_name.replace('.pkl', '.npy')
        np.save(param_file_name, lasagne.layers.get_all_param_values(model))

        # Possibly make kaggle submission file
        if isinstance(dataset,
                      KaggleGraspLiftSet) and splitter.use_test_as_valid:
            experiment_save_id = int(
                self._base_save_paths[experiment_index].split("/")[-1])
            create_submission_csv_for_one_subject(
                self._folder_paths[experiment_index], exp.dataset, iterator,
                train_dict['exp_args']['preprocessor'], final_layer,
                experiment_save_id)
        elif isinstance(
                dataset,
                AllSubjectsKaggleGraspLiftSet) and splitter.use_test_as_valid:
            experiment_save_id = int(
                self._base_save_paths[experiment_index].split("/")[-1])
            create_submission_csv_for_all_subject_model(
                self._folder_paths[experiment_index], exp.dataset,
                exp.dataset_provider, iterator, final_layer,
                experiment_save_id)
        elif isinstance(splitter, SeveralSetsSplitter):
            pass  # nothing to do in this case

        # very hacky create predictions targets :)
        # Not done earlier as there were weird theano crashes
        if exp.monitors[2].__class__.__name__ == 'CntTrialMisclassMonitor':
            del dataset
            del exp
            add_labels_to_cnt_exp_result(
                self._base_save_paths[experiment_index])
Exemplo n.º 24
0
                        optimizer = ScheduledOptimizer(
                            scheduler,
                            optimizer,
                            schedule_weight_decay=schedule_weight_decay)
                    else:
                        raise ValueError("Unknown scheduler")

            # set up experiment, run
            exp = Experiment(model,
                             train_set,
                             valid_set,
                             test_set,
                             iterator,
                             loss_function,
                             optimizer,
                             model_constraint,
                             monitors,
                             stop_criterion,
                             remember_best_column='train_loss',
                             do_early_stop=False,
                             run_after_early_stop=False,
                             batch_modifier=None,
                             cuda=cuda)
            exp.run()

            # %% plot learning curves

            f, axarr = plt.subplots(2, figsize=(15, 15))
            exp.epochs_df.loc[:,
                              ['train_loss', 'valid_loss', 'test_loss']].plot(
                                  ax=axarr[0], title='loss function')
Exemplo n.º 25
0
def run(ex, data_folder, subject_id, n_chans, train_inds, test_inds,
        sets_like_fbcsp_paper, clean_train, stop_chan, filt_order, low_cut_hz,
        loss_expression, network, only_return_exp, run_after_early_stop):
    start_time = time.time()
    assert (only_return_exp is False) or (n_chans is not None)
    ex.info['finished'] = False

    # trial ival in milliseconds
    # these are the samples that will be predicted, so for a
    # network with 2000ms receptive field
    # 1500 means the first receptive field goes from -500 to 1500
    train_segment_ival = [1500, 4000]
    test_segment_ival = [0, 4000]

    if sets_like_fbcsp_paper:
        if subject_id in [4, 5, 6, 7, 8, 9]:
            train_inds = [3]
        elif subject_id == 1:
            train_inds = [1, 3]
        else:
            assert subject_id in [2, 3]
            train_inds = [1, 2, 3]

    train_loader = MultipleBCICompetition4Set2B(subject_id,
                                                session_ids=train_inds,
                                                data_folder=data_folder)

    test_loader = MultipleBCICompetition4Set2B(subject_id,
                                               session_ids=test_inds,
                                               data_folder=data_folder)

    # Preprocessing pipeline in [(function, {args: values})] logic
    cnt_preprocessors = [
        (resample_cnt, {'newfs': 250.0}),
        (bandpass_cnt, {'low_cut_hz': low_cut_hz,
                        'high_cut_hz': 38,
                        'filt_order': filt_order}),
        (exponential_standardize_cnt, {})
    ]

    marker_def = {'1- Left Hand': [1], '2 - Right Hand': [2]}

    train_signal_proc = SignalProcessor(set_loader=train_loader,
                                        segment_ival=train_segment_ival,
                                        cnt_preprocessors=cnt_preprocessors,
                                        marker_def=marker_def)
    train_set = CntSignalMatrix(signal_processor=train_signal_proc,
                                sensor_names='all')

    test_signal_proc = SignalProcessor(set_loader=test_loader,
                                       segment_ival=test_segment_ival,
                                       cnt_preprocessors=cnt_preprocessors,
                                       marker_def=marker_def)
    test_set = CntSignalMatrix(signal_processor=test_signal_proc,
                               sensor_names='all')

    if clean_train:
        train_cleaner = BCICompetitionIV2ABArtefactMaskCleaner(
            marker_def=marker_def)
    else:
        train_cleaner = NoCleaner()
    test_cleaner = BCICompetitionIV2ABArtefactMaskCleaner(
        marker_def=marker_def)
    combined_set = CombinedCleanedSet(train_set, test_set, train_cleaner,
                                      test_cleaner)
    if not only_return_exp:
        combined_set.load()

    lasagne.random.set_rng(RandomState(34734))
    in_chans = train_set.get_topological_view().shape[1]
    input_time_length = 1000  # implies how many crops are processed in parallel, does _not_ determine receptive field size
    # receptive field size is determined by model architecture

    if network == 'deep':
        final_layer = create_deep_net(in_chans, input_time_length)
    else:
        assert network == 'shallow'
        final_layer = create_shallow_net(in_chans, input_time_length)

    dataset_splitter = SeveralSetsSplitter(valid_set_fraction=0.2,
                                           use_test_as_valid=False)
    iterator = CntWindowTrialIterator(
        batch_size=45,
        input_time_length=input_time_length,
        n_sample_preds=get_n_sample_preds(final_layer))

    monitors = [
        LossMonitor(),
        CntTrialMisclassMonitor(input_time_length=input_time_length),
        KappaMonitor(input_time_length=iterator.input_time_length, mode='max'),
        RuntimeMonitor()
    ]

    #debug: n_no_decrease_max_epochs = 2
    #debug: n_max_epochs = 4
    n_no_decrease_max_epochs = 80
    n_max_epochs = 800  #100
    # real values for paper were 80 and 800
    remember_best_chan = 'valid_' + stop_chan
    stop_criterion = Or([
        NoDecrease(remember_best_chan, num_epochs=n_no_decrease_max_epochs),
        MaxEpochs(num_epochs=n_max_epochs)
    ])

    dataset = combined_set
    splitter = dataset_splitter
    updates_expression = adam
    updates_modifier = MaxNormConstraintWithDefaults({})
    exp = Experiment(final_layer,
                     dataset,
                     splitter,
                     None,
                     iterator,
                     loss_expression,
                     updates_expression,
                     updates_modifier,
                     monitors,
                     stop_criterion,
                     remember_best_chan,
                     run_after_early_stop,
                     batch_modifier=None)

    if only_return_exp:
        return exp

    exp.setup()
    exp.run()
    end_time = time.time()
    run_time = end_time - start_time

    ex.info['finished'] = True
    for key in exp.monitor_chans:
        ex.info[key] = exp.monitor_chans[key][-1]
    ex.info['runtime'] = run_time
    save_pkl_artifact(ex, exp.monitor_chans, 'monitor_chans.pkl')
Exemplo n.º 26
0
def run_exp(
    data_folders,
    n_recordings,
    sensor_types,
    n_chans,
    max_recording_mins,
    sec_to_cut,
    duration_recording_mins,
    test_recording_mins,
    max_abs_val,
    sampling_freq,
    divisor,
    test_on_eval,
    n_folds,
    i_test_fold,
    shuffle,
    model_name,
    n_start_chans,
    n_chan_factor,
    input_time_length,
    final_conv_length,
    model_constraint,
    init_lr,
    batch_size,
    max_epochs,
    cuda,
):

    import torch.backends.cudnn as cudnn
    cudnn.benchmark = True
    preproc_functions = []
    preproc_functions.append(lambda data, fs: (
        data[:, int(sec_to_cut * fs):-int(sec_to_cut * fs)], fs))
    preproc_functions.append(lambda data, fs: (data[:, :int(
        duration_recording_mins * 60 * fs)], fs))
    if max_abs_val is not None:
        preproc_functions.append(
            lambda data, fs: (np.clip(data, -max_abs_val, max_abs_val), fs))

    preproc_functions.append(lambda data, fs: (resampy.resample(
        data, fs, sampling_freq, axis=1, filter='kaiser_fast'), sampling_freq))

    if divisor is not None:
        preproc_functions.append(lambda data, fs: (data / divisor, fs))
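
    # Note: each entry is a callable mapping (data, fs) -> (data, fs), so a
    # recording can be preprocessed by folding over the list, e.g.
    #     for fn in preproc_functions:
    #         data, fs = fn(data, fs)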

    dataset = DiagnosisSet(n_recordings=n_recordings,
                           max_recording_mins=max_recording_mins,
                           preproc_functions=preproc_functions,
                           data_folders=data_folders,
                           train_or_eval='train',
                           sensor_types=sensor_types)
    if test_on_eval:
        if test_recording_mins is None:
            test_recording_mins = duration_recording_mins
        test_preproc_functions = copy(preproc_functions)
        test_preproc_functions[1] = lambda data, fs: (data[:, :int(
            test_recording_mins * 60 * fs)], fs)
        test_dataset = DiagnosisSet(n_recordings=n_recordings,
                                    max_recording_mins=None,
                                    preproc_functions=test_preproc_functions,
                                    data_folders=data_folders,
                                    train_or_eval='eval',
                                    sensor_types=sensor_types)
    X, y = dataset.load()
    max_shape = np.max([list(x.shape) for x in X], axis=0)
    assert max_shape[1] == int(duration_recording_mins * sampling_freq * 60)
    if test_on_eval:
        test_X, test_y = test_dataset.load()
        max_shape = np.max([list(x.shape) for x in test_X], axis=0)
        assert max_shape[1] == int(test_recording_mins * sampling_freq * 60)
    if not test_on_eval:
        splitter = TrainValidTestSplitter(n_folds,
                                          i_test_fold,
                                          shuffle=shuffle)
        train_set, valid_set, test_set = splitter.split(X, y)
    else:
        splitter = TrainValidSplitter(n_folds,
                                      i_valid_fold=i_test_fold,
                                      shuffle=shuffle)
        train_set, valid_set = splitter.split(X, y)
        test_set = SignalAndTarget(test_X, test_y)
        del test_X, test_y
    del X, y  # shouldn't be necessary, but just to make sure

    set_random_seeds(seed=20170629, cuda=cuda)
    n_classes = 2
    if model_name == 'shallow':
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_start_chans,
            n_filters_spat=n_start_chans,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length).create_network()
    elif model_name == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         n_filters_time=n_start_chans,
                         n_filters_spat=n_start_chans,
                         input_time_length=input_time_length,
                         n_filters_2=int(n_start_chans * n_chan_factor),
                         n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
                         n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
                         final_conv_length=final_conv_length,
                         stride_before_pool=True).create_network()
    elif (model_name == 'deep_smac') or (model_name == 'deep_smac_bnorm'):
        if model_name == 'deep_smac':
            do_batch_norm = False
        else:
            assert model_name == 'deep_smac_bnorm'
            do_batch_norm = True
        double_time_convs = False
        drop_prob = 0.244445
        filter_length_2 = 12
        filter_length_3 = 14
        filter_length_4 = 12
        filter_time_length = 21
        final_conv_length = 1
        first_nonlin = elu
        first_pool_mode = 'mean'
        first_pool_nonlin = identity
        later_nonlin = elu
        later_pool_mode = 'mean'
        later_pool_nonlin = identity
        n_filters_factor = 1.679066
        n_filters_start = 32
        pool_time_length = 1
        pool_time_stride = 2
        split_first_layer = True
        n_chan_factor = n_filters_factor
        n_start_chans = n_filters_start
        model = Deep4Net(n_chans,
                         n_classes,
                         n_filters_time=n_start_chans,
                         n_filters_spat=n_start_chans,
                         input_time_length=input_time_length,
                         n_filters_2=int(n_start_chans * n_chan_factor),
                         n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
                         n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
                         final_conv_length=final_conv_length,
                         batch_norm=do_batch_norm,
                         double_time_convs=double_time_convs,
                         drop_prob=drop_prob,
                         filter_length_2=filter_length_2,
                         filter_length_3=filter_length_3,
                         filter_length_4=filter_length_4,
                         filter_time_length=filter_time_length,
                         first_nonlin=first_nonlin,
                         first_pool_mode=first_pool_mode,
                         first_pool_nonlin=first_pool_nonlin,
                         later_nonlin=later_nonlin,
                         later_pool_mode=later_pool_mode,
                         later_pool_nonlin=later_pool_nonlin,
                         pool_time_length=pool_time_length,
                         pool_time_stride=pool_time_stride,
                         split_first_layer=split_first_layer,
                         stride_before_pool=True).create_network()
    elif model_name == 'shallow_smac':
        conv_nonlin = identity
        do_batch_norm = True
        drop_prob = 0.328794
        filter_time_length = 56
        final_conv_length = 22
        n_filters_spat = 73
        n_filters_time = 24
        pool_mode = 'max'
        pool_nonlin = identity
        pool_time_length = 84
        pool_time_stride = 3
        split_first_layer = True
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_filters_time,
            n_filters_spat=n_filters_spat,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length,
            conv_nonlin=conv_nonlin,
            batch_norm=do_batch_norm,
            drop_prob=drop_prob,
            filter_time_length=filter_time_length,
            pool_mode=pool_mode,
            pool_nonlin=pool_nonlin,
            pool_time_length=pool_time_length,
            pool_time_stride=pool_time_stride,
            split_first_layer=split_first_layer,
        ).create_network()
    elif model_name == 'linear':
        model = nn.Sequential()
        model.add_module("conv_classifier",
                         nn.Conv2d(n_chans, n_classes, (600, 1)))
        model.add_module('softmax', nn.LogSoftmax())
        model.add_module('squeeze', Expression(lambda x: x.squeeze(3)))
    else:
        assert False, "unknown model name {:s}".format(model_name)
    to_dense_prediction_model(model)
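    # to_dense_prediction_model rewires the classifier so that one forward
    # pass over a long input window yields predictions for many temporal
    # positions (dense/cropped decoding) instead of a single trial output.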
    log.info("Model:\n{:s}".format(str(model)))
    if cuda:
        model.cuda()
    # determine output size
    test_input = np_to_var(
        np.ones((2, n_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    log.info("In shape: {:s}".format(str(test_input.cpu().data.numpy().shape)))

    out = model(test_input)
    log.info("Out shape: {:s}".format(str(out.cpu().data.numpy().shape)))
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    log.info("{:d} predictions per input/trial".format(n_preds_per_input))
    iterator = CropsFromTrialsIterator(batch_size=batch_size,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)
    optimizer = optim.Adam(model.parameters(), lr=init_lr)

    loss_function = lambda preds, targets: F.nll_loss(
        th.mean(preds, dim=2, keepdim=False), targets)

    if model_constraint is not None:
        assert model_constraint == 'defaultnorm'
        model_constraint = MaxNormDefaultConstraint()
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedDiagnosisMonitor(input_time_length, n_preds_per_input),
        RuntimeMonitor(),
    ]
    stop_criterion = MaxEpochs(max_epochs)
    batch_modifier = None
    run_after_early_stop = True
    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator,
                     loss_function,
                     optimizer,
                     model_constraint,
                     monitors,
                     stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=run_after_early_stop,
                     batch_modifier=batch_modifier,
                     cuda=cuda)
    exp.run()
    return exp
Exemplo n.º 27
0
def build_exp(model_name, cuda, data, batch_size, max_epochs, max_increase_epochs):

    log.info("==============================")
    log.info("Loading Data...")
    log.info("==============================")

    train_set = data.train_set
    valid_set = data.validation_set
    test_set = data.test_set

    log.info("==============================")
    log.info("Setting Up Model...")
    log.info("==============================")
    set_random_seeds(seed=20190706, cuda=cuda)
    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model_name == "shallow":
        model = NewShallowNet(
            n_chans, n_classes, input_time_length, final_conv_length="auto"
        )
        # model = ShallowFBCSPNet(
        #     n_chans,
        #     n_classes,
        #     input_time_length=input_time_length,
        #     final_conv_length="auto",
        # ).create_network()
    elif model_name == "deep":
        model = NewDeep4Net(n_chans, n_classes, input_time_length, "auto")
        # model = Deep4Net(
        #     n_chans,
        #     n_classes,
        #     input_time_length=input_time_length,
        #     final_conv_length="auto",
        # ).create_network()
    elif model_name == "eegnet":
        # model = EEGNet(n_chans, n_classes,
        #                input_time_length=input_time_length)
        # model = EEGNetv4(n_chans, n_classes,
        #                  input_time_length=input_time_length).create_network()
        model = NewEEGNet(n_chans, n_classes, input_time_length=input_time_length)
    else:
        raise ValueError("Unknown model name {:s}".format(model_name))

    if cuda:
        model.cuda()

    log.info("==============================")
    log.info("Logging Model Architecture:")
    log.info("==============================")
    log.info("Model: \n{:s}".format(str(model)))

    log.info("==============================")
    log.info("Building Experiment:")
    log.info("==============================")
    optimizer = optim.Adam(model.parameters())

    iterator = BalancedBatchSizeIterator(batch_size=batch_size)

    stop_criterion = Or(
        [MaxEpochs(max_epochs), NoDecrease("valid_misclass", max_increase_epochs)]
    )

    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(
        model,
        train_set,
        valid_set,
        test_set,
        iterator=iterator,
        loss_function=F.nll_loss,
        optimizer=optimizer,
        model_constraint=model_constraint,
        monitors=monitors,
        stop_criterion=stop_criterion,
        remember_best_column="valid_misclass",
        run_after_early_stop=True,
        cuda=cuda,
    )
    return exp
Exemplo n.º 28
0
def run_experiment(train_set, valid_set, test_set, model_name, optimizer_name,
                   init_lr, scheduler_name, use_norm_constraint, weight_decay,
                   schedule_weight_decay, restarts, max_epochs,
                   max_increase_epochs, np_th_seed):
    set_random_seeds(np_th_seed, cuda=True)
    # torch.backends.cudnn.benchmark = True  # sometimes crashes?
    if valid_set is not None:
        assert max_increase_epochs is not None
    assert (max_epochs is None) != (restarts is None)
    if max_epochs is None:
        max_epochs = np.sum(restarts)
    n_classes = int(np.max(train_set.y) + 1)
    n_chans = int(train_set.X.shape[1])
    input_time_length = 1000
    if model_name == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=2).create_network()
    elif model_name == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length=30).create_network()
    elif model_name in [
            'resnet-he-uniform', 'resnet-he-normal', 'resnet-xavier-normal',
            'resnet-xavier-uniform'
    ]:
        # str.lstrip strips a set of characters, not a prefix, so slice off 'resnet-' explicitly
        init_name = model_name[len('resnet-'):]
        from torch.nn import init
        init_fn = {
            'he-uniform': lambda w: init.kaiming_uniform(w, a=0),
            'he-normal': lambda w: init.kaiming_normal(w, a=0),
            'xavier-uniform': lambda w: init.xavier_uniform(w, gain=1),
            'xavier-normal': lambda w: init.xavier_normal(w, gain=1)
        }[init_name]
        model = EEGResNet(in_chans=n_chans,
                          n_classes=n_classes,
                          input_time_length=input_time_length,
                          final_pool_length=10,
                          n_first_filters=48,
                          conv_weight_init_fn=init_fn).create_network()
    else:
        raise ValueError("Unknown model name {:s}".format(model_name))
    if 'resnet' not in model_name:
        to_dense_prediction_model(model)
    model.cuda()
    model.eval()

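    # Dummy forward pass to determine how many predictions the model makes per input window.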
    out = model(np_to_var(train_set.X[:1, :, :input_time_length, None]).cuda())

    n_preds_per_input = out.cpu().data.numpy().shape[2]

    if optimizer_name == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               weight_decay=weight_decay,
                               lr=init_lr)
    elif optimizer_name == 'adamw':
        optimizer = AdamW(model.parameters(),
                          weight_decay=weight_decay,
                          lr=init_lr)

    iterator = CropsFromTrialsIterator(batch_size=60,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input,
                                       seed=np_th_seed)

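    # Optionally wrap the optimizer in a learning-rate scheduler whose annealing period
    # spans all training updates (split per restart if restarts are given).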
    if scheduler_name is not None:
        assert schedule_weight_decay == (optimizer_name == 'adamw')
        if scheduler_name == 'cosine':
            n_updates_per_epoch = sum(
                [1 for _ in iterator.get_batches(train_set, shuffle=True)])
            if restarts is None:
                n_updates_per_period = n_updates_per_epoch * max_epochs
            else:
                n_updates_per_period = np.array(restarts) * n_updates_per_epoch
            scheduler = CosineAnnealing(n_updates_per_period)
            optimizer = ScheduledOptimizer(
                scheduler,
                optimizer,
                schedule_weight_decay=schedule_weight_decay)
        elif scheduler_name == 'cut_cosine':
            # TODO: integrate with the 'cosine' branch above; kept separate for now
            # to avoid changing that code
            n_updates_per_epoch = sum(
                [1 for _ in iterator.get_batches(train_set, shuffle=True)])
            if restarts is None:
                n_updates_per_period = n_updates_per_epoch * max_epochs
            else:
                n_updates_per_period = np.array(restarts) * n_updates_per_epoch
            scheduler = CutCosineAnnealing(n_updates_per_period)
            optimizer = ScheduledOptimizer(
                scheduler,
                optimizer,
                schedule_weight_decay=schedule_weight_decay)
        else:
            raise ValueError("Unknown scheduler")
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]

    if use_norm_constraint:
        model_constraint = MaxNormDefaultConstraint()
    else:
        model_constraint = None
    # Average the dense per-crop predictions over time before computing the NLL loss
    loss_function = lambda preds, targets: F.nll_loss(th.mean(preds, dim=2),
                                                      targets)

    if valid_set is not None:
        run_after_early_stop = True
        do_early_stop = True
        remember_best_column = 'valid_misclass'
        stop_criterion = Or([
            MaxEpochs(max_epochs),
            NoDecrease('valid_misclass', max_increase_epochs)
        ])
    else:
        run_after_early_stop = False
        do_early_stop = False
        remember_best_column = None
        stop_criterion = MaxEpochs(max_epochs)

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column=remember_best_column,
                     run_after_early_stop=run_after_early_stop,
                     cuda=True,
                     do_early_stop=do_early_stop)
    exp.run()
    return exp
Exemplo n.º 29
0
def run_exp(max_recording_mins, n_recordings, sec_to_cut,
            duration_recording_mins, max_abs_val, max_min_threshold,
            max_min_expected, shrink_val, max_min_remove, batch_set_zero_val,
            batch_set_zero_test, sampling_freq, low_cut_hz, high_cut_hz,
            exp_demean, exp_standardize, moving_demean, moving_standardize,
            channel_demean, channel_standardize, divisor, n_folds, i_test_fold,
            input_time_length, final_conv_length, pool_stride, n_blocks_to_add,
            sigmoid, model_constraint, batch_size, max_epochs,
            only_return_exp):
    cuda = True

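    # Each preprocessing function maps (data, fs) -> (data, fs); they are applied in order when loading recordings.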
    preproc_functions = []
    preproc_functions.append(lambda data, fs: (
        data[:, int(sec_to_cut * fs):-int(sec_to_cut * fs)], fs))
    preproc_functions.append(lambda data, fs: (data[:, :int(
        duration_recording_mins * 60 * fs)], fs))
    if max_abs_val is not None:
        preproc_functions.append(
            lambda data, fs: (np.clip(data, -max_abs_val, max_abs_val), fs))
    if max_min_threshold is not None:
        preproc_functions.append(lambda data, fs: (clean_jumps(
            data, 200, max_min_threshold, max_min_expected, cuda), fs))
    if max_min_remove is not None:
        window_len = 200
        preproc_functions.append(lambda data, fs: (set_jumps_to_zero(
            data,
            window_len=window_len,
            threshold=max_min_remove,
            cuda=cuda,
            clip_min_max_to_zero=True), fs))

    if shrink_val is not None:
        preproc_functions.append(lambda data, fs: (shrink_spikes(
            data,
            shrink_val,
            1,
            9,
        ), fs))

    preproc_functions.append(lambda data, fs: (resampy.resample(
        data, fs, sampling_freq, axis=1, filter='kaiser_fast'), sampling_freq))
    preproc_functions.append(lambda data, fs: (bandpass_cnt(
        data, low_cut_hz, high_cut_hz, fs, filt_order=4, axis=1), fs))

    if exp_demean:
        preproc_functions.append(lambda data, fs: (exponential_running_demean(
            data.T, factor_new=0.001, init_block_size=100).T, fs))
    if exp_standardize:
        preproc_functions.append(
            lambda data, fs: (exponential_running_standardize(
                data.T, factor_new=0.001, init_block_size=100).T, fs))
    if moving_demean:
        preproc_functions.append(lambda data, fs: (padded_moving_demean(
            data, axis=1, n_window=201), fs))
    if moving_standardize:
        preproc_functions.append(lambda data, fs: (padded_moving_standardize(
            data, axis=1, n_window=201), fs))
    if channel_demean:
        preproc_functions.append(lambda data, fs: (demean(data, axis=1), fs))
    if channel_standardize:
        preproc_functions.append(lambda data, fs:
                                 (standardize(data, axis=1), fs))
    if divisor is not None:
        preproc_functions.append(lambda data, fs: (data / divisor, fs))

    dataset = DiagnosisSet(n_recordings=n_recordings,
                           max_recording_mins=max_recording_mins,
                           preproc_functions=preproc_functions)
    if not only_return_exp:
        X, y = dataset.load()

    splitter = Splitter(
        n_folds,
        i_test_fold,
    )
    if not only_return_exp:
        train_set, valid_set, test_set = splitter.split(X, y)
        del X, y  # shouldn't be necessary, but just to make sure
    else:
        train_set = None
        valid_set = None
        test_set = None

    set_random_seeds(seed=20170629, cuda=cuda)
    if sigmoid:
        n_classes = 1
    else:
        n_classes = 2
    in_chans = 21

    net = Deep4Net(
        in_chans=in_chans,
        n_classes=n_classes,
        input_time_length=input_time_length,
        final_conv_length=final_conv_length,
        pool_time_length=pool_stride,
        pool_time_stride=pool_stride,
        n_filters_2=50,
        n_filters_3=80,
        n_filters_4=120,
    )
    model = net_with_more_layers(net, n_blocks_to_add, nn.MaxPool2d)
    if sigmoid:
        model = to_linear_plus_minus_net(model)
    optimizer = optim.Adam(model.parameters())
    to_dense_prediction_model(model)
    log.info("Model:\n{:s}".format(str(model)))
    if cuda:
        model.cuda()
    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    log.info("{:d} predictions per input/trial".format(n_preds_per_input))
    iterator = CropsFromTrialsIterator(batch_size=batch_size,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)
    if sigmoid:
        loss_function = lambda preds, targets: binary_cross_entropy_with_logits(
            th.mean(preds, dim=2)[:, 1, 0], targets.type_as(preds))
    else:
        loss_function = lambda preds, targets: F.nll_loss(
            th.mean(preds, dim=2)[:, :, 0], targets)

    if model_constraint is not None:
        model_constraint = MaxNormDefaultConstraint()
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length),
        RuntimeMonitor(),
    ]
    stop_criterion = MaxEpochs(max_epochs)
    batch_modifier = None
    if batch_set_zero_val is not None:
        batch_modifier = RemoveMinMaxDiff(batch_set_zero_val,
                                          clip_max_abs=True,
                                          set_zero=True)
    if (batch_set_zero_val is not None) and batch_set_zero_test:
        iterator = ModifiedIterator(
            iterator,
            batch_modifier,
        )
        batch_modifier = None
    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator,
                     loss_function,
                     optimizer,
                     model_constraint,
                     monitors,
                     stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     batch_modifier=batch_modifier,
                     cuda=cuda)
    if not only_return_exp:
        exp.run()
    else:
        exp.dataset = dataset
        exp.splitter = splitter

    return exp
Exemplo n.º 30
0
# Monitors log the training progress
monitors = [
    LossMonitor(),
    MisclassMonitor(col_suffix='misclass'),
    SeizureMonitor(input_time_length),
    RuntimeMonitor(),
]
# Stop criterion determines when the first stop happens
stop_criterion = MaxEpochs(5)
exp = Experiment(model,
                 train_set,
                 valid_set,
                 test_set,
                 iterator,
                 loss_function,
                 optimizer,
                 model_constraint,
                 monitors,
                 stop_criterion,
                 remember_best_column='valid_misclass',
                 run_after_early_stop=True,
                 batch_modifier=None,
                 cuda=cuda)

# need to set up Python logging beforehand to be able to see any output
import logging
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                    level=logging.DEBUG,
                    stream=sys.stdout)
exp.run()
Exemplo n.º 31
0
def run_exp(data_folder, session_id, subject_id, low_cut_hz, model, cuda):
    ival = [-500, 4000]
    max_epochs = 1600
    max_increase_epochs = 160
    batch_size = 10
    high_cut_hz = 38
    factor_new = 1e-3
    init_block_size = 1000
    valid_set_fraction = .2
    ''' # BCIcompetition
    train_filename = 'A{:02d}T.gdf'.format(subject_id)
    test_filename = 'A{:02d}E.gdf'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace('.gdf', '.mat')
    test_label_filepath = test_filepath.replace('.gdf', '.mat')

    train_loader = BCICompetition4Set2A(
        train_filepath, labels_filename=train_label_filepath)
    test_loader = BCICompetition4Set2A(
        test_filepath, labels_filename=test_label_filepath)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()
    '''

    # GIGAscience
    filename = 'sess{:02d}_subj{:02d}_EEG_MI.mat'.format(
        session_id, subject_id)
    filepath = os.path.join(data_folder, filename)
    train_variable = 'EEG_MI_train'
    test_variable = 'EEG_MI_test'

    train_loader = GIGAscience(filepath, train_variable)
    test_loader = GIGAscience(filepath, test_variable)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing
    ''' channel
    ['Fp1', 'Fp2', 'F7', 'F3', 'Fz', 'F4', 'F8', 'FC5', 'FC1', 'FC2', 'FC6', 'T7', 'C3', 'Cz', 'C4', 'T8', 'TP9', 'CP5',
     'CP1', 'CP2', 'CP6', 'TP10', 'P7', 'P3', 'Pz', 'P4', 'P8', 'PO9', 'O1', 'Oz', 'O2', 'PO10', 'FC3', 'FC4', 'C5',
     'C1', 'C2', 'C6', 'CP3', 'CPz', 'CP4', 'P1', 'P2', 'POz', 'FT9', 'FTT9h', 'TTP7h', 'TP7', 'TPP9h', 'FT10',
     'FTT10h', 'TPP8h', 'TP8', 'TPP10h', 'F9', 'F10', 'AF7', 'AF3', 'AF4', 'AF8', 'PO3', 'PO4']
    '''

    train_cnt = train_cnt.pick_channels([
        'FC5', 'FC3', 'FC1', 'Fz', 'FC2', 'FC4', 'FC6', 'C5', 'C3', 'C1', 'Cz',
        'C2', 'C4', 'C6', 'CP5', 'CP3', 'CP1', 'CPz', 'CP2', 'CP4', 'CP6', 'Pz'
    ])
    train_cnt, train_cnt.info['events'] = train_cnt.copy().resample(
        250, npad='auto', events=train_cnt.info['events'])

    assert len(train_cnt.ch_names) == 22
    # let's convert to microvolts for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(a,
                               low_cut_hz,
                               high_cut_hz,
                               train_cnt.info['sfreq'],
                               filt_order=3,
                               axis=1), train_cnt)
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=factor_new, init_block_size=init_block_size,
            eps=1e-4).T, train_cnt)

    test_cnt = test_cnt.pick_channels([
        'FC5', 'FC3', 'FC1', 'Fz', 'FC2', 'FC4', 'FC6', 'C5', 'C3', 'C1', 'Cz',
        'C2', 'C4', 'C6', 'CP5', 'CP3', 'CP1', 'CPz', 'CP2', 'CP4', 'CP6', 'Pz'
    ])
    test_cnt, test_cnt.info['events'] = test_cnt.copy().resample(
        250, npad='auto', events=test_cnt.info['events'])

    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(a,
                               low_cut_hz,
                               high_cut_hz,
                               test_cnt.info['sfreq'],
                               filt_order=3,
                               axis=1), test_cnt)
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=factor_new, init_block_size=init_block_size,
            eps=1e-4).T, test_cnt)

    marker_def = OrderedDict([('Right Hand', [1]), ('Left Hand', [2])])

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(
        train_set, first_set_fraction=1 - valid_set_fraction)

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 2
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length='auto').create_network()
    elif model == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))

    optimizer = optim.Adam(model.parameters())

    iterator = BalancedBatchSizeIterator(batch_size=batch_size)

    stop_criterion = Or([
        MaxEpochs(max_epochs),
        NoDecrease('valid_misclass', max_increase_epochs)
    ])

    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=F.nll_loss,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    return exp
Exemplo n.º 32
0
def run_exp(data_folder, subject_id, low_cut_hz, model, cuda):
    ival = [-500, 4000]
    input_time_length = 1000
    max_epochs = 800
    max_increase_epochs = 80
    batch_size = 60
    high_cut_hz = 38
    factor_new = 1e-3
    init_block_size = 1000
    valid_set_fraction = 0.2

    train_filename = 'A{:02d}T.gdf'.format(subject_id)
    test_filename = 'A{:02d}E.gdf'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    train_label_filepath = train_filepath.replace('.gdf', '.mat')
    test_label_filepath = test_filepath.replace('.gdf', '.mat')

    train_loader = BCICompetition4Set2A(train_filepath,
                                        labels_filename=train_label_filepath)
    test_loader = BCICompetition4Set2A(test_filepath,
                                       labels_filename=test_label_filepath)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing

    train_cnt = train_cnt.drop_channels(
        ['STI 014', 'EOG-left', 'EOG-central', 'EOG-right'])
    assert len(train_cnt.ch_names) == 22
    # let's convert to microvolts for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(a,
                               low_cut_hz,
                               high_cut_hz,
                               train_cnt.info['sfreq'],
                               filt_order=3,
                               axis=1), train_cnt)
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=factor_new, init_block_size=init_block_size,
            eps=1e-4).T, train_cnt)

    test_cnt = test_cnt.drop_channels(
        ['STI 014', 'EOG-left', 'EOG-central', 'EOG-right'])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(a,
                               low_cut_hz,
                               high_cut_hz,
                               test_cnt.info['sfreq'],
                               filt_order=3,
                               axis=1), test_cnt)
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=factor_new, init_block_size=init_block_size,
            eps=1e-4).T, test_cnt)

    marker_def = OrderedDict([('Left Hand', [1]), ('Right Hand', [2]),
                              ('Foot', [3]), ('Tongue', [4])])

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(
        train_set, first_set_fraction=1 - valid_set_fraction)

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    if model == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length=30).create_network()
    elif model == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=2).create_network()

    to_dense_prediction_model(model)
    if cuda:
        model.cuda()

    log.info("Model: \n{:s}".format(str(model)))
    dummy_input = np_to_var(train_set.X[:1, :, :, None])
    if cuda:
        dummy_input = dummy_input.cuda()
    out = model(dummy_input)

    n_preds_per_input = out.cpu().data.numpy().shape[2]

    optimizer = optim.Adam(model.parameters())

    iterator = CropsFromTrialsIterator(batch_size=batch_size,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)

    stop_criterion = Or([
        MaxEpochs(max_epochs),
        NoDecrease('valid_misclass', max_increase_epochs)
    ])

    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]

    model_constraint = MaxNormDefaultConstraint()

    loss_function = lambda preds, targets: F.nll_loss(
        th.mean(preds, dim=2, keepdim=False), targets)

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    return exp
Exemplo n.º 33
0
def run_exp(max_recording_mins,
            n_recordings,
            sec_to_cut_at_start,
            sec_to_cut_at_end,
            duration_recording_mins,
            max_abs_val,
            clip_before_resample,
            sampling_freq,
            divisor,
            n_folds,
            i_test_fold,
            shuffle,
            merge_train_valid,
            model,
            input_time_length,
            optimizer,
            learning_rate,
            weight_decay,
            scheduler,
            model_constraint,
            batch_size,
            max_epochs,
            only_return_exp,
            time_cut_off_sec,
            start_time,
            test_on_eval,
            test_recording_mins,
            sensor_types,
            log_dir,
            np_th_seed,
            cuda=True):
    import torch.backends.cudnn as cudnn
    cudnn.benchmark = True
    if optimizer == 'adam':
        assert merge_train_valid == False
    else:
        assert optimizer == 'adamw'
        assert merge_train_valid == True

    preproc_functions = create_preproc_functions(
        sec_to_cut_at_start=sec_to_cut_at_start,
        sec_to_cut_at_end=sec_to_cut_at_end,
        duration_recording_mins=duration_recording_mins,
        max_abs_val=max_abs_val,
        clip_before_resample=clip_before_resample,
        sampling_freq=sampling_freq,
        divisor=divisor)

    dataset = DiagnosisSet(n_recordings=n_recordings,
                           max_recording_mins=max_recording_mins,
                           preproc_functions=preproc_functions,
                           train_or_eval='train',
                           sensor_types=sensor_types)

    if test_on_eval:
        if test_recording_mins is None:
            test_recording_mins = duration_recording_mins

        test_preproc_functions = create_preproc_functions(
            sec_to_cut_at_start=sec_to_cut_at_start,
            sec_to_cut_at_end=sec_to_cut_at_end,
            duration_recording_mins=test_recording_mins,
            max_abs_val=max_abs_val,
            clip_before_resample=clip_before_resample,
            sampling_freq=sampling_freq,
            divisor=divisor)
        test_dataset = DiagnosisSet(n_recordings=n_recordings,
                                    max_recording_mins=None,
                                    preproc_functions=test_preproc_functions,
                                    train_or_eval='eval',
                                    sensor_types=sensor_types)
    if not only_return_exp:
        X, y = dataset.load()
        max_shape = np.max([list(x.shape) for x in X], axis=0)
        assert max_shape[1] == int(duration_recording_mins * sampling_freq *
                                   60)
        if test_on_eval:
            test_X, test_y = test_dataset.load()
            max_shape = np.max([list(x.shape) for x in test_X], axis=0)
            assert max_shape[1] == int(test_recording_mins * sampling_freq *
                                       60)
    if not test_on_eval:
        splitter = TrainValidTestSplitter(n_folds,
                                          i_test_fold,
                                          shuffle=shuffle)
    else:
        splitter = TrainValidSplitter(n_folds,
                                      i_valid_fold=i_test_fold,
                                      shuffle=shuffle)
    if not only_return_exp:
        if not test_on_eval:
            train_set, valid_set, test_set = splitter.split(X, y)
        else:

            train_set, valid_set = splitter.split(X, y)
            test_set = SignalAndTarget(test_X, test_y)
            del test_X, test_y
        del X, y  # shouldn't be necessary, but just to make sure
        if merge_train_valid:
            train_set = concatenate_sets([train_set, valid_set])
            # just reduce valid for faster computations
            valid_set.X = valid_set.X[:8]
            valid_set.y = valid_set.y[:8]
            # np.save('/data/schirrmr/schirrmr/auto-diag/lukasrepr/compare/mne-0-16-2/train_X.npy', train_set.X)
            # np.save('/data/schirrmr/schirrmr/auto-diag/lukasrepr/compare/mne-0-16-2/train_y.npy', train_set.y)
            # np.save('/data/schirrmr/schirrmr/auto-diag/lukasrepr/compare/mne-0-16-2/valid_X.npy', valid_set.X)
            # np.save('/data/schirrmr/schirrmr/auto-diag/lukasrepr/compare/mne-0-16-2/valid_y.npy', valid_set.y)
            # np.save('/data/schirrmr/schirrmr/auto-diag/lukasrepr/compare/mne-0-16-2/test_X.npy', test_set.X)
            # np.save('/data/schirrmr/schirrmr/auto-diag/lukasrepr/compare/mne-0-16-2/test_y.npy', test_set.y)
    else:
        train_set = None
        valid_set = None
        test_set = None

    log.info("Model:\n{:s}".format(str(model)))
    if cuda:
        model.cuda()
    model.eval()
    in_chans = 21
    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    log.info("{:d} predictions per input/trial".format(n_preds_per_input))
    iterator = CropsFromTrialsIterator(batch_size=batch_size,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input,
                                       seed=np_th_seed)
    assert optimizer in ['adam', 'adamw'], ("Expect optimizer to be either "
                                            "adam or adamw")
    schedule_weight_decay = optimizer == 'adamw'
    if optimizer == 'adam':
        optim_class = optim.Adam
        assert schedule_weight_decay == False
        assert merge_train_valid == False
    else:
        optim_class = AdamW
        assert schedule_weight_decay == True
        assert merge_train_valid == True

    optimizer = optim_class(model.parameters(),
                            lr=learning_rate,
                            weight_decay=weight_decay)
    if scheduler is not None:
        assert scheduler == 'cosine'
        n_updates_per_epoch = sum(
            [1 for _ in iterator.get_batches(train_set, shuffle=True)])
        # Adapt if you have a different number of epochs
        n_updates_per_period = n_updates_per_epoch * max_epochs
        scheduler = CosineAnnealing(n_updates_per_period)
        optimizer = ScheduledOptimizer(
            scheduler, optimizer, schedule_weight_decay=schedule_weight_decay)
    loss_function = nll_loss_on_mean

    if model_constraint is not None:
        assert model_constraint == 'defaultnorm'
        model_constraint = MaxNormDefaultConstraint()
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedDiagnosisMonitor(input_time_length, n_preds_per_input),
        RuntimeMonitor(),
    ]

    stop_criterion = MaxEpochs(max_epochs)
    loggers = [Printer(), TensorboardWriter(log_dir)]
    batch_modifier = None
    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator,
                     loss_function,
                     optimizer,
                     model_constraint,
                     monitors,
                     stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     batch_modifier=batch_modifier,
                     cuda=cuda,
                     loggers=loggers)

    if not only_return_exp:
        # Until first stop
        exp.setup_training()
        exp.monitor_epoch(exp.datasets)
        exp.log_epoch()
        exp.rememberer.remember_epoch(exp.epochs_df, exp.model, exp.optimizer)

        exp.iterator.reset_rng()
        while not exp.stop_criterion.should_stop(exp.epochs_df):
            if (time.time() - start_time) > time_cut_off_sec:
                log.info(
                    "Ran out of time after {:.2f} sec.".format(time.time() -
                                                               start_time))
                return exp
            log.info("Still in time after {:.2f} sec.".format(time.time() -
                                                              start_time))
            exp.run_one_epoch(exp.datasets, remember_best=True)
        if (time.time() - start_time) > time_cut_off_sec:
            log.info("Ran out of time after {:.2f} sec.".format(time.time() -
                                                                start_time))
            return exp
        if not merge_train_valid:
            exp.setup_after_stop_training()
            # Run until second stop
            datasets = exp.datasets
            datasets['train'] = concatenate_sets(
                [datasets['train'], datasets['valid']])
            exp.monitor_epoch(datasets)
            exp.log_epoch()

            exp.iterator.reset_rng()
            while not exp.stop_criterion.should_stop(exp.epochs_df):
                if (time.time() - start_time) > time_cut_off_sec:
                    log.info("Ran out of time after {:.2f} sec.".format(
                        time.time() - start_time))
                    return exp
                log.info("Still in time after {:.2f} sec.".format(time.time() -
                                                                  start_time))
                exp.run_one_epoch(datasets, remember_best=False)

    else:
        exp.dataset = dataset
        exp.splitter = splitter
    if test_on_eval:
        exp.test_dataset = test_dataset

    return exp
Exemplo n.º 34
0
def train_hyperopt(params):
    """ Runs one fold with given parameters and returns test misclass."""
    lasagne.random.set_rng(RandomState(9859295))

    template_name = params.pop('template_name')    
    params = adjust_params_for_hyperopt(params)
    
    config_strings = create_config_strings(template_name)
    config_objects = create_config_objects(config_strings)
    templates, _ = create_templates_variants_from_config_objects(
        config_objects)
    
    
    processed_templates, params_without_template_params = process_templates(
        templates, params)
    final_params = process_parameters_by_templates(
        params_without_template_params, processed_templates)
    
    # go to the directory above this source file
    main_template_filename = os.path.dirname(os.path.abspath(os.path.dirname(
        __file__)))
    # then build the full path to the config
    main_template_filename = os.path.join(main_template_filename, "configs",
        "eegnet_template.yaml")
    
    with open(main_template_filename, 'r') as main_template_file:
        main_template_str = main_template_file.read()
        
        
    final_params['original_params'] = 'dummy'
    train_str = Template(main_template_str).substitute(final_params)
    
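    # Register a constructor so nodes tagged !DoNotLoad are skipped on this first parse;
    # the layers can only be built once in_sensors/in_rows/in_cols are substituted below.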
    def do_not_load_constructor(loader, node):
        return None
    yaml.add_constructor(u'!DoNotLoad', do_not_load_constructor)
    modified_train_str = train_str.replace('layers: ', 'layers: !DoNotLoad ')
    train_dict = yaml_parse.load(modified_train_str) 
    dataset = train_dict['dataset'] 
    dataset.load()
    dataset_provider = train_dict['dataset_provider']
    
    assert 'in_sensors' in train_str
    assert 'in_rows' in train_str
    assert 'in_cols' in train_str
    
    train_str = train_str.replace('in_sensors',
        str(dataset.get_topological_view().shape[1]))
    train_str = train_str.replace('in_rows',
        str(dataset.get_topological_view().shape[2]))
    train_str = train_str.replace('in_cols', 
        str(dataset.get_topological_view().shape[3]))
    
    train_dict = yaml_parse.load(train_str)
    layers = train_dict['layers']
    final_layer = layers[-1]

    # turn off debug/info logging
    logging.getLogger("pylearn2").setLevel(logging.WARN)
    logging.getLogger("braindecode").setLevel(logging.WARN)
    exp = Experiment()
    exp.setup(final_layer, dataset_provider, **train_dict['exp_args'])
    exp.run()
    final_misclass = exp.monitor_chans['test_misclass'][-1]
    print("Result for")
    pprint(params)
    print("Final Test misclass: {:5.4f}".format(float(final_misclass)))
    return final_misclass
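
# --- Added sketch, not part of the original example ---
# A minimal illustration of how an objective like train_hyperopt() above could be plugged
# into hyperopt's fmin. The search-space keys below are assumptions for illustration only;
# the real space depends on the parameters expected by the chosen template.
from hyperopt import fmin, hp, tpe

search_space = {
    'template_name': 'eegnet_template',  # template used to build the config string
    'learning_rate': hp.loguniform('learning_rate', -10, -2),  # assumed template parameter
}

# Minimize the returned test misclassification over 50 trials of the TPE optimizer.
best_params = fmin(train_hyperopt, search_space, algo=tpe.suggest, max_evals=50)
print(best_params)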