Code Example #1
def create_model_lstm(n_classes, input_time_length, in_chans=22):

    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    # final_conv_length determines the size of the receptive field of the ConvNet

    lstm_size = 30
    lstm_layers = 1

    print("#### LSTM SIZE {} ####".format(lstm_size))
    print("#### LSTM LAYERS {} ####".format(lstm_layers))

    model = ShallowFBCSPLSTM(in_chans=in_chans,
                             n_classes=n_classes,
                             input_time_length=input_time_length,
                             lstm_size=lstm_size,
                             lstm_layers=lstm_layers,
                             n_filters_time=lstm_size,
                             n_filters_spat=lstm_size,
                             final_conv_length=4,
                             pool_time_length=20,
                             pool_time_stride=5).create_network()
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    return model
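
A quick way to check how many crop predictions the dense model above emits per input window is a dummy forward pass, as several of the later examples do. A minimal sketch, assuming the legacy braindecode API (np_to_var from braindecode.torch_ext.util); the channel count and window length are placeholders, and create_model_lstm itself still needs the module-level cuda flag and the project-specific ShallowFBCSPLSTM class:

import numpy as np
from braindecode.torch_ext.util import np_to_var

in_chans, input_time_length = 22, 1000  # placeholder values
model = create_model_lstm(n_classes=4,
                          input_time_length=input_time_length,
                          in_chans=in_chans)
dummy_input = np_to_var(
    np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
# if the module-level cuda flag is set, the input must be moved to GPU as well:
# dummy_input = dummy_input.cuda()
out = model(dummy_input)
n_preds_per_input = out.cpu().data.numpy().shape[2]  # crop predictions per window
print("{:d} predictions per input/trial".format(n_preds_per_input))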
Code Example #2
File: hybrid.py Project: zhangys2019/braindecode
    def __init__(self, in_chans, n_classes, input_time_length):
        super(HybridNetModule, self).__init__()
        deep_model = Deep4Net(
            in_chans,
            n_classes,
            n_filters_time=20,
            n_filters_spat=30,
            n_filters_2=40,
            n_filters_3=50,
            n_filters_4=60,
            input_time_length=input_time_length,
            final_conv_length=2,
        ).create_network()
        shallow_model = ShallowFBCSPNet(
            in_chans,
            n_classes,
            input_time_length=input_time_length,
            n_filters_time=30,
            n_filters_spat=40,
            filter_time_length=28,
            final_conv_length=29,
        ).create_network()

        reduced_deep_model = nn.Sequential()
        for name, module in deep_model.named_children():
            if name == "conv_classifier":
                new_conv_layer = nn.Conv2d(
                    module.in_channels,
                    60,
                    kernel_size=module.kernel_size,
                    stride=module.stride,
                )
                reduced_deep_model.add_module("deep_final_conv",
                                              new_conv_layer)
                break
            reduced_deep_model.add_module(name, module)

        reduced_shallow_model = nn.Sequential()
        for name, module in shallow_model.named_children():
            if name == "conv_classifier":
                new_conv_layer = nn.Conv2d(
                    module.in_channels,
                    40,
                    kernel_size=module.kernel_size,
                    stride=module.stride,
                )
                reduced_shallow_model.add_module("shallow_final_conv",
                                                 new_conv_layer)
                break
            reduced_shallow_model.add_module(name, module)

        to_dense_prediction_model(reduced_deep_model)
        to_dense_prediction_model(reduced_shallow_model)
        self.reduced_deep_model = reduced_deep_model
        self.reduced_shallow_model = reduced_shallow_model
        self.final_conv = nn.Conv2d(100,
                                    n_classes,
                                    kernel_size=(1, 1),
                                    stride=1)
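
The constructor above only wires up the two sub-networks; the forward pass is not shown. Below is a plausible sketch of it: the dense outputs of the reduced deep and shallow models are concatenated along the filter dimension (60 + 40 = 100 channels, matching final_conv) before the 1x1 classifier. The time-dimension alignment and the final log-softmax are assumptions; the actual braindecode hybrid model may handle them differently.

import torch
import torch.nn.functional as F

def forward(self, x):  # sketched method of HybridNetModule
    deep_out = self.reduced_deep_model(x)        # batch x 60 x time x 1
    shallow_out = self.reduced_shallow_model(x)  # batch x 40 x time' x 1
    # crop both outputs to the shorter time length so they can be concatenated
    n_time = min(deep_out.size(2), shallow_out.size(2))
    merged = torch.cat(
        (deep_out[:, :, :n_time], shallow_out[:, :, :n_time]), dim=1)  # 100 filters
    out = self.final_conv(merged)                # batch x n_classes x time x 1
    return F.log_softmax(out, dim=1)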
Code Example #3
def train(config):
    cuda = True
    model = config['model']
    if model == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=2,
                         config=config).create_network()

    to_dense_prediction_model(model)
    if cuda:
        model.cuda()

    log.info("Model: \n{:s}".format(str(model)))
    dummy_input = np_to_var(train_set.X[:1, :, :, None])
    if cuda:
        dummy_input = dummy_input.cuda()
    out = model(dummy_input)

    n_preds_per_input = out.cpu().data.numpy().shape[2]

    optimizer = optim.Adam(model.parameters())

    iterator = CropsFromTrialsIterator(batch_size=60,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)

    stop_criterion = Or([MaxEpochs(20), NoDecrease('valid_misclass', 80)])

    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]

    model_constraint = MaxNormDefaultConstraint()

    loss_function = lambda preds, targets: F.nll_loss(
        th.mean(preds, dim=2, keepdim=False), targets)

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     cuda=cuda)
    exp.run()
    print(exp.rememberer)
    return exp.rememberer.lowest_val
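
train() above relies on module-level names (n_chans, n_classes, input_time_length, train_set, valid_set, test_set, log) and only builds a network for the 'deep' setting, forwarding the config dict to what is presumably a project-specific Deep4Net. A hypothetical call, assuming those globals are defined elsewhere in the script:

lowest_valid_misclass = train({'model': 'deep'})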
Code Example #4
 def finalized_model_to_dilated(self, model):
     to_dense_prediction_model(model)
     conv_classifier = model.conv_classifier
     model.conv_classifier = nn.Conv2d(conv_classifier.in_channels,
                                       conv_classifier.out_channels,
                                       (global_vars.get('final_conv_size'),
                                        conv_classifier.kernel_size[1]),
                                       stride=conv_classifier.stride,
                                       dilation=conv_classifier.dilation)
Code Example #5
File: base.py Project: Shasvat-Desai/braindecode
 def compile(self, loss, optimizer, metrics, cropped=False, seed=0):
     self.loss = loss
     self._ensure_network_exists()
     if cropped:
         to_dense_prediction_model(self.network)
     if not hasattr(optimizer, 'step'):
         optimizer_class = find_optimizer(optimizer)
         optimizer = optimizer_class(self.network.parameters())
     self.optimizer = optimizer
     self.metrics = metrics
     self.seed_rng = RandomState(seed)
     self.cropped = cropped
     self.compiled = True
Code Example #6
    def compile(
        self,
        loss,
        optimizer,
        extra_monitors=None,
        cropped=False,
        iterator_seed=0,
    ):
        """
        Setup training for this model.

        Parameters
        ----------
        loss: function (predictions, targets) -> torch scalar
        optimizer: `torch.optim.Optimizer` or string
            Either supply an optimizer or the name of the class (e.g. 'adam')
        extra_monitors: List of Braindecode monitors, optional
            In case you want to monitor additional values besides loss, misclass and runtime.
        cropped: bool
            Whether to perform cropped decoding, see the cropped decoding tutorial.
        iterator_seed: int
            Seed for the iterator's random generator.
        Returns
        -------

        """
        self.loss = loss
        self._ensure_network_exists()
        if cropped:
            model_already_dense = np.any(
                [
                    hasattr(m, "dilation")
                    and (m.dilation != 1)
                    and (m.dilation) != (1, 1)
                    for m in self.network.modules()
                ]
            )
            if not model_already_dense:
                to_dense_prediction_model(self.network)
            else:
                log.info("Seems model was already converted to dense model...")
        if not hasattr(optimizer, "step"):
            optimizer_class = find_optimizer(optimizer)
            optimizer = optimizer_class(self.network.parameters())
        self.optimizer = optimizer
        self.extra_monitors = extra_monitors
        # Already setting it here, so multiple calls to fit
        # will lead to different batches being drawn
        self.seed_rng = RandomState(iterator_seed)
        self.cropped = cropped
        self.compiled = True
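
A minimal usage sketch for the compile() method above, assuming the braindecode 0.4.x model-wrapper API in which ShallowFBCSPNet itself exposes compile()/fit(); the channel count, window length and final_conv_length are placeholder values:

import torch.nn.functional as F
from braindecode.models.shallow_fbcsp import ShallowFBCSPNet

model = ShallowFBCSPNet(in_chans=22, n_classes=4,
                        input_time_length=1000,   # placeholder values
                        final_conv_length=30)
model.compile(loss=F.nll_loss,
              optimizer='adam',   # a string is resolved via find_optimizer()
              cropped=True,       # triggers to_dense_prediction_model() internally
              iterator_seed=1)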
Code Example #7
 def make_regressor(self):
     """
     Transforms the model from a network suited for classification into a network suited
     for regression by removing the last softmax layer
     :return:
     """
     if not self.regressed:
         new_model = nn.Sequential()
         for name, module in self.model.named_children():
             # print(name)
             if 'softmax' in name:
                 break
             new_model.add_module(name, module)
         new_model.add_module('squeeze_out', Expression(squeeze_out))
         self.model = new_model
         to_dense_prediction_model(self.model)
         self.regressed = True
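
squeeze_out and Expression are not defined in the snippet above. A plausible definition, assuming the usual braindecode regression pattern of dropping the singleton class and width dimensions of the dense output; the project's actual helper may differ:

from braindecode.torch_ext.modules import Expression

def squeeze_out(x):
    # dense regression output is assumed to have shape (batch, 1, n_preds, 1);
    # drop the singleton class and width dimensions -> (batch, n_preds)
    assert x.size()[1] == 1 and x.size()[3] == 1
    return x[:, 0, :, 0]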
Code Example #8
def create_model(n_classes, input_time_length, in_chans=22):

    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(in_chans=in_chans,
                            n_classes=n_classes,
                            input_time_length=input_time_length,
                            final_conv_length=4,
                            pool_time_length=20,
                            pool_time_stride=5).create_network()
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    return model
Code Example #9
File: deep4.py Project: kahartma/eeggan
def build_model(input_time_length, n_chans, n_classes, cropped=False):
    model = Deep4Net(n_chans,
                     n_classes,
                     input_time_length=input_time_length,
                     final_conv_length='auto')

    if cropped:
        final_conv_length = model.final_conv_length
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=final_conv_length)

    model = model.create_network()
    if cropped:
        to_dense_prediction_model(model)

    return model
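
A hypothetical call to build_model() above; the trial length, channel count and class count are placeholder values, not anything taken from the eeggan project:

model = build_model(input_time_length=1125, n_chans=22, n_classes=4, cropped=True)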
Code Example #10
File: model.py Project: poplar17/eeg_thesis
def build_cropped_model(model_name, n_chans, n_classes, config):
    # input_time_length:
    #   will determine how many crops are processed in parallel
    #   supercrop, number of crops taken through network together
    # final_conv_length:
    #   will determine how many crops are processed in parallel
    #   we manually set the length of the final convolution layer
    #   to some length that makes the receptive field of the
    #   ConvNet smaller than the number of samples in a trial
    cropped_input_time_length = config['cropped']['input_time_length']
    final_conv_length_shallow = config['cropped']['final_conv_length_shallow']
    final_conv_length_deep = config['cropped']['final_conv_length_deep']
    if model_name == 'shallow':
        model = ShallowFBCSPNet(n_chans, n_classes,
                                input_time_length=cropped_input_time_length,
                                final_conv_length=final_conv_length_shallow) \
            .create_network()
    elif model_name == 'deep':
        model = Deep4Net(n_chans, n_classes,
                         input_time_length=cropped_input_time_length,
                         final_conv_length=final_conv_length_deep) \
            .create_network()
    to_dense_prediction_model(model)
    return model
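
A hypothetical config for build_cropped_model(); the nested keys mirror exactly what the function reads, while the numeric values are placeholders rather than values from the eeg_thesis project:

config = {
    'cropped': {
        'input_time_length': 1000,
        'final_conv_length_shallow': 30,
        'final_conv_length_deep': 2,
    }
}
model = build_cropped_model('shallow', n_chans=22, n_classes=4, config=config)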
Code Example #11
def run_exp(max_recording_mins, n_recordings, sec_to_cut,
            duration_recording_mins, max_abs_val, max_min_threshold,
            max_min_expected, shrink_val, max_min_remove, batch_set_zero_val,
            batch_set_zero_test, sampling_freq, low_cut_hz, high_cut_hz,
            exp_demean, exp_standardize, moving_demean, moving_standardize,
            channel_demean, channel_standardize, divisor, n_folds, i_test_fold,
            input_time_length, final_conv_length, pool_stride, n_blocks_to_add,
            sigmoid, model_constraint, batch_size, max_epochs,
            only_return_exp):
    cuda = True

    preproc_functions = []
    preproc_functions.append(lambda data, fs: (
        data[:, int(sec_to_cut * fs):-int(sec_to_cut * fs)], fs))
    preproc_functions.append(lambda data, fs: (data[:, :int(
        duration_recording_mins * 60 * fs)], fs))
    if max_abs_val is not None:
        preproc_functions.append(
            lambda data, fs: (np.clip(data, -max_abs_val, max_abs_val), fs))
    if max_min_threshold is not None:
        preproc_functions.append(lambda data, fs: (clean_jumps(
            data, 200, max_min_threshold, max_min_expected, cuda), fs))
    if max_min_remove is not None:
        window_len = 200
        preproc_functions.append(lambda data, fs: (set_jumps_to_zero(
            data,
            window_len=window_len,
            threshold=max_min_remove,
            cuda=cuda,
            clip_min_max_to_zero=True), fs))

    if shrink_val is not None:
        preproc_functions.append(lambda data, fs: (shrink_spikes(
            data,
            shrink_val,
            1,
            9,
        ), fs))

    preproc_functions.append(lambda data, fs: (resampy.resample(
        data, fs, sampling_freq, axis=1, filter='kaiser_fast'), sampling_freq))
    preproc_functions.append(lambda data, fs: (bandpass_cnt(
        data, low_cut_hz, high_cut_hz, fs, filt_order=4, axis=1), fs))

    if exp_demean:
        preproc_functions.append(lambda data, fs: (exponential_running_demean(
            data.T, factor_new=0.001, init_block_size=100).T, fs))
    if exp_standardize:
        preproc_functions.append(
            lambda data, fs: (exponential_running_standardize(
                data.T, factor_new=0.001, init_block_size=100).T, fs))
    if moving_demean:
        preproc_functions.append(lambda data, fs: (padded_moving_demean(
            data, axis=1, n_window=201), fs))
    if moving_standardize:
        preproc_functions.append(lambda data, fs: (padded_moving_standardize(
            data, axis=1, n_window=201), fs))
    if channel_demean:
        preproc_functions.append(lambda data, fs: (demean(data, axis=1), fs))
    if channel_standardize:
        preproc_functions.append(lambda data, fs:
                                 (standardize(data, axis=1), fs))
    if divisor is not None:
        preproc_functions.append(lambda data, fs: (data / divisor, fs))

    dataset = DiagnosisSet(n_recordings=n_recordings,
                           max_recording_mins=max_recording_mins,
                           preproc_functions=preproc_functions)
    if not only_return_exp:
        X, y = dataset.load()

    splitter = Splitter(
        n_folds,
        i_test_fold,
    )
    if not only_return_exp:
        train_set, valid_set, test_set = splitter.split(X, y)
        del X, y  # shouldn't be necessary, but just to make sure
    else:
        train_set = None
        valid_set = None
        test_set = None

    set_random_seeds(seed=20170629, cuda=cuda)
    if sigmoid:
        n_classes = 1
    else:
        n_classes = 2
    in_chans = 21

    net = Deep4Net(
        in_chans=in_chans,
        n_classes=n_classes,
        input_time_length=input_time_length,
        final_conv_length=final_conv_length,
        pool_time_length=pool_stride,
        pool_time_stride=pool_stride,
        n_filters_2=50,
        n_filters_3=80,
        n_filters_4=120,
    )
    model = net_with_more_layers(net, n_blocks_to_add, nn.MaxPool2d)
    if sigmoid:
        model = to_linear_plus_minus_net(model)
    optimizer = optim.Adam(model.parameters())
    to_dense_prediction_model(model)
    log.info("Model:\n{:s}".format(str(model)))
    if cuda:
        model.cuda()
    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    log.info("{:d} predictions per input/trial".format(n_preds_per_input))
    iterator = CropsFromTrialsIterator(batch_size=batch_size,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)
    if sigmoid:
        loss_function = lambda preds, targets: binary_cross_entropy_with_logits(
            th.mean(preds, dim=2)[:, 1, 0], targets.type_as(preds))
    else:
        loss_function = lambda preds, targets: F.nll_loss(
            th.mean(preds, dim=2)[:, :, 0], targets)

    if model_constraint is not None:
        model_constraint = MaxNormDefaultConstraint()
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length),
        RuntimeMonitor(),
    ]
    stop_criterion = MaxEpochs(max_epochs)
    batch_modifier = None
    if batch_set_zero_val is not None:
        batch_modifier = RemoveMinMaxDiff(batch_set_zero_val,
                                          clip_max_abs=True,
                                          set_zero=True)
    if (batch_set_zero_val is not None) and (batch_set_zero_test == True):
        iterator = ModifiedIterator(
            iterator,
            batch_modifier,
        )
        batch_modifier = None
    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator,
                     loss_function,
                     optimizer,
                     model_constraint,
                     monitors,
                     stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     batch_modifier=batch_modifier,
                     cuda=cuda)
    if not only_return_exp:
        exp.run()
    else:
        exp.dataset = dataset
        exp.splitter = splitter

    return exp
Code Example #12
def test_cropped_decoding():
    import mne
    from mne.io import concatenate_raws

    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id, event_codes)

    # Load each of the files
    parts = [mne.io.read_raw_edf(path, preload=True, stim_channel='auto',
                                 verbose='WARNING')
             for path in physionet_paths]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events = mne.find_events(raw, shortest_event=0, stim_channel='STI 014')

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info, meg=False, eeg=True, stim=False,
                                      eog=False,
                                      exclude='bads')

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(raw, events, dict(hands=2, feet=3), tmin=1, tmax=4.1,
                         proj=False, picks=eeg_channel_inds,
                         baseline=None, preload=True)
    import numpy as np
    from braindecode.datautil.signal_target import SignalAndTarget
    # Convert data from volts to microvolts
    # Pytorch expects float32 for input and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    train_set = SignalAndTarget(X[:60], y=y[:60])
    test_set = SignalAndTarget(X[60:], y=y[60:])
    from braindecode.models.shallow_fbcsp import ShallowFBCSPNet
    from torch import nn
    from braindecode.torch_ext.util import set_random_seeds
    from braindecode.models.util import to_dense_prediction_model

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = train_set.X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(in_chans=in_chans, n_classes=n_classes,
                            input_time_length=input_time_length,
                            final_conv_length=12).create_network()
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    from torch import optim

    optimizer = optim.Adam(model.parameters())
    from braindecode.torch_ext.util import np_to_var
    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    print("{:d} predictions per input/trial".format(n_preds_per_input))
    from braindecode.datautil.iterators import CropsFromTrialsIterator
    iterator = CropsFromTrialsIterator(batch_size=32,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)
    from braindecode.torch_ext.util import np_to_var, var_to_np
    import torch.nn.functional as F
    from numpy.random import RandomState
    import torch as th
    from braindecode.experiments.monitors import compute_preds_per_trial_from_crops
    rng = RandomState((2017, 6, 30))
    losses = []
    accuracies = []
    for i_epoch in range(4):
        # Set model to training mode
        model.train()
        for batch_X, batch_y in iterator.get_batches(train_set, shuffle=False):
            net_in = np_to_var(batch_X)
            if cuda:
                net_in = net_in.cuda()
            net_target = np_to_var(batch_y)
            if cuda:
                net_target = net_target.cuda()
            # Remove gradients of last backward pass from all parameters
            optimizer.zero_grad()
            outputs = model(net_in)
            # Mean predictions across trial
            # Note that this will give identical gradients to computing
            # a per-prediction loss (at least for the combination of log softmax activation
            # and negative log likelihood loss which we are using here)
            outputs = th.mean(outputs, dim=2, keepdim=False)
            loss = F.nll_loss(outputs, net_target)
            loss.backward()
            optimizer.step()

        # Print some statistics each epoch
        model.eval()
        print("Epoch {:d}".format(i_epoch))
        for setname, dataset in (('Train', train_set), ('Test', test_set)):
            # Collect all predictions and losses
            all_preds = []
            all_losses = []
            batch_sizes = []
            for batch_X, batch_y in iterator.get_batches(dataset,
                                                         shuffle=False):
                net_in = np_to_var(batch_X)
                if cuda:
                    net_in = net_in.cuda()
                net_target = np_to_var(batch_y)
                if cuda:
                    net_target = net_target.cuda()
                outputs = model(net_in)
                all_preds.append(var_to_np(outputs))
                outputs = th.mean(outputs, dim=2, keepdim=False)
                loss = F.nll_loss(outputs, net_target)
                loss = float(var_to_np(loss))
                all_losses.append(loss)
                batch_sizes.append(len(batch_X))
            # Compute mean per-input loss
            loss = np.mean(np.array(all_losses) * np.array(batch_sizes) /
                           np.mean(batch_sizes))
            print("{:6s} Loss: {:.5f}".format(setname, loss))
            losses.append(loss)
            # Assign the predictions to the trials
            preds_per_trial = compute_preds_per_trial_from_crops(
                all_preds, input_time_length, dataset.X)
            # preds per trial are now trials x classes x timesteps/predictions
            # Now mean across timesteps for each trial to get per-trial predictions
            meaned_preds_per_trial = np.array(
                [np.mean(p, axis=1) for p in preds_per_trial])
            predicted_labels = np.argmax(meaned_preds_per_trial, axis=1)
            accuracy = np.mean(predicted_labels == dataset.y)
            accuracies.append(accuracy * 100)
            print("{:6s} Accuracy: {:.1f}%".format(
                setname, accuracy * 100))
    np.testing.assert_allclose(
        np.array(losses),
        np.array([1.703004002571106,
                  1.6295261979103088,
                  0.71168938279151917,
                  0.70825588703155518,
                  0.58231228590011597,
                  0.60176041722297668,
                  0.46629951894283295,
                  0.51184913516044617]),
        rtol=1e-4, atol=1e-5)
    np.testing.assert_allclose(
        np.array(accuracies),
        np.array(
            [50.0,
             46.666666666666664,
             60.0,
             53.333333333333336,
             68.333333333333329,
             66.666666666666657,
             88.333333333333329,
             83.333333333333343]),
        rtol=1e-4, atol=1e-5)
Code Example #13
def test_eeg_classifier():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=False)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel="auto",
                            verbose="WARNING") for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude="bads")

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    # Convert data from volts to microvolts
    # Pytorch expects float32 for input and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_window_samples = 450
    n_classes = 2
    in_chans = X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=12,
    )
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_window_samples, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]

    train_set = create_from_X_y(X[:48],
                                y[:48],
                                drop_last_window=False,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    valid_set = create_from_X_y(X[48:60],
                                y[48:60],
                                drop_last_window=False,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    cropped_cb_train = CroppedTrialEpochScoring(
        "accuracy",
        name="train_trial_accuracy",
        lower_is_better=False,
        on_train=True,
    )

    cropped_cb_valid = CroppedTrialEpochScoring(
        "accuracy",
        on_train=False,
        name="valid_trial_accuracy",
        lower_is_better=False,
    )

    clf = EEGClassifier(
        model,
        cropped=True,
        criterion=CroppedLoss,
        criterion__loss_function=nll_loss,
        optimizer=optim.Adam,
        train_split=predefined_split(valid_set),
        batch_size=32,
        callbacks=[
            ("train_trial_accuracy", cropped_cb_train),
            ("valid_trial_accuracy", cropped_cb_valid),
        ],
    )

    clf.fit(train_set, y=None, epochs=4)

    expected = [{
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 1.6639312505722046
        }, {
            'train_batch_size': 32,
            'train_loss': 2.6161606311798096
        }, {
            'train_batch_size': 32,
            'train_loss': 1.627132773399353
        }, {
            'valid_batch_size': 24,
            'valid_loss': 0.9677614569664001
        }],
        'epoch':
        1,
        'train_batch_count':
        3,
        'train_loss':
        1.9690748850504558,
        'train_loss_best':
        True,
        'train_trial_accuracy':
        0.4791666666666667,
        'train_trial_accuracy_best':
        True,
        'valid_batch_count':
        1,
        'valid_loss':
        0.9677614569664001,
        'valid_loss_best':
        True,
        'valid_trial_accuracy':
        0.5,
        'valid_trial_accuracy_best':
        True
    }, {
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 1.3829222917556763
        }, {
            'train_batch_size': 32,
            'train_loss': 1.3123714923858643
        }, {
            'train_batch_size': 32,
            'train_loss': 1.0109959840774536
        }, {
            'valid_batch_size': 24,
            'valid_loss': 1.9435862302780151
        }],
        'epoch':
        2,
        'train_batch_count':
        3,
        'train_loss':
        1.2354299227396648,
        'train_loss_best':
        True,
        'train_trial_accuracy':
        0.5,
        'train_trial_accuracy_best':
        True,
        'valid_batch_count':
        1,
        'valid_loss':
        1.9435862302780151,
        'valid_loss_best':
        False,
        'valid_trial_accuracy':
        0.5,
        'valid_trial_accuracy_best':
        False
    }, {
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 1.172208547592163
        }, {
            'train_batch_size': 32,
            'train_loss': 0.8899562954902649
        }, {
            'train_batch_size': 32,
            'train_loss': 1.0232216119766235
        }, {
            'valid_batch_size': 24,
            'valid_loss': 0.9585554599761963
        }],
        'epoch':
        3,
        'train_batch_count':
        3,
        'train_loss':
        1.0284621516863506,
        'train_loss_best':
        True,
        'train_trial_accuracy':
        0.5,
        'train_trial_accuracy_best':
        False,
        'valid_batch_count':
        1,
        'valid_loss':
        0.9585554599761963,
        'valid_loss_best':
        True,
        'valid_trial_accuracy':
        0.5,
        'valid_trial_accuracy_best':
        False
    }, {
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 0.9693693518638611
        }, {
            'train_batch_size': 32,
            'train_loss': 0.900641918182373
        }, {
            'train_batch_size': 32,
            'train_loss': 0.8839665651321411
        }, {
            'valid_batch_size': 24,
            'valid_loss': 0.873468816280365
        }],
        'epoch':
        4,
        'train_batch_count':
        3,
        'train_loss':
        0.9179926117261251,
        'train_loss_best':
        True,
        'train_trial_accuracy':
        0.625,
        'train_trial_accuracy_best':
        True,
        'valid_batch_count':
        1,
        'valid_loss':
        0.873468816280365,
        'valid_loss_best':
        True,
        'valid_trial_accuracy':
        0.4166666666666667,
        'valid_trial_accuracy_best':
        False
    }]

    history_without_dur = [{k: v
                            for k, v in h.items() if k != "dur"}
                           for h in clf.history]
    assert_deep_allclose(expected, history_without_dur, atol=1e-3, rtol=1e-3)
Code Example #14
def runModel(mode):
    cudnn.benchmark = True

    start = time.time()

    #mode = str(sys.argv[1])
    #X,y,test_X,test_y = loadSubNormData(mode='all')
    #X,y,test_X,test_y = loadNEDCdata(mode=mode)

    #data = np.load('sessionsData/data%s-sessions.npy'%mode[:3])
    #labels = np.load('sessionsData/labels%s-sessions.npy'%mode[:3])

    data = np.load('data%s.npy' % mode[:3])
    labels = np.load('labels%s.npy' % mode[:3])

    X, y, test_X, test_y = splitDataRandom_Loaded(data, labels, mode)

    print('Mode - %s Total n: %d, Test n: %d' %
          (mode, len(y) + len(test_y), len(test_y)))
    #return 0

    #X = addDataNoise(X,band=[1,4])
    #test_X = addDataNoise(test_X,band=[1,4])

    max_shape = np.max([list(x.shape) for x in X], axis=0)

    assert max_shape[1] == int(config.duration_recording_mins *
                               config.sampling_freq * 60)

    n_classes = 2
    n_recordings = None  # set to an integer, if you want to restrict the set size
    sensor_types = ["EEG"]
    n_chans = 19  #21
    max_recording_mins = 35  # exclude larger recordings from training set
    sec_to_cut = 60  # cut away at start of each recording
    duration_recording_mins = 5  #20  # how many minutes to use per recording
    test_recording_mins = 5  #20
    max_abs_val = 800  # for clipping
    sampling_freq = 100
    divisor = 10  # divide signal by this
    test_on_eval = True  # test on evaluation set or on training set
    # in case of test on eval, n_folds and i_testfold determine
    # validation fold in training set for training until first stop
    n_folds = 10
    i_test_fold = 9
    shuffle = True
    model_name = 'linear'  #'deep'#'shallow' 'linear'
    n_start_chans = 25
    n_chan_factor = 2  # relevant for deep model only
    input_time_length = 6000
    final_conv_length = 1
    model_constraint = 'defaultnorm'
    init_lr = 1e-3
    batch_size = 64
    max_epochs = 35  # until first stop, then continue training on train+valid
    cuda = True  # False

    if model_name == 'shallow':
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_start_chans,
            n_filters_spat=n_start_chans,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length).create_network()
    elif model_name == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         n_filters_time=n_start_chans,
                         n_filters_spat=n_start_chans,
                         input_time_length=input_time_length,
                         n_filters_2=int(n_start_chans * n_chan_factor),
                         n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
                         n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
                         final_conv_length=final_conv_length,
                         stride_before_pool=True).create_network()
    elif model_name == 'deep_smac' or model_name == 'deep_smac_bnorm':
        if model_name == 'deep_smac':
            do_batch_norm = False
        else:
            assert model_name == 'deep_smac_bnorm'
            do_batch_norm = True
        double_time_convs = False
        drop_prob = 0.244445
        filter_length_2 = 12
        filter_length_3 = 14
        filter_length_4 = 12
        filter_time_length = 21
        final_conv_length = 1
        first_nonlin = elu
        first_pool_mode = 'mean'
        first_pool_nonlin = identity
        later_nonlin = elu
        later_pool_mode = 'mean'
        later_pool_nonlin = identity
        n_filters_factor = 1.679066
        n_filters_start = 32
        pool_time_length = 1
        pool_time_stride = 2
        split_first_layer = True
        n_chan_factor = n_filters_factor
        n_start_chans = n_filters_start
        model = Deep4Net(n_chans,
                         n_classes,
                         n_filters_time=n_start_chans,
                         n_filters_spat=n_start_chans,
                         input_time_length=input_time_length,
                         n_filters_2=int(n_start_chans * n_chan_factor),
                         n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
                         n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
                         final_conv_length=final_conv_length,
                         batch_norm=do_batch_norm,
                         double_time_convs=double_time_convs,
                         drop_prob=drop_prob,
                         filter_length_2=filter_length_2,
                         filter_length_3=filter_length_3,
                         filter_length_4=filter_length_4,
                         filter_time_length=filter_time_length,
                         first_nonlin=first_nonlin,
                         first_pool_mode=first_pool_mode,
                         first_pool_nonlin=first_pool_nonlin,
                         later_nonlin=later_nonlin,
                         later_pool_mode=later_pool_mode,
                         later_pool_nonlin=later_pool_nonlin,
                         pool_time_length=pool_time_length,
                         pool_time_stride=pool_time_stride,
                         split_first_layer=split_first_layer,
                         stride_before_pool=True).create_network()
    elif model_name == 'shallow_smac':
        conv_nonlin = identity
        do_batch_norm = True
        drop_prob = 0.328794
        filter_time_length = 56
        final_conv_length = 22
        n_filters_spat = 73
        n_filters_time = 24
        pool_mode = 'max'
        pool_nonlin = identity
        pool_time_length = 84
        pool_time_stride = 3
        split_first_layer = True
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_filters_time,
            n_filters_spat=n_filters_spat,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length,
            conv_nonlin=conv_nonlin,
            batch_norm=do_batch_norm,
            drop_prob=drop_prob,
            filter_time_length=filter_time_length,
            pool_mode=pool_mode,
            pool_nonlin=pool_nonlin,
            pool_time_length=pool_time_length,
            pool_time_stride=pool_time_stride,
            split_first_layer=split_first_layer,
        ).create_network()
    elif model_name == 'linear':
        model = nn.Sequential()
        model.add_module("conv_classifier",
                         nn.Conv2d(n_chans, n_classes, (600, 1)))
        model.add_module('softmax', nn.LogSoftmax(dim=1))
        model.add_module('squeeze', Expression(lambda x: x.squeeze(3)))
    else:
        assert False, "unknown model name {:s}".format(model_name)

    to_dense_prediction_model(model)

    if config.cuda:
        model.cuda()
    test_input = np_to_var(
        np.ones((2, config.n_chans, config.input_time_length, 1),
                dtype=np.float32))
    if config.cuda:
        test_input = test_input.cuda()

    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    iterator = CropsFromTrialsIterator(
        batch_size=config.batch_size,
        input_time_length=config.input_time_length,
        n_preds_per_input=n_preds_per_input)

    #model.add_module('softmax', nn.LogSoftmax(dim=1))

    model.eval()

    mode[2] = str(mode[2])
    mode[3] = str(mode[3])
    modelName = '-'.join(mode[:4])

    #params = th.load('sessionsData/%sModel%s-sessions.pt'%(modelName,mode[4]))
    #params = th.load('%sModel%s.pt'%(modelName,mode[4]))
    params = th.load('linear/%sModel%s.pt' % (modelName, mode[4]))

    model.load_state_dict(params)

    if config.test_on_eval:
        #test_X, test_y = test_dataset.load()
        #test_X, test_y = loadNEDCdata(mode='eval')
        max_shape = np.max([list(x.shape) for x in test_X], axis=0)
        assert max_shape[1] == int(config.test_recording_mins *
                                   config.sampling_freq * 60)
    if not config.test_on_eval:
        splitter = TrainValidTestSplitter(config.n_folds,
                                          config.i_test_fold,
                                          shuffle=config.shuffle)
        train_set, valid_set, test_set = splitter.split(X, y)
    else:
        splitter = TrainValidSplitter(config.n_folds,
                                      i_valid_fold=config.i_test_fold,
                                      shuffle=config.shuffle)
        train_set, valid_set = splitter.split(X, y)
        test_set = SignalAndTarget(test_X, test_y)
        del test_X, test_y
    del X, y  # shouldn't be necessary, but just to make sure

    datasets = OrderedDict(
        (('train', train_set), ('valid', valid_set), ('test', test_set)))

    for setname in ('train', 'valid', 'test'):
        #setname = 'test'
        #print("Compute predictions for {:s}...".format(setname))
        dataset = datasets[setname]
        if config.cuda:
            preds_per_batch = [
                var_to_np(model(np_to_var(b[0]).cuda()))
                for b in iterator.get_batches(dataset, shuffle=False)
            ]
        else:
            preds_per_batch = [
                var_to_np(model(np_to_var(b[0])))
                for b in iterator.get_batches(dataset, shuffle=False)
            ]
        preds_per_trial = compute_preds_per_trial(
            preds_per_batch,
            dataset,
            input_time_length=iterator.input_time_length,
            n_stride=iterator.n_preds_per_input)
        mean_preds_per_trial = [
            np.mean(preds, axis=(0, 2)) for preds in preds_per_trial
        ]
        mean_preds_per_trial = np.array(mean_preds_per_trial)

        all_pred_labels = np.argmax(mean_preds_per_trial, axis=1).squeeze()
        all_target_labels = dataset.y
        acc_per_class = []
        for i_class in range(n_classes):
            mask = all_target_labels == i_class
            acc = np.mean(all_pred_labels[mask] == all_target_labels[mask])
            acc_per_class.append(acc)
        misclass = 1 - np.mean(acc_per_class)
        #print('Acc:{}, Class 0:{}, Class 1:{}'.format(np.mean(acc_per_class),acc_per_class[0],acc_per_class[1]))

        if setname == 'test':
            testResult = np.mean(acc_per_class)

    return testResult
Code Example #15
def exp(subject_id):
    import torch
    test_subj = np.r_[subject_id]

    print('test subj:' + str(test_subj))

    # 10% hold-out (the random_split below is commented out and unused)
    train_size = int(0.9 * len(splitted['session_T']))
    test_size = len(splitted['session_T']) - train_size

    # train_set, valid_set = torch.utils.data.random_split(splitted['session_T'], [train_size, test_size])
    train_set = splitted['session_T']
    test_set = splitted['session_E']

    # model = Deep4Net(
    #     n_chans,
    #     n_classes,
    #     input_window_samples=input_window_samples,
    #     final_conv_length="auto",
    # )

    from torch.utils.data import Dataset, ConcatDataset

    crop_size = 1000
    # embedding_net = Deep4Net_origin(n_classes, n_chans, crop_size)
    # model = FcClfNet(embedding_net)

    model = ShallowFBCSPNet(
        n_chans,
        n_classes,
        input_window_samples=input_window_samples,
        final_conv_length='auto',
    )

    from braindecode.models.util import to_dense_prediction_model, get_output_shape
    to_dense_prediction_model(model)

    n_preds_per_input = get_output_shape(model, 22, input_window_samples)[2]
    print("n_preds_per_input : ", n_preds_per_input)
    print(model)


    batch_size = 8
    epochs = 200

    lr = 0.0625 * 0.01
    weight_decay = 0

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
    # valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)

    # Send model to GPU
    if cuda:
        model.cuda()

    from torch.optim import lr_scheduler
    import torch.optim as optim

    import argparse
    parser = argparse.ArgumentParser(description='cross subject domain adaptation')
    parser.add_argument('--batch-size', type=int, default=50, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=50, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    args.gpuidx = 0
    args.seed = 0
    args.use_tensorboard = False
    args.save_model = False

    optimizer = optim.AdamW(model.parameters(), lr=0.01, weight_decay=0.5 * 0.001)
    # scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=1)
    scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs - 1)

    import pandas as pd
    results_columns = ['test_loss', 'test_accuracy']
    df = pd.DataFrame(columns=results_columns)

    for epochidx in range(1, epochs):
        print(epochidx)
        train_crop(10, model, device, train_loader, optimizer, scheduler, cuda, args.gpuidx)
        test_loss, test_score = eval_crop(model, device, test_loader)
        results = {'test_loss': test_loss, 'test_accuracy': test_score}
        df = df.append(results, ignore_index=True)
        print(results)

    return df
Code Example #16
def run_exp(max_recording_mins, n_recordings, sec_to_cut,
            duration_recording_mins, max_abs_val, shrink_val, sampling_freq,
            divisor, n_folds, i_test_fold, final_conv_length, model_constraint,
            batch_size, max_epochs, n_filters_time, n_filters_spat,
            filter_time_length, conv_nonlin, pool_time_length,
            pool_time_stride, pool_mode, pool_nonlin, split_first_layer,
            do_batch_norm, drop_prob, time_cut_off_sec, start_time,
            input_time_length, only_return_exp):
    kwargs = locals()
    for model_param in [
            'final_conv_length',
            'n_filters_time',
            'n_filters_spat',
            'filter_time_length',
            'conv_nonlin',
            'pool_time_length',
            'pool_time_stride',
            'pool_mode',
            'pool_nonlin',
            'split_first_layer',
            'do_batch_norm',
            'drop_prob',
    ]:
        kwargs.pop(model_param)
    nonlin_dict = {
        'elu': elu,
        'relu': relu,
        'relu6': relu6,
        'tanh': tanh,
        'square': square,
        'identity': identity,
        'log': safe_log,
    }
    assert input_time_length == 6000
    # copy over from early seizure
    # make proper
    n_classes = 2
    in_chans = 21
    cuda = True
    set_random_seeds(seed=20170629, cuda=cuda)
    model = ShallowFBCSPNet(in_chans=in_chans,
                            n_classes=n_classes,
                            input_time_length=input_time_length,
                            final_conv_length=final_conv_length,
                            n_filters_time=n_filters_time,
                            filter_time_length=filter_time_length,
                            n_filters_spat=n_filters_spat,
                            pool_time_length=pool_time_length,
                            pool_time_stride=pool_time_stride,
                            conv_nonlin=nonlin_dict[conv_nonlin],
                            pool_mode=pool_mode,
                            pool_nonlin=nonlin_dict[pool_nonlin],
                            split_first_layer=split_first_layer,
                            batch_norm=do_batch_norm,
                            batch_norm_alpha=0.1,
                            drop_prob=drop_prob).create_network()

    to_dense_prediction_model(model)
    if cuda:
        model.cuda()
    model.eval()
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()

    try:
        out = model(test_input)
    except RuntimeError:
        raise ValueError("Model receptive field too large...")
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    n_receptive_field = input_time_length - n_preds_per_input

    if n_receptive_field > 6000:
        raise ValueError("Model receptive field ({:d}) too large...".format(
            n_receptive_field))
        # For future, here optionally add input time length instead

    model = ShallowFBCSPNet(in_chans=in_chans,
                            n_classes=n_classes,
                            input_time_length=input_time_length,
                            final_conv_length=final_conv_length,
                            n_filters_time=n_filters_time,
                            filter_time_length=filter_time_length,
                            n_filters_spat=n_filters_spat,
                            pool_time_length=pool_time_length,
                            pool_time_stride=pool_time_stride,
                            conv_nonlin=nonlin_dict[conv_nonlin],
                            pool_mode=pool_mode,
                            pool_nonlin=nonlin_dict[pool_nonlin],
                            split_first_layer=split_first_layer,
                            batch_norm=do_batch_norm,
                            batch_norm_alpha=0.1,
                            drop_prob=drop_prob).create_network()
    return common.run_exp(model=model, **kwargs)


def run_exp(
    data_folders,
    n_recordings,
    sensor_types,
    n_chans,
    max_recording_mins,
    sec_to_cut,
    duration_recording_mins,
    test_recording_mins,
    max_abs_val,
    sampling_freq,
    divisor,
    test_on_eval,
    n_folds,
    i_test_fold,
    shuffle,
    model_name,
    n_start_chans,
    n_chan_factor,
    input_time_length,
    final_conv_length,
    model_constraint,
    init_lr,
    batch_size,
    max_epochs,
    cuda,
):

    import torch.backends.cudnn as cudnn
    cudnn.benchmark = True
    preproc_functions = []
    preproc_functions.append(lambda data, fs: (
        data[:, int(sec_to_cut * fs):-int(sec_to_cut * fs)], fs))
    preproc_functions.append(lambda data, fs: (data[:, :int(
        duration_recording_mins * 60 * fs)], fs))
    if max_abs_val is not None:
        preproc_functions.append(
            lambda data, fs: (np.clip(data, -max_abs_val, max_abs_val), fs))

    preproc_functions.append(lambda data, fs: (resampy.resample(
        data, fs, sampling_freq, axis=1, filter='kaiser_fast'), sampling_freq))

    if divisor is not None:
        preproc_functions.append(lambda data, fs: (data / divisor, fs))

    dataset = DiagnosisSet(n_recordings=n_recordings,
                           max_recording_mins=max_recording_mins,
                           preproc_functions=preproc_functions,
                           data_folders=data_folders,
                           train_or_eval='train',
                           sensor_types=sensor_types)
    if test_on_eval:
        if test_recording_mins is None:
            test_recording_mins = duration_recording_mins
        test_preproc_functions = copy(preproc_functions)
        test_preproc_functions[1] = lambda data, fs: (data[:, :int(
            test_recording_mins * 60 * fs)], fs)
        test_dataset = DiagnosisSet(n_recordings=n_recordings,
                                    max_recording_mins=None,
                                    preproc_functions=test_preproc_functions,
                                    data_folders=data_folders,
                                    train_or_eval='eval',
                                    sensor_types=sensor_types)
    X, y = dataset.load()
    max_shape = np.max([list(x.shape) for x in X], axis=0)
    assert max_shape[1] == int(duration_recording_mins * sampling_freq * 60)
    if test_on_eval:
        test_X, test_y = test_dataset.load()
        max_shape = np.max([list(x.shape) for x in test_X], axis=0)
        assert max_shape[1] == int(test_recording_mins * sampling_freq * 60)
    if not test_on_eval:
        splitter = TrainValidTestSplitter(n_folds,
                                          i_test_fold,
                                          shuffle=shuffle)
        train_set, valid_set, test_set = splitter.split(X, y)
    else:
        splitter = TrainValidSplitter(n_folds,
                                      i_valid_fold=i_test_fold,
                                      shuffle=shuffle)
        train_set, valid_set = splitter.split(X, y)
        test_set = SignalAndTarget(test_X, test_y)
        del test_X, test_y
    del X, y  # shouldn't be necessary, but just to make sure

    set_random_seeds(seed=20170629, cuda=cuda)
    n_classes = 2
    if model_name == 'shallow':
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_start_chans,
            n_filters_spat=n_start_chans,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length).create_network()
    elif model_name == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         n_filters_time=n_start_chans,
                         n_filters_spat=n_start_chans,
                         input_time_length=input_time_length,
                         n_filters_2=int(n_start_chans * n_chan_factor),
                         n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
                         n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
                         final_conv_length=final_conv_length,
                         stride_before_pool=True).create_network()
    elif model_name == 'deep_smac' or model_name == 'deep_smac_bnorm':
        if model_name == 'deep_smac':
            do_batch_norm = False
        else:
            assert model_name == 'deep_smac_bnorm'
            do_batch_norm = True
        double_time_convs = False
        drop_prob = 0.244445
        filter_length_2 = 12
        filter_length_3 = 14
        filter_length_4 = 12
        filter_time_length = 21
        final_conv_length = 1
        first_nonlin = elu
        first_pool_mode = 'mean'
        first_pool_nonlin = identity
        later_nonlin = elu
        later_pool_mode = 'mean'
        later_pool_nonlin = identity
        n_filters_factor = 1.679066
        n_filters_start = 32
        pool_time_length = 1
        pool_time_stride = 2
        split_first_layer = True
        n_chan_factor = n_filters_factor
        n_start_chans = n_filters_start
        model = Deep4Net(n_chans,
                         n_classes,
                         n_filters_time=n_start_chans,
                         n_filters_spat=n_start_chans,
                         input_time_length=input_time_length,
                         n_filters_2=int(n_start_chans * n_chan_factor),
                         n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
                         n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
                         final_conv_length=final_conv_length,
                         batch_norm=do_batch_norm,
                         double_time_convs=double_time_convs,
                         drop_prob=drop_prob,
                         filter_length_2=filter_length_2,
                         filter_length_3=filter_length_3,
                         filter_length_4=filter_length_4,
                         filter_time_length=filter_time_length,
                         first_nonlin=first_nonlin,
                         first_pool_mode=first_pool_mode,
                         first_pool_nonlin=first_pool_nonlin,
                         later_nonlin=later_nonlin,
                         later_pool_mode=later_pool_mode,
                         later_pool_nonlin=later_pool_nonlin,
                         pool_time_length=pool_time_length,
                         pool_time_stride=pool_time_stride,
                         split_first_layer=split_first_layer,
                         stride_before_pool=True).create_network()
    elif model_name == 'shallow_smac':
        conv_nonlin = identity
        do_batch_norm = True
        drop_prob = 0.328794
        filter_time_length = 56
        final_conv_length = 22
        n_filters_spat = 73
        n_filters_time = 24
        pool_mode = 'max'
        pool_nonlin = identity
        pool_time_length = 84
        pool_time_stride = 3
        split_first_layer = True
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_filters_time,
            n_filters_spat=n_filters_spat,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length,
            conv_nonlin=conv_nonlin,
            batch_norm=do_batch_norm,
            drop_prob=drop_prob,
            filter_time_length=filter_time_length,
            pool_mode=pool_mode,
            pool_nonlin=pool_nonlin,
            pool_time_length=pool_time_length,
            pool_time_stride=pool_time_stride,
            split_first_layer=split_first_layer,
        ).create_network()
    elif model_name == 'linear':
        model = nn.Sequential()
        model.add_module("conv_classifier",
                         nn.Conv2d(n_chans, n_classes, (600, 1)))
        model.add_module('softmax', nn.LogSoftmax(dim=1))
        model.add_module('squeeze', Expression(lambda x: x.squeeze(3)))
    else:
        assert False, "unknown model name {:s}".format(model_name)
    to_dense_prediction_model(model)
    log.info("Model:\n{:s}".format(str(model)))
    if cuda:
        model.cuda()
    # determine output size
    test_input = np_to_var(
        np.ones((2, n_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    log.info("In shape: {:s}".format(str(test_input.cpu().data.numpy().shape)))

    out = model(test_input)
    log.info("Out shape: {:s}".format(str(out.cpu().data.numpy().shape)))
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    log.info("{:d} predictions per input/trial".format(n_preds_per_input))
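    # Note (added for clarity): after to_dense_prediction_model the network
    # emits one prediction per possible crop position, so `out` has shape
    # (batch, n_classes, n_preds_per_input) here (possibly with a trailing
    # singleton dimension, depending on the model), and the prediction axis
    # is read off at index 2 above.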
    iterator = CropsFromTrialsIterator(batch_size=batch_size,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)
    optimizer = optim.Adam(model.parameters(), lr=init_lr)

    loss_function = lambda preds, targets: F.nll_loss(
        th.mean(preds, dim=2, keepdim=False), targets)
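    # The dense predictions carry a time axis (dim=2) with one output per crop
    # position; averaging over it before nll_loss turns the per-crop outputs
    # into a single prediction per super-crop, as usual for cropped training.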

    if model_constraint is not None:
        assert model_constraint == 'defaultnorm'
        model_constraint = MaxNormDefaultConstraint()
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedDiagnosisMonitor(input_time_length, n_preds_per_input),
        RuntimeMonitor(),
    ]
    stop_criterion = MaxEpochs(max_epochs)
    batch_modifier = None
    run_after_early_stop = True
    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator,
                     loss_function,
                     optimizer,
                     model_constraint,
                     monitors,
                     stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=run_after_early_stop,
                     batch_modifier=batch_modifier,
                     cuda=cuda)
    exp.run()
    return exp
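
A minimal usage sketch for the function above (an added, assumption-laden note rather than part of the original snippet: it relies on the braindecode 0.x Experiment API, which collects per-epoch monitor values in exp.epochs_df; the exact column names depend on the monitors configured above):

# exp = run_exp(...)             # hypothetical call with all settings filled in
# print(exp.epochs_df.tail())    # per-epoch loss / misclass values
# print(exp.epochs_df.filter(like='misclass').min())  # best rates reached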
Code example #18
0
def run_exp(test_on_eval, sensor_types, n_chans, max_recording_mins,
            test_recording_mins, n_recordings, sec_to_cut_at_start,
            sec_to_cut_at_end, duration_recording_mins, max_abs_val,
            clip_before_resample, sampling_freq, divisor, n_folds, i_test_fold,
            shuffle, merge_train_valid, model_name, n_start_chans,
            n_chan_factor, input_time_length, final_conv_length,
            stride_before_pool, optimizer, learning_rate, weight_decay,
            scheduler, model_constraint, batch_size, max_epochs, log_dir,
            only_return_exp, np_th_seed):

    cuda = True
    if ('smac' in model_name) and (input_time_length is None):
        input_time_length = 12000
        fix_input_length_for_smac = True
    else:
        fix_input_length_for_smac = False
    set_random_seeds(seed=np_th_seed, cuda=cuda)
    n_classes = 2
    if model_name == 'shallow':
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_start_chans,
            n_filters_spat=n_start_chans,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length).create_network()
    elif model_name == 'deep':
        model = Deep4Net(
            n_chans,
            n_classes,
            n_filters_time=n_start_chans,
            n_filters_spat=n_start_chans,
            input_time_length=input_time_length,
            n_filters_2=int(n_start_chans * n_chan_factor),
            n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
            n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
            final_conv_length=final_conv_length,
            stride_before_pool=stride_before_pool).create_network()
    elif (model_name == 'deep_smac') or (model_name == 'deep_smac_bnorm'):
        if model_name == 'deep_smac':
            do_batch_norm = False
        else:
            assert model_name == 'deep_smac_bnorm'
            do_batch_norm = True
        double_time_convs = False
        drop_prob = 0.244445
        filter_length_2 = 12
        filter_length_3 = 14
        filter_length_4 = 12
        filter_time_length = 21
        final_conv_length = 1
        first_nonlin = elu
        first_pool_mode = 'mean'
        first_pool_nonlin = identity
        later_nonlin = elu
        later_pool_mode = 'mean'
        later_pool_nonlin = identity
        n_filters_factor = 1.679066
        n_filters_start = 32
        pool_time_length = 1
        pool_time_stride = 2
        split_first_layer = True
        n_chan_factor = n_filters_factor
        n_start_chans = n_filters_start
        model = Deep4Net(n_chans,
                         n_classes,
                         n_filters_time=n_start_chans,
                         n_filters_spat=n_start_chans,
                         input_time_length=input_time_length,
                         n_filters_2=int(n_start_chans * n_chan_factor),
                         n_filters_3=int(n_start_chans * (n_chan_factor**2.0)),
                         n_filters_4=int(n_start_chans * (n_chan_factor**3.0)),
                         final_conv_length=final_conv_length,
                         batch_norm=do_batch_norm,
                         double_time_convs=double_time_convs,
                         drop_prob=drop_prob,
                         filter_length_2=filter_length_2,
                         filter_length_3=filter_length_3,
                         filter_length_4=filter_length_4,
                         filter_time_length=filter_time_length,
                         first_nonlin=first_nonlin,
                         first_pool_mode=first_pool_mode,
                         first_pool_nonlin=first_pool_nonlin,
                         later_nonlin=later_nonlin,
                         later_pool_mode=later_pool_mode,
                         later_pool_nonlin=later_pool_nonlin,
                         pool_time_length=pool_time_length,
                         pool_time_stride=pool_time_stride,
                         split_first_layer=split_first_layer,
                         stride_before_pool=True).create_network()
    elif model_name == 'shallow_smac':
        conv_nonlin = identity
        do_batch_norm = True
        drop_prob = 0.328794
        filter_time_length = 56
        final_conv_length = 22
        n_filters_spat = 73
        n_filters_time = 24
        pool_mode = 'max'
        pool_nonlin = identity
        pool_time_length = 84
        pool_time_stride = 3
        split_first_layer = True
        model = ShallowFBCSPNet(
            in_chans=n_chans,
            n_classes=n_classes,
            n_filters_time=n_filters_time,
            n_filters_spat=n_filters_spat,
            input_time_length=input_time_length,
            final_conv_length=final_conv_length,
            conv_nonlin=conv_nonlin,
            batch_norm=do_batch_norm,
            drop_prob=drop_prob,
            filter_time_length=filter_time_length,
            pool_mode=pool_mode,
            pool_nonlin=pool_nonlin,
            pool_time_length=pool_time_length,
            pool_time_stride=pool_time_stride,
            split_first_layer=split_first_layer,
        ).create_network()
    elif model_name == 'deep_smac_new':
        from torch.nn.functional import elu, relu, relu6, tanh
        from braindecode.torch_ext.functions import identity, square, safe_log
        n_filters_factor = 1.9532637176784269
        n_filters_start = 61

        deep_kwargs = {
            "batch_norm": False,
            "double_time_convs": False,
            "drop_prob": 0.3622676569047184,
            "filter_length_2": 9,
            "filter_length_3": 6,
            "filter_length_4": 10,
            "filter_time_length": 17,
            "final_conv_length": 5,
            "first_nonlin": elu,
            "first_pool_mode": "max",
            "first_pool_nonlin": identity,
            "later_nonlin": relu6,
            "later_pool_mode": "max",
            "later_pool_nonlin": identity,
            "n_filters_time": n_filters_start,
            "n_filters_spat": n_filters_start,
            "n_filters_2": int(n_filters_start * n_filters_factor),
            "n_filters_3": int(n_filters_start * (n_filters_factor**2.0)),
            "n_filters_4": int(n_filters_start * (n_filters_factor**3.0)),
            "pool_time_length": 1,
            "pool_time_stride": 4,
            "split_first_layer": True,
            "stride_before_pool": True,
        }

        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         **deep_kwargs).create_network()
    elif model_name == 'shallow_smac_new':
        from torch.nn.functional import elu, relu, relu6, tanh
        from braindecode.torch_ext.functions import identity, square, safe_log
        shallow_kwargs = {
            "conv_nonlin": square,
            "batch_norm": True,
            "drop_prob": 0.10198630723385381,
            "filter_time_length": 51,
            "final_conv_length": 1,
            "n_filters_spat": 200,
            "n_filters_time": 76,
            "pool_mode": "max",
            "pool_nonlin": safe_log,
            "pool_time_length": 139,
            "pool_time_stride": 49,
            "split_first_layer": True,
        }

        model = ShallowFBCSPNet(in_chans=n_chans,
                                n_classes=n_classes,
                                input_time_length=input_time_length,
                                **shallow_kwargs).create_network()
    elif model_name == 'linear':
        model = nn.Sequential()
        model.add_module("conv_classifier",
                         nn.Conv2d(n_chans, n_classes, (600, 1)))
        model.add_module('softmax', nn.LogSoftmax(dim=1))
        model.add_module('squeeze', Expression(lambda x: x.squeeze(3)))
    elif model_name == '3path':
        virtual_chan_1x1_conv = True
        mean_across_features = False
        drop_prob = 0.5
        n_start_filters = 10
        early_bnorm = False
        n_classifier_filters = 100
        later_kernel_len = 5
        extra_conv_stride = 4
        # don't forget to reset n_preds_per_input
        model = create_multi_start_path_net(
            in_chans=n_chans,
            virtual_chan_1x1_conv=virtual_chan_1x1_conv,
            n_start_filters=n_start_filters,
            early_bnorm=early_bnorm,
            later_kernel_len=later_kernel_len,
            extra_conv_stride=extra_conv_stride,
            mean_across_features=mean_across_features,
            n_classifier_filters=n_classifier_filters,
            drop_prob=drop_prob)
    else:
        assert False, "unknown model name {:s}".format(model_name)
    if not model_name == '3path':
        to_dense_prediction_model(model)
    log.info("Model:\n{:s}".format(str(model)))
    time_cut_off_sec = np.inf
    start_time = time.time()

    # fix input time length in case of smac models
    if fix_input_length_for_smac:
        assert ('smac' in model_name) and (input_time_length == 12000)
        if cuda:
            model.cuda()
        test_input = np_to_var(
            np.ones((2, n_chans, input_time_length, 1), dtype=np.float32))
        if cuda:
            test_input = test_input.cuda()
        try:
            out = model(test_input)
        except RuntimeError as exc:
            raise ValueError("Model receptive field too large...") from exc
        n_preds_per_input = out.cpu().data.numpy().shape[2]
        n_receptive_field = input_time_length - n_preds_per_input
        input_time_length = 2 * n_receptive_field
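        # Rationale (added for clarity): n_preds_per_input is the number of
        # outputs the dense model produces for the 12000-sample probe, so the
        # difference to input_time_length is (approximately) the receptive
        # field of a single prediction. Taking twice that as the new crop
        # length keeps crops comfortably larger than the receptive field
        # while bounding memory use.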

    exp = common.run_exp(
        max_recording_mins,
        n_recordings,
        sec_to_cut_at_start,
        sec_to_cut_at_end,
        duration_recording_mins,
        max_abs_val,
        clip_before_resample,
        sampling_freq,
        divisor,
        n_folds,
        i_test_fold,
        shuffle,
        merge_train_valid,
        model,
        input_time_length,
        optimizer,
        learning_rate,
        weight_decay,
        scheduler,
        model_constraint,
        batch_size,
        max_epochs,
        only_return_exp,
        time_cut_off_sec,
        start_time,
        test_on_eval,
        test_recording_mins,
        sensor_types,
        log_dir,
        np_th_seed,
    )

    return exp
Code example #19
0
File: test_scoring.py Project: gemeinl/braindecode-1
def test_predict_trials():
    ds = MOABBDataset('BNCI2014001', subject_ids=1)
    ds1 = ds.split([0])['0']

    # determine original trial size
    windows_ds1 = create_windows_from_events(
        ds1,
    )
    trial_size = windows_ds1[0][0].shape[1]

    # create two windows per trial, where windows maximally overlap
    window_size_samples = trial_size - 1
    window_stride_samples = 5
    windows_ds1 = create_windows_from_events(
        ds1,
        window_size_samples=window_size_samples,
        window_stride_samples=window_stride_samples,
        drop_last_window=False,
    )

    in_chans = windows_ds1[0][0].shape[0]
    n_classes = len(windows_ds1.get_metadata()['target'].unique())
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
    )
    to_dense_prediction_model(model)

    output_shape = get_output_shape(model, in_chans, window_size_samples)
    # the number of samples required to get 1 output
    receptive_field_size = window_size_samples - output_shape[-1] + 1

    preds, targets = predict_trials(model, windows_ds1)

    # some model, cropped data
    assert preds.shape[-1] + receptive_field_size - 1 == trial_size
    assert preds.shape[1] == n_classes
    assert preds.shape[0] == targets.shape[0]
    metadata = windows_ds1.get_metadata()
    expected_targets = metadata[metadata['i_window_in_trial'] == 0][
        'target'].values
    np.testing.assert_array_equal(expected_targets, targets)

    # some model, trialwise data
    windows_ds2 = create_windows_from_events(ds1)
    with pytest.warns(UserWarning, match='This function was designed to predict'
                                         ' trials from cropped datasets.'):
        predict_trials(model, windows_ds2)

    # cropped EEGClassifier, cropped data
    clf = EEGClassifier(
        model,
        criterion=torch.nn.NLLLoss,
        optimizer=optim.AdamW,
        train_split=None,
        optimizer__lr=0.0625 * 0.01,
        optimizer__weight_decay=0,
        batch_size=64,
    )
    clf.initialize()
    clf.predict_trials(windows_ds1, return_targets=True)

    # cropped EEGClassifier, trialwise data
    with pytest.warns(UserWarning, match="This method was designed to predict "
                                         "trials in cropped mode. Calling it "
                                         "when cropped is False will give the "
                                         "same result as '.predict'."):
        clf.predict_trials(windows_ds2)
Code example #20
0
def test_eeg_classifier():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=False)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel="auto",
                            verbose="WARNING") for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude="bads")

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    # Convert data from volt to microvolt
    # Pytorch expects float32 for input and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_time_length=input_time_length,
        final_conv_length=12,
    )
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
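    # Note (added for clarity): after to_dense_prediction_model the model emits
    # one prediction per crop position along a time axis; its length at index 2
    # is the n_preds_per_input that the CroppedXyDataset below expects.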

    train_set = CroppedXyDataset(X[:60],
                                 y=y[:60],
                                 input_time_length=input_time_length,
                                 n_preds_per_input=n_preds_per_input)

    cropped_cb_train = CroppedTrialEpochScoring(
        "accuracy",
        name="train_trial_accuracy",
        lower_is_better=False,
        on_train=True,
    )

    cropped_cb_valid = CroppedTrialEpochScoring(
        "accuracy",
        on_train=False,
        name="valid_trial_accuracy",
        lower_is_better=False,
    )

    clf = EEGClassifier(
        model,
        criterion=CroppedNLLLoss,
        optimizer=optim.Adam,
        train_split=TrainTestSplit(
            train_size=0.8,
            input_time_length=input_time_length,
            n_preds_per_input=n_preds_per_input,
        ),
        batch_size=32,
        callbacks=[
            ("train_trial_accuracy", cropped_cb_train),
            ("valid_trial_accuracy", cropped_cb_valid),
        ],
    )

    clf.fit(train_set.X, train_set.y, epochs=4)

    expected = [
        {
            "batches": [
                {
                    "train_loss": 2.0750882625579834,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 3.09424090385437,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 1.079931378364563,
                    "train_batch_size": 32
                },
                {
                    "valid_loss": 2.3208131790161133,
                    "valid_batch_size": 24
                },
            ],
            "epoch":
            1,
            "train_batch_count":
            3,
            "valid_batch_count":
            1,
            "train_loss":
            2.083086848258972,
            "train_loss_best":
            True,
            "valid_loss":
            2.3208131790161133,
            "valid_loss_best":
            True,
            "train_trial_accuracy":
            0.5,
            "train_trial_accuracy_best":
            True,
            "valid_trial_accuracy":
            0.5,
            "valid_trial_accuracy_best":
            True,
        },
        {
            "batches": [
                {
                    "train_loss": 1.827332615852356,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 1.4135494232177734,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 1.1295170783996582,
                    "train_batch_size": 32
                },
                {
                    "valid_loss": 1.4291356801986694,
                    "valid_batch_size": 24
                },
            ],
            "epoch":
            2,
            "train_batch_count":
            3,
            "valid_batch_count":
            1,
            "train_loss":
            1.4567997058232625,
            "train_loss_best":
            True,
            "valid_loss":
            1.4291356801986694,
            "valid_loss_best":
            True,
            "train_trial_accuracy":
            0.5,
            "train_trial_accuracy_best":
            False,
            "valid_trial_accuracy":
            0.5,
            "valid_trial_accuracy_best":
            False,
        },
        {
            "batches": [
                {
                    "train_loss": 1.1495535373687744,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 2.356320381164551,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 0.9548418521881104,
                    "train_batch_size": 32
                },
                {
                    "valid_loss": 2.248246908187866,
                    "valid_batch_size": 24
                },
            ],
            "epoch":
            3,
            "train_batch_count":
            3,
            "valid_batch_count":
            1,
            "train_loss":
            1.4869052569071453,
            "train_loss_best":
            False,
            "valid_loss":
            2.248246908187866,
            "valid_loss_best":
            False,
            "train_trial_accuracy":
            0.5,
            "train_trial_accuracy_best":
            False,
            "valid_trial_accuracy":
            0.5,
            "valid_trial_accuracy_best":
            False,
        },
        {
            "batches": [
                {
                    "train_loss": 1.2157528400421143,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 1.1182057857513428,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 0.9163083434104919,
                    "train_batch_size": 32
                },
                {
                    "valid_loss": 0.9732739925384521,
                    "valid_batch_size": 24
                },
            ],
            "epoch":
            4,
            "train_batch_count":
            3,
            "valid_batch_count":
            1,
            "train_loss":
            1.083422323067983,
            "train_loss_best":
            True,
            "valid_loss":
            0.9732739925384521,
            "valid_loss_best":
            True,
            "train_trial_accuracy":
            0.5,
            "train_trial_accuracy_best":
            False,
            "valid_trial_accuracy":
            0.5,
            "valid_trial_accuracy_best":
            False,
        },
    ]

    history_without_dur = [{k: v
                            for k, v in h.items() if k != "dur"}
                           for h in clf.history]
    assert_deep_allclose(history_without_dur, expected, atol=1e-3, rtol=1e-3)
Code example #21
0
File: bci_center_4.py Project: stefanches7/OpenBMI
def exp(subject_id):
    dataset = MOABBDataset(dataset_name="BNCI2014001", subject_ids=subject_id)

    from braindecode.datautil.preprocess import exponential_moving_standardize
    from braindecode.datautil.preprocess import MNEPreproc, NumpyPreproc, preprocess

    low_cut_hz = 0.  # low cut frequency for filtering
    high_cut_hz = 49.  # high cut frequency for filtering
    # Parameters for exponential moving standardization
    factor_new = 1e-3
    init_block_size = 1000

    preprocessors = [
        # keep only EEG sensors
        MNEPreproc(fn='pick_types', eeg=True, meg=False, stim=False),
        # convert from volt to microvolt, directly modifying the numpy array
        NumpyPreproc(fn=lambda x: x * 1e6),
        # bandpass filter
        MNEPreproc(fn='filter', l_freq=low_cut_hz, h_freq=high_cut_hz),
        # exponential moving standardization
        # NumpyPreproc(fn=exponential_moving_standardize, factor_new=factor_new,
        #     init_block_size=init_block_size)
    ]

    # Transform the data
    preprocess(dataset, preprocessors)

    ######################################################################
    # Create model and compute windowing parameters
    # ---------------------------------------------
    #

    ######################################################################
    # In contrast to trialwise decoding, we first have to create the model
    # before we can cut the dataset into windows. This is because we need to
    # know the receptive field of the network to know how large the window
    # stride should be.
    #

    ######################################################################
    # We first choose the compute/input window size that will be fed to the
    # network during training. This has to be larger than the network's
    # receptive field size and can otherwise be chosen for computational
    # efficiency (see explanations in the beginning of this tutorial). Here we
    # choose 1000 samples, which is 4 seconds at the 250 Hz sampling rate.
    #

    input_window_samples = 1000

    ######################################################################
    # Now we create the model. To enable it to be used in cropped decoding
    # efficiently, we manually set the length of the final convolution layer
    # to some length that makes the receptive field of the ConvNet smaller
    # than ``input_window_samples`` (see ``final_conv_length=30`` in the model
    # definition).
    #

    import torch
    from braindecode.util import set_random_seeds
    from braindecode.models import ShallowFBCSPNet, Deep4Net

    cuda = torch.cuda.is_available()  # check if GPU is available; if so, it is used
    device = 'cuda:1' if cuda else 'cpu'
    if cuda:
        torch.backends.cudnn.benchmark = True
    seed = 20190706  # random seed to make results reproducible
    # Set random seed to be able to reproduce results
    set_random_seeds(seed=seed, cuda=cuda)

    n_classes = 4
    # Extract number of chans from dataset
    n_chans = dataset[0][0].shape[0]

    # model = Deep4Net(
    #     n_chans,
    #     n_classes,
    #     input_window_samples=input_window_samples,
    #     final_conv_length="auto",
    # )
    #
    #
    #
    # embedding_net = Deep4Net_origin(4, 22, input_window_samples)
    # model = FcClfNet(embedding_net)

    model = ShallowFBCSPNet(
        n_chans,
        n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=30,
    )

    print(model)

    # Send model to GPU
    if cuda:
        model.cuda(device)

    ######################################################################
    # And now we transform model with strides to a model that outputs dense
    # prediction, so we can use it to obtain predictions for all
    # crops.
    #

    from braindecode.models.util import to_dense_prediction_model, get_output_shape
    to_dense_prediction_model(model)

    n_preds_per_input = get_output_shape(model, n_chans,
                                         input_window_samples)[2]
    print("n_preds_per_input : ", n_preds_per_input)
    print(model)

    ######################################################################
    # Cut the data into windows
    # -------------------------
    #

    ######################################################################
    # In contrast to trialwise decoding, we have to supply an explicit window size and window stride to the
    # ``create_windows_from_events`` function.
    #

    import numpy as np
    from braindecode.datautil.windowers import create_windows_from_events

    trial_start_offset_seconds = -0.5
    # Extract sampling frequency, check that they are same in all datasets
    sfreq = dataset.datasets[0].raw.info['sfreq']
    assert all([ds.raw.info['sfreq'] == sfreq for ds in dataset.datasets])

    # Calculate the trial start offset in samples.
    trial_start_offset_samples = int(trial_start_offset_seconds * sfreq)

    # Create windows using the braindecode function for this. It needs parameters
    # to define how trials should be used.
    windows_dataset = create_windows_from_events(
        dataset,
        trial_start_offset_samples=trial_start_offset_samples,
        trial_stop_offset_samples=0,
        window_size_samples=input_window_samples,
        window_stride_samples=n_preds_per_input,
        drop_last_window=False,
        preload=True,
    )
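    # With window_stride_samples equal to n_preds_per_input, neighbouring
    # windows are shifted by exactly the number of predictions one window
    # yields, so their dense outputs tile each trial without gaps and with
    # essentially no duplicated predictions.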

    ######################################################################
    # Split the dataset
    # -----------------
    #
    # This code is the same as in trialwise decoding.
    #

    from braindecode.datasets.base import BaseConcatDataset
    splitted = windows_dataset.split('session')

    train_set = splitted['session_T']
    valid_set = splitted['session_E']

    lr = 0.0625 * 0.01
    weight_decay = 0
    batch_size = 8
    n_epochs = 100

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=batch_size,
                                               shuffle=True)
    # valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False)
    test_loader = torch.utils.data.DataLoader(valid_set,
                                              batch_size=batch_size,
                                              shuffle=False)

    # Send model to GPU
    if cuda:
        model.cuda(device)

    from torch.optim import lr_scheduler
    import torch.optim as optim

    import argparse
    parser = argparse.ArgumentParser(
        description='cross subject domain adaptation')
    parser.add_argument('--batch-size',
                        type=int,
                        default=50,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=50,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    args.gpuidx = 0
    args.seed = 0
    args.use_tensorboard = False
    args.save_model = False

    optimizer = optim.AdamW(model.parameters(),
                            lr=lr,
                            weight_decay=weight_decay)
    # scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=1)
    scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=n_epochs - 1)

    import pandas as pd
    results_columns = ['test_loss', 'test_accuracy']
    df = pd.DataFrame(columns=results_columns)

    for epochidx in range(1, n_epochs):
        print(epochidx)
        train_crop(10, model, device, train_loader, optimizer, scheduler, cuda,
                   args.gpuidx)
        test_loss, test_score = eval_crop(model, device, test_loader)
        results = {'test_loss': test_loss, 'test_accuracy': test_score}
        df = pd.concat([df, pd.DataFrame([results])], ignore_index=True)
        print(results)

    return df
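
A possible driver loop for the experiment above (a hedged sketch, not part of the original project: the subject ids and the aggregation are chosen only for illustration, and it assumes exp is defined/importable as above):

import pandas as pd

subject_ids = [1, 2, 3]  # arbitrary subjects for illustration
dfs = [exp(subject_id) for subject_id in subject_ids]
all_results = pd.concat(dfs, keys=subject_ids, names=['subject', 'epoch'])
# best test accuracy reached by each subject
print(all_results.groupby('subject')['test_accuracy'].max())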
Code example #22
0
def test_variable_length_trials_cropped_decoding():
    cuda = False
    set_random_seeds(seed=20210726, cuda=cuda)

    # create fake tuh abnormal dataset
    tuh = _TUHAbnormalMock(path='')
    # fake variable length trials by cropping first recording
    splits = tuh.split([[i] for i in range(len(tuh.datasets))])
    preprocess(
        concat_ds=splits['0'],
        preprocessors=[
            Preprocessor('crop', tmax=300),
        ],
    )
    variable_tuh = BaseConcatDataset(
        [splits[str(i)] for i in range(len(tuh.datasets))])
    # make sure we actually have different length trials
    assert any(np.diff([ds.raw.n_times for ds in variable_tuh.datasets]) != 0)

    # create windows
    variable_tuh_windows = create_fixed_length_windows(
        concat_ds=variable_tuh,
        window_size_samples=1000,
        window_stride_samples=1000,
        drop_last_window=False,
        mapping={
            True: 1,
            False: 0
        },
    )

    # create train and valid set
    splits = variable_tuh_windows.split(
        [[i] for i in range(len(variable_tuh_windows.datasets))])
    variable_tuh_windows_train = BaseConcatDataset(
        [splits[str(i)] for i in range(len(tuh.datasets) - 1)])
    variable_tuh_windows_valid = BaseConcatDataset(
        [splits[str(len(tuh.datasets) - 1)]])
    for x, y, ind in variable_tuh_windows_train:
        break
    train_split = predefined_split(variable_tuh_windows_valid)

    # initialize a model
    model = ShallowFBCSPNet(
        in_chans=x.shape[0],
        n_classes=len(tuh.description.pathological.unique()),
    )
    to_dense_prediction_model(model)
    if cuda:
        model.cuda()

    # create and train a classifier
    clf = EEGClassifier(
        model,
        cropped=True,
        criterion=CroppedLoss,
        criterion__loss_function=torch.nn.functional.nll_loss,
        optimizer=torch.optim.Adam,
        batch_size=32,
        callbacks=['accuracy'],
        train_split=train_split,
    )
    clf.fit(variable_tuh_windows_train, y=None, epochs=3)

    # make sure it does what we expect
    np.testing.assert_allclose(
        clf.history[:, 'train_loss'],
        np.array([
            0.689495325088501,
            0.1353449523448944,
            0.006638816092163324,
        ]),
        rtol=1e-1,
        atol=1e-1,
    )

    np.testing.assert_allclose(
        clf.history[:, 'valid_loss'],
        np.array([
            2.925871,
            3.611423,
            4.23494,
        ]),
        rtol=1e-1,
        atol=1e-1,
    )
Code example #23
0
File: example.py Project: ecmyhre/high-gamma-dataset
def run_exp_on_high_gamma_dataset(train_filename, test_filename, low_cut_hz,
                                  model_name, max_epochs, max_increase_epochs,
                                  np_th_seed, debug):
    train_set, valid_set, test_set = load_train_valid_test(
        train_filename=train_filename,
        test_filename=test_filename,
        low_cut_hz=low_cut_hz,
        debug=debug)
    if debug:
        max_epochs = 4

    set_random_seeds(np_th_seed, cuda=True)
    # torch.backends.cudnn.benchmark = True  # sometimes crashes?
    n_classes = int(np.max(train_set.y) + 1)
    n_chans = int(train_set.X.shape[1])
    input_time_length = 1000
    if model_name == 'deep':
        model = Deep4Net(n_chans,
                         n_classes,
                         input_time_length=input_time_length,
                         final_conv_length=2).create_network()
    elif model_name == 'shallow':
        model = ShallowFBCSPNet(n_chans,
                                n_classes,
                                input_time_length=input_time_length,
                                final_conv_length=30).create_network()

    to_dense_prediction_model(model)
    model.cuda()
    model.eval()

    out = model(np_to_var(train_set.X[:1, :, :input_time_length, None]).cuda())

    n_preds_per_input = out.cpu().data.numpy().shape[2]
    optimizer = optim.Adam(model.parameters(), weight_decay=0, lr=1e-3)

    iterator = CropsFromTrialsIterator(batch_size=60,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input,
                                       seed=np_th_seed)

    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length=input_time_length),
        RuntimeMonitor()
    ]

    model_constraint = MaxNormDefaultConstraint()

    loss_function = lambda preds, targets: F.nll_loss(th.mean(preds, dim=2),
                                                      targets)

    run_after_early_stop = True
    do_early_stop = True
    remember_best_column = 'valid_misclass'
    stop_criterion = Or([
        MaxEpochs(max_epochs),
        NoDecrease('valid_misclass', max_increase_epochs)
    ])

    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator=iterator,
                     loss_function=loss_function,
                     optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column=remember_best_column,
                     run_after_early_stop=run_after_early_stop,
                     cuda=True,
                     do_early_stop=do_early_stop)
    exp.run()
    return exp
Code example #24
0
def create_model(in_channels, num_classes, cuda=True):
    def squeeze_out(x):
        assert x.size()[1] == num_classes and x.size()[3] == 1
        return x.squeeze(3).transpose(1, 2)

    if cfg.TRAINING.MODEL.lower() == 'rnn':
        model = RNNs(in_channels=in_channels)

    elif 'deep4' in cfg.TRAINING.MODEL.lower():
        if 'wide' in cfg.TRAINING.MODEL.lower():
            pool_length = 4
            pool_stride = 4
        elif 'narrow' in cfg.TRAINING.MODEL.lower():
            pool_length = 2
            pool_stride = 2
        else:
            pool_length = 3
            pool_stride = 3

        model = Deep4Net(in_chans=in_channels,
                         n_classes=num_classes,
                         input_time_length=cfg.TRAINING.CROP_LEN,
                         pool_time_length=pool_length,
                         pool_time_stride=pool_stride,
                         final_conv_length=2,
                         stride_before_pool=True).create_network()

        # remove softmax
        new_model = nn.Sequential()
        for name, module in model.named_children():
            if name == 'softmax':
                # continue
                break
            new_model.add_module(name, module)

        # remove empty final dimension and permute output shape
        new_model.add_module('squeeze', Expression(squeeze_out))
        # if num_classes > 1:
        #     def transpose_class_time(x):
        #         return x.transpose(2, 1)
        #
        #     new_model.add_module('trans', Expression(transpose_class_time))

        model = new_model

        to_dense_prediction_model(model)

    elif cfg.TRAINING.MODEL.lower() == 'deep5':
        #  pool_time_length=3
        #  pool_time_stride=3
        model = Deep5Net(in_chans=in_channels,
                         n_classes=num_classes,
                         input_time_length=cfg.TRAINING.CROP_LEN,
                         final_conv_length=2,
                         stride_before_pool=True).create_network()

        # remove softmax
        new_model = nn.Sequential()
        for name, module in model.named_children():
            if name == 'softmax':
                # continue
                break
            new_model.add_module(name, module)

        # remove empty final dimension and permute output shape
        new_model.add_module('squeeze', Expression(squeeze_out))
        # if num_classes > 1:
        #     def transpose_class_time(x):
        #         return x.transpose(2, 1)
        #
        #     new_model.add_module('trans', Expression(transpose_class_time))

        model = new_model

        to_dense_prediction_model(model)

    elif cfg.TRAINING.MODEL.lower() == 'shallow':
        model = Shallow(in_chans=in_channels,
                        n_classes=num_classes,
                        input_time_length=cfg.TRAINING.CROP_LEN,
                        final_conv_length=2).create_network()

        # remove softmax
        new_model = nn.Sequential()
        for name, module in model.named_children():
            if name == 'softmax':
                break
            new_model.add_module(name, module)

        # remove empty final dimension and permute output shape
        new_model.add_module('squeeze', Expression(squeeze_out))

        to_dense_prediction_model(model)

    elif cfg.TRAINING.MODEL.lower() == 'hybrid':
        model = Hybrid(in_channels=in_channels)

    elif cfg.TRAINING.MODEL.lower() == 'tcn':
        raise NotImplementedError
    else:
        assert False, f"Unknown Model {cfg.TRAINING.MODEL}"
    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.OPTIMIZATION.BASE_LR,
                           weight_decay=cfg.OPTIMIZATION.WEIGHT_DECAY)
    scheduler = CosineAnnealingLR(optimizer, T_max=cfg.TRAINING.MAX_EPOCHS)
    if cuda:
        model.cuda()

    model.eval()

    metric = lambda targets, predictions: np.corrcoef(targets, predictions)[0, 1]
    loss_fun = mse_loss
    logger.info(model)
    return model, optimizer, scheduler, loss_fun, metric
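
Hedged usage note for the factory above (the global cfg object and the training/evaluation loop are assumed to live elsewhere in this project; the argument values below are hypothetical):

# model, optimizer, scheduler, loss_fun, metric = create_model(
#     in_channels=64, num_classes=1, cuda=False)
# loss = loss_fun(model(batch_x), batch_y)   # mse_loss on the dense outputs
# scheduler.step()                           # advance the cosine-annealed LR once per epoch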
Code example #25
0
def test_cropped_decoding():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(
        subject_id, event_codes, update_path=False
    )

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(
            path, preload=True, stim_channel="auto", verbose="WARNING"
        )
        for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)
    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(
        raw.info, meg=False, eeg=True, stim=False, eog=False, exclude="bads"
    )

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )
    # Convert data from volt to microvolt
    # Pytorch expects float32 for input and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_time_length=input_time_length,
        final_conv_length=12,
    )
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    # Perform forward pass to determine how many outputs per input
    n_preds_per_input = get_output_shape(model, in_chans, input_time_length)[2]
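    # get_output_shape performs a dummy forward pass internally; index 2 of the
    # returned shape is the number of dense predictions per 450-sample window.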

    train_set = CroppedXyDataset(X[:60], y[:60],
                                 input_time_length=input_time_length,
                                 n_preds_per_input=n_preds_per_input)
    valid_set = CroppedXyDataset(X[60:], y=y[60:],
                                 input_time_length=input_time_length,
                                 n_preds_per_input=n_preds_per_input)
    train_split = predefined_split(valid_set)

    clf = EEGClassifier(
        model,
        cropped=True,
        criterion=CroppedLoss,
        criterion__loss_function=torch.nn.functional.nll_loss,
        optimizer=optim.Adam,
        train_split=train_split,
        batch_size=32,
        callbacks=['accuracy'],
    )

    clf.fit(train_set, y=None, epochs=4)

    np.testing.assert_allclose(
        clf.history[:, 'train_loss'],
        np.array(
            [
                1.455306,
                1.455934,
                1.210563,
                1.065806
            ]
        ),
        rtol=1e-4,
        atol=1e-5,
    )

    np.testing.assert_allclose(
        clf.history[:, 'valid_loss'],
        np.array(
            [
                2.547288,
                1.51785,
                1.394036,
                1.064355
            ]
        ),
        rtol=1e-4,
        atol=1e-4,
    )
    np.testing.assert_allclose(
        clf.history[:, 'train_accuracy'],
        np.array(
            [
                0.5,
                0.5,
                0.5,
                0.533333
            ]
        ),
        rtol=1e-4,
        atol=1e-5,
    )
    np.testing.assert_allclose(
        clf.history[:, 'valid_accuracy'],
        np.array(
            [
                0.533333,
                0.466667,
                0.533333,
                0.5
            ]
        ),
        rtol=1e-4,
        atol=1e-5,
    )
Code example #26
0
def test_eeg_classifier():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=False)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel="auto",
                            verbose="WARNING") for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude="bads")

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    # Convert data from volt to microvolt
    # Pytorch expects float32 for input and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_window_samples = 450
    n_classes = 2
    in_chans = X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=12,
    )
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_window_samples, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]

    train_set = create_from_X_y(X[:48],
                                y[:48],
                                drop_last_window=False,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    valid_set = create_from_X_y(X[48:60],
                                y[48:60],
                                drop_last_window=False,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    cropped_cb_train = CroppedTrialEpochScoring(
        "accuracy",
        name="train_trial_accuracy",
        lower_is_better=False,
        on_train=True,
    )

    cropped_cb_valid = CroppedTrialEpochScoring(
        "accuracy",
        on_train=False,
        name="valid_trial_accuracy",
        lower_is_better=False,
    )

    clf = EEGClassifier(
        model,
        criterion=CroppedLoss,
        criterion__loss_function=nll_loss,
        optimizer=optim.Adam,
        train_split=predefined_split(valid_set),
        batch_size=32,
        callbacks=[
            ("train_trial_accuracy", cropped_cb_train),
            ("valid_trial_accuracy", cropped_cb_valid),
        ],
    )

    clf.fit(train_set, y=None, epochs=4)

    expected = [
        {
            "batches": [
                {"train_loss": 1.9391239881515503, "train_batch_size": 32},
                {"train_loss": 2.895704507827759, "train_batch_size": 32},
                {"train_loss": 1.0713893175125122, "train_batch_size": 32},
                {"valid_loss": 1.1811838150024414, "valid_batch_size": 24},
            ],
            "epoch": 1,
            "train_batch_count": 3,
            "valid_batch_count": 1,
            "train_loss": 1.9687392711639404,
            "train_loss_best": True,
            "valid_loss": 1.1811838150024414,
            "valid_loss_best": True,
            "train_trial_accuracy": 0.4791666666666667,
            "train_trial_accuracy_best": True,
            "valid_trial_accuracy": 0.5,
            "valid_trial_accuracy_best": True,
        },
        {
            "batches": [
                {"train_loss": 1.5488793849945068, "train_batch_size": 32},
                {"train_loss": 1.1174801588058472, "train_batch_size": 32},
                {"train_loss": 1.1525697708129883, "train_batch_size": 32},
                {"valid_loss": 2.202029228210449, "valid_batch_size": 24},
            ],
            "epoch": 2,
            "train_batch_count": 3,
            "valid_batch_count": 1,
            "train_loss": 1.2729764382044475,
            "train_loss_best": True,
            "valid_loss": 2.202029228210449,
            "valid_loss_best": False,
            "train_trial_accuracy": 0.5,
            "train_trial_accuracy_best": True,
            "valid_trial_accuracy": 0.5,
            "valid_trial_accuracy_best": False,
        },
        {
            "batches": [
                {"train_loss": 1.0049529075622559, "train_batch_size": 32},
                {"train_loss": 1.0266971588134766, "train_batch_size": 32},
                {"train_loss": 1.0799436569213867, "train_batch_size": 32},
                {"valid_loss": 1.0638500452041626, "valid_batch_size": 24},
            ],
            "epoch": 3,
            "train_batch_count": 3,
            "valid_batch_count": 1,
            "train_loss": 1.0371979077657063,
            "train_loss_best": True,
            "valid_loss": 1.0638500452041626,
            "valid_loss_best": True,
            "train_trial_accuracy": 0.5,
            "train_trial_accuracy_best": False,
            "valid_trial_accuracy": 0.5,
            "valid_trial_accuracy_best": False,
        },
        {
            "batches": [
                {"train_loss": 1.0052555799484253, "train_batch_size": 32},
                {"train_loss": 0.8479514718055725, "train_batch_size": 32},
                {"train_loss": 0.9589881300926208, "train_batch_size": 32},
                {"valid_loss": 0.8794112801551819, "valid_batch_size": 24},
            ],
            "epoch": 4,
            "train_batch_count": 3,
            "valid_batch_count": 1,
            "train_loss": 0.9373983939488729,
            "train_loss_best": True,
            "valid_loss": 0.8794112801551819,
            "valid_loss_best": True,
            "train_trial_accuracy": 0.5,
            "train_trial_accuracy_best": False,
            "valid_trial_accuracy": 0.5,
            "valid_trial_accuracy_best": False,
        },
    ]

    history_without_dur = [{k: v
                            for k, v in h.items() if k != "dur"}
                           for h in clf.history]
    assert_deep_allclose(expected, history_without_dur, atol=1e-3, rtol=1e-3)
コード例 #27
0
def test_eeg_classifier():
    # 5, 6, 9, 10, 13, 14 are the runs with executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=False)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel="auto",
                            verbose="WARNING") for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude="bads")

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    # Convert data from volt to microvolt
    # Pytorch expects float32 for input and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_window_samples = 450
    n_classes = 2
    in_chans = X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=12,
    )
    to_dense_prediction_model(model)
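    # to_dense_prediction_model removes the strides of the pooling/conv layers
    # and uses dilations instead, so one forward pass yields a dense prediction
    # per receptive-field position ("cropped" decoding) rather than one per window.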

    if cuda:
        model.cuda()

    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_window_samples, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
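    # n_preds_per_input is how many dense predictions the model emits for one
    # window of input_window_samples samples; it is reused below as the window
    # stride so consecutive windows give non-overlapping predictions.
    # A sketch of an equivalent shortcut, assuming this braindecode version also
    # ships the helper used in the later examples:
    # from braindecode.models.util import get_output_shape
    # n_preds_per_input = get_output_shape(model, in_chans, input_window_samples)[2]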

    train_set = create_from_X_y(X[:48],
                                y[:48],
                                drop_last_window=False,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    valid_set = create_from_X_y(X[48:60],
                                y[48:60],
                                drop_last_window=False,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)
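    # With window_stride_samples equal to n_preds_per_input, the dense outputs of
    # successive windows tile each trial without overlap, which is what the
    # cropped scoring callbacks below assume when aggregating to trial level.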

    cropped_cb_train = CroppedTrialEpochScoring(
        "accuracy",
        name="train_trial_accuracy",
        lower_is_better=False,
        on_train=True,
    )

    cropped_cb_valid = CroppedTrialEpochScoring(
        "accuracy",
        on_train=False,
        name="valid_trial_accuracy",
        lower_is_better=False,
    )

    clf = EEGClassifier(
        model,
        criterion=CroppedLoss,
        criterion__loss_function=nll_loss,
        optimizer=optim.Adam,
        train_split=predefined_split(valid_set),
        batch_size=32,
        callbacks=[
            ("train_trial_accuracy", cropped_cb_train),
            ("valid_trial_accuracy", cropped_cb_valid),
        ],
    )

    clf.fit(train_set, y=None, epochs=4)

    expected = [{
        'batches': [
            {'train_batch_size': 32, 'train_loss': 1.9391239881515503},
            {'train_batch_size': 32, 'train_loss': 2.895704507827759},
            {'train_batch_size': 32, 'train_loss': 1.0713887214660645},
            {'valid_batch_size': 24, 'valid_loss': 1.18110191822052},
        ],
        'epoch': 1,
        'train_batch_count': 3,
        'train_loss': 1.9687390724817913,
        'train_loss_best': True,
        'train_trial_accuracy': 0.4791666666666667,
        'train_trial_accuracy_best': True,
        'valid_batch_count': 1,
        'valid_loss': 1.18110191822052,
        'valid_loss_best': True,
        'valid_trial_accuracy': 0.5,
        'valid_trial_accuracy_best': True
    }, {
        'batches': [
            {'train_batch_size': 32, 'train_loss': 1.6741573810577393},
            {'train_batch_size': 32, 'train_loss': 0.9984264373779297},
            {'train_batch_size': 32, 'train_loss': 1.1340471506118774},
            {'valid_batch_size': 24, 'valid_loss': 2.5375664234161377},
        ],
        'epoch': 2,
        'train_batch_count': 3,
        'train_loss': 1.2688769896825154,
        'train_loss_best': True,
        'train_trial_accuracy': 0.5,
        'train_trial_accuracy_best': True,
        'valid_batch_count': 1,
        'valid_loss': 2.5375664234161377,
        'valid_loss_best': False,
        'valid_trial_accuracy': 0.5,
        'valid_trial_accuracy_best': False
    }, {
        'batches': [
            {'train_batch_size': 32, 'train_loss': 0.8795645833015442},
            {'train_batch_size': 32, 'train_loss': 1.0339491367340088},
            {'train_batch_size': 32, 'train_loss': 1.19275963306427},
            {'valid_batch_size': 24, 'valid_loss': 1.655737042427063},
        ],
        'epoch': 3,
        'train_batch_count': 3,
        'train_loss': 1.0354244510332744,
        'train_loss_best': True,
        'train_trial_accuracy': 0.5,
        'train_trial_accuracy_best': False,
        'valid_batch_count': 1,
        'valid_loss': 1.655737042427063,
        'valid_loss_best': False,
        'valid_trial_accuracy': 0.5,
        'valid_trial_accuracy_best': False
    }, {
        'batches': [
            {'train_batch_size': 32, 'train_loss': 1.1963350772857666},
            {'train_batch_size': 32, 'train_loss': 0.8621770143508911},
            {'train_batch_size': 32, 'train_loss': 1.099318265914917},
            {'valid_batch_size': 24, 'valid_loss': 1.0293445587158203},
        ],
        'epoch': 4,
        'train_batch_count': 3,
        'train_loss': 1.0526101191838582,
        'train_loss_best': False,
        'train_trial_accuracy': 0.625,
        'train_trial_accuracy_best': True,
        'valid_batch_count': 1,
        'valid_loss': 1.0293445587158203,
        'valid_loss_best': True,
        'valid_trial_accuracy': 0.25,
        'valid_trial_accuracy_best': False
    }]

    history_without_dur = [{k: v
                            for k, v in h.items() if k != "dur"}
                           for h in clf.history]
    assert_deep_allclose(expected, history_without_dur, atol=1e-3, rtol=1e-3)
コード例 #28
0
def exp(subject_id):
    import torch
    input_window_samples = 1000

    cuda = torch.cuda.is_available()  # use the GPU if one is available
    device = 'cuda:0' if cuda else 'cpu'
    if cuda:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    seed = 20190706  # random seed to make results reproducible
    # Set random seed to be able to reproduce results
    random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)

    n_classes = 4

    PATH = '../datasets/'
    with open(PATH + 'bcic_datasets_[0,49].pkl', 'rb') as f:
        data = pickle.load(f)

    print('subject:' + str(subject_id))


    # make train/test split
    tr = []
    val = []
    test_train_split = 0.5

    dataset = data[subject_id]

    dataset_size = len(dataset)
    indices = list(range(dataset_size))

    test_split = int(np.floor(test_train_split * dataset_size))

    train_indices, test_indices = indices[:test_split], indices[test_split:]

    np.random.shuffle(train_indices)
    # quick inspection of a sample subject's data
    sample_data = data[0].dataset
    sample_data.psd()
    from mne.viz import plot_epochs_image
    import mne
    plot_epochs_image(sample_data, picks=['C3','C4'])

    label = sample_data.read_label()

    sample_data.plot_projs_topomap()

    train_sampler = SubsetRandomSampler(train_indices)
    test_sampler = SubsetRandomSampler(test_indices)

    from braindecode.models import ShallowFBCSPNet
    model = ShallowFBCSPNet(
        22,
        n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=30,
    )

    from braindecode.models.util import to_dense_prediction_model, get_output_shape
    to_dense_prediction_model(model)

    n_preds_per_input = get_output_shape(model, 22, input_window_samples)[2]
    print("n_preds_per_input : ", n_preds_per_input)
    print(model)


    # crop_size = 1000
    # model = ShallowNet_dense(n_classes, 22, crop_size)
    # print(model)

    epochs = 100

    # For deep4 they should be:
    lr = 1 * 0.01
    weight_decay = 0.5 * 0.001

    batch_size = 8

    train_set = torch.utils.data.Subset(dataset, indices=train_indices)
    test_set = torch.utils.data.Subset(dataset, indices=test_indices)

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)
    # Send model to GPU
    if cuda:
        model.cuda(device=device)

    from torch.optim import lr_scheduler
    import torch.optim as optim

    import argparse
    parser = argparse.ArgumentParser(description='cross subject domain adaptation')
    parser.add_argument('--batch-size', type=int, default=50, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=50, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    args.gpuidx = 0
    args.seed = 0
    args.use_tensorboard = False
    args.save_model = False

    lr = 0.0625 * 0.01
    weight_decay = 0
    optimizer = optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
    # scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
    scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max = epochs-1)
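    # CosineAnnealingLR decays the learning rate along a half cosine from its
    # initial value towards ~0 over T_max steps; with one scheduler step per
    # epoch (assuming train_crop steps it once per call), T_max = epochs - 1
    # reaches the minimum on the last epoch.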
    #
    # #test lr
    # lr = []
    # for i in range(200):
    #     scheduler.step()
    #     lr.append(scheduler.get_lr())
    #
    # import matplotlib.pyplot as plt
    # plt.plot(lr)

    import pandas as pd
    results_columns = ['test_loss', 'test_accuracy']
    df = pd.DataFrame(columns=results_columns)

    for epochidx in range(1, epochs):
        print(epochidx)
        train_crop(10, model, device, train_loader, optimizer, scheduler, cuda, args.gpuidx)
        test_loss, test_score = eval_crop(model, device, test_loader)
        results = {'test_loss': test_loss, 'test_accuracy': test_score}
        # DataFrame.append was removed in pandas 2.0; concatenate a one-row frame instead.
        df = pd.concat([df, pd.DataFrame([results])], ignore_index=True)
        print(results)

    return df
コード例 #29
0
    optimizer_lr = 0.01
    optimizer_weight_decay = 0.0005
else:
    raise ValueError(f'{model_name} unknown')

new_model = torch.nn.Sequential()
for name, module_ in model.named_children():
    if "softmax" in name:
        continue
    new_model.add_module(name, module_)
model = new_model

if cuda:
    model.cuda()

to_dense_prediction_model(model)
n_preds_per_input = get_output_shape(model, n_chans, input_window_samples)[2]
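# As in the earlier examples, n_preds_per_input is meant to be used as the
# window stride when cutting fixed-length windows (see the commented-out
# create_fixed_length_windows call below), so that the dense predictions of
# consecutive windows tile each recording.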

train_set, valid_set = create_compatible_dataset('./data/BCICIV_4_mat/sub1_comp.mat')

# dataset = create_fixed_length_windows(
#     dataset,
#     start_offset_samples=0,
#     stop_offset_samples=0,
#     window_size_samples=input_window_samples,
#     window_stride_samples=n_preds_per_input,
#     drop_last_window=False,
#     drop_bad_windows=True,
# )

# splits = dataset.split("session")
コード例 #30
0
def run_exp(data_folder, subject_id, low_cut_hz, model, cuda):
    train_filename = 'A{:02d}T.gdf'.format(subject_id)
    test_filename = 'A{:02d}E.gdf'.format(subject_id)

    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)

    train_label_filepath = train_filepath.replace('.gdf', '.mat')

    test_label_filepath = test_filepath.replace('.gdf', '.mat')

    train_loader = BCICompetition4Set2A(
        train_filepath, labels_filename=train_label_filepath)
    test_loader = BCICompetition4Set2A(
        test_filepath, labels_filename=test_label_filepath)
    #print(train_loader)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()

    # Preprocessing

    train_cnt = train_cnt.drop_channels(['STI 014', 'EOG-left',
                                         'EOG-central', 'EOG-right'])
    assert len(train_cnt.ch_names) == 22
    # let's convert to microvolt for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(a, low_cut_hz, 38, train_cnt.info['sfreq'],
                               filt_order=3,
                               axis=1), train_cnt)
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(a.T, factor_new=1e-3,
                                                  init_block_size=1000,
                                                  eps=1e-4).T,
        train_cnt)

    test_cnt = test_cnt.drop_channels(['STI 014', 'EOG-left',
                                       'EOG-central', 'EOG-right'])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(a, low_cut_hz, 38, test_cnt.info['sfreq'],
                               filt_order=3,
                               axis=1), test_cnt)
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(a.T, factor_new=1e-3,
                                                  init_block_size=1000,
                                                  eps=1e-4).T,
        test_cnt)

    marker_def = OrderedDict([('Left Hand', [1]), ('Right Hand', [2]),
                              ('Foot', [3]), ('Tongue', [4])])
    ival = [-500, 4000]

    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)

    train_set, valid_set = split_into_two_sets(train_set,
                                               first_set_fraction=0.8)

    set_random_seeds(seed=20190706, cuda=cuda)

    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    input_time_length = 1000
    if model == 'shallow':
        model = ShallowFBCSPNet(n_chans, n_classes, input_time_length=input_time_length,
                            final_conv_length=30).create_network()
    elif model == 'deep':
        model = Deep4Net(n_chans, n_classes, input_time_length=input_time_length,
                            final_conv_length=2).create_network()


    to_dense_prediction_model(model)
    if cuda:
        model.cuda()

    log.info("Model: \n{:s}".format(str(model)))
    dummy_input = np_to_var(train_set.X[:1, :, :, None])
    if cuda:
        dummy_input = dummy_input.cuda()
    out = model(dummy_input)

    n_preds_per_input = out.cpu().data.numpy().shape[2]

    optimizer = optim.Adam(model.parameters())

    iterator = CropsFromTrialsIterator(batch_size=60,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)
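    # CropsFromTrialsIterator cuts each trial into crops of input_time_length
    # samples spaced n_preds_per_input apart, so the dense predictions of
    # successive crops jointly cover the whole trial.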

    stop_criterion = Or([MaxEpochs(800),
                         NoDecrease('valid_misclass', 80)])

    monitors = [LossMonitor(), MisclassMonitor(col_suffix='sample_misclass'),
                CroppedTrialMisclassMonitor(
                    input_time_length=input_time_length), RuntimeMonitor()]

    model_constraint = MaxNormDefaultConstraint()

    loss_function = lambda preds, targets: F.nll_loss(
        th.mean(preds, dim=2, keepdim=False), targets)
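    # The loss above averages the dense per-crop predictions over the time
    # dimension (dim=2) before applying NLL, which is how cropped training is
    # done in this older braindecode API.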

    exp = Experiment(model, train_set, valid_set, test_set, iterator=iterator,
                     loss_function=loss_function, optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True, cuda=cuda)
    exp.run()
    return exp