def get_deep_learning_model(model_args, valid_dataset):
    cuda = torch.cuda.is_available()
    device = model_args["device"] if cuda else 'cpu'
    if cuda:
        torch.backends.cudnn.benchmark = True
    seed = model_args["seed"]
    # Set random seed (e.g. 20200220) to make results reproducible
    if seed:
        set_random_seeds(seed=seed, cuda=cuda)

    if model_args["model_type"] == "ShallowFBCSPNet":
        model = ShallowFBCSPNet(
            model_args["n_chans"],
            model_args["n_classes"] + 1,
            input_window_samples=model_args["input_window_samples"],
            final_conv_length='auto',
        )
    elif model_args["model_type"] == "SleepStager":
        model = SleepStager(
            n_channels=model_args["n_chans"],
            sfreq=model_args["sfreq"],
            n_classes=model_args["n_classes"] + 1,
            input_size_s=model_args["input_window_samples"] /
            model_args["sfreq"],
        )
    else:
        raise ValueError(f"Unknown model_type: {model_args['model_type']}")

    if cuda:
        model.cuda()

    clf = EEGClassifier(
        model,
        criterion=model_args["criterion"],
        optimizer=torch.optim.AdamW,
        # use the held-out valid_dataset for validation
        train_split=predefined_split(valid_dataset),
        optimizer__lr=model_args["lr"],
        optimizer__weight_decay=model_args["weight_decay"],
        batch_size=model_args["batch_size"],
        callbacks=[
            "accuracy",
            ("lr_scheduler",
             LRScheduler('CosineAnnealingLR',
                         T_max=model_args["n_epochs"] - 1)),
            ("early_stopping",
             EarlyStopping(monitor='valid_loss',
                           patience=model_args["patience"]))
        ],
        device=device,
        iterator_train__num_workers=20,
        iterator_train__pin_memory=True)

    return clf
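
# Minimal usage sketch (not part of the original example): the keys below
# mirror the model_args lookups inside get_deep_learning_model; the concrete
# values are illustrative assumptions, and valid_dataset / train_dataset are
# assumed to be braindecode windowed datasets prepared as in the examples
# further down.
example_model_args = {
    "model_type": "ShallowFBCSPNet",
    "device": "cuda:0",
    "seed": 20200220,
    "n_chans": 22,
    "n_classes": 4,
    "input_window_samples": 1000,
    "sfreq": 250,
    "criterion": torch.nn.NLLLoss,
    "lr": 0.0625 * 0.01,
    "weight_decay": 0,
    "batch_size": 64,
    "n_epochs": 100,
    "patience": 10,
}
# clf = get_deep_learning_model(example_model_args, valid_dataset)
# clf.fit(train_dataset, y=None, epochs=example_model_args["n_epochs"])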
Example #2
def exp(subject_id):
    dataset = MOABBDataset(dataset_name="BNCI2014001", subject_ids=subject_id)

    from braindecode.datautil.preprocess import exponential_moving_standardize
    from braindecode.datautil.preprocess import MNEPreproc, NumpyPreproc, preprocess

    low_cut_hz = 0.  # low cut frequency for filtering
    high_cut_hz = 49.  # high cut frequency for filtering
    # Parameters for exponential moving standardization
    factor_new = 1e-3
    init_block_size = 1000

    preprocessors = [
        # keep only EEG sensors
        MNEPreproc(fn='pick_types', eeg=True, meg=False, stim=False),
        # convert from volt to microvolt, directly modifying the numpy array
        NumpyPreproc(fn=lambda x: x * 1e6),
        # bandpass filter
        MNEPreproc(fn='filter', l_freq=low_cut_hz, h_freq=high_cut_hz),
        # exponential moving standardization
        # NumpyPreproc(fn=exponential_moving_standardize, factor_new=factor_new,
        #     init_block_size=init_block_size)
    ]

    # Transform the data
    preprocess(dataset, preprocessors)

    ######################################################################
    # Create model and compute windowing parameters
    # ---------------------------------------------
    #

    ######################################################################
    # In contrast to trialwise decoding, we first have to create the model
    # before we can cut the dataset into windows. This is because we need to
    # know the receptive field of the network to know how large the window
    # stride should be.
    #

    ######################################################################
    # We first choose the compute/input window size that will be fed to the
    # network during training. This has to be larger than the network's
    # receptive field size and can otherwise be chosen for computational
    # efficiency (see explanations in the beginning of this tutorial). Here we
    # choose 1000 samples, which is 4 seconds at the 250 Hz sampling rate.
    #

    input_window_samples = 1000

    ######################################################################
    # Now we create the model. To enable it to be used in cropped decoding
    # efficiently, we manually set the length of the final convolution layer
    # to some length that makes the receptive field of the ConvNet smaller
    # than ``input_window_samples`` (see ``final_conv_length=30`` in the model
    # definition).
    #

    import torch
    from braindecode.util import set_random_seeds
    from braindecode.models import ShallowFBCSPNet, Deep4Net

    cuda = torch.cuda.is_available()  # check if a GPU is available; if so, use it
    device = 'cuda:1' if cuda else 'cpu'
    if cuda:
        torch.backends.cudnn.benchmark = True
    seed = 20190706  # random seed to make results reproducible
    # Set random seed to be able to reproduce results
    set_random_seeds(seed=seed, cuda=cuda)

    n_classes = 4
    # Extract number of chans from dataset
    n_chans = dataset[0][0].shape[0]

    # model = Deep4Net(
    #     n_chans,
    #     n_classes,
    #     input_window_samples=input_window_samples,
    #     final_conv_length="auto",
    # )
    #
    #
    #
    # embedding_net = Deep4Net_origin(4, 22, input_window_samples)
    # model = FcClfNet(embedding_net)

    model = ShallowFBCSPNet(
        n_chans,
        n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=30,
    )

    print(model)

    # Send model to GPU
    if cuda:
        model.cuda(device)

    ######################################################################
    # And now we transform the model with strides into a model that outputs
    # dense predictions, so we can use it to obtain predictions for all
    # crops.
    #

    from braindecode.models.util import to_dense_prediction_model, get_output_shape
    to_dense_prediction_model(model)

    n_preds_per_input = get_output_shape(model, n_chans,
                                         input_window_samples)[2]
    print("n_preds_per_input : ", n_preds_per_input)
    print(model)

    ######################################################################
    # Cut the data into windows
    # -------------------------
    #

    ######################################################################
    # In contrast to trialwise decoding, we have to supply an explicit window size and window stride to the
    # ``create_windows_from_events`` function.
    #

    import numpy as np
    from braindecode.datautil.windowers import create_windows_from_events

    trial_start_offset_seconds = -0.5
    # Extract the sampling frequency and check that it is the same in all datasets
    sfreq = dataset.datasets[0].raw.info['sfreq']
    assert all([ds.raw.info['sfreq'] == sfreq for ds in dataset.datasets])

    # Calculate the trial start offset in samples.
    trial_start_offset_samples = int(trial_start_offset_seconds * sfreq)

    # Create windows using the braindecode function for this. It needs parameters
    # to define how trials should be used.
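    # A window stride equal to n_preds_per_input makes the dense predictions of
    # consecutive windows non-overlapping while still covering every sample of
    # each trial.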
    windows_dataset = create_windows_from_events(
        dataset,
        trial_start_offset_samples=trial_start_offset_samples,
        trial_stop_offset_samples=0,
        window_size_samples=input_window_samples,
        window_stride_samples=n_preds_per_input,
        drop_last_window=False,
        preload=True,
    )

    ######################################################################
    # Split the dataset
    # -----------------
    #
    # This code is the same as in trialwise decoding.
    #

    from braindecode.datasets.base import BaseConcatDataset
    splitted = windows_dataset.split('session')

    train_set = splitted['session_T']
    valid_set = splitted['session_E']

    lr = 0.0625 * 0.01
    weight_decay = 0
    batch_size = 8
    n_epochs = 100

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=batch_size,
                                               shuffle=True)
    # valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False)
    test_loader = torch.utils.data.DataLoader(valid_set,
                                              batch_size=batch_size,
                                              shuffle=False)

    # Send model to GPU
    if cuda:
        model.cuda(device)

    from torch.optim import lr_scheduler
    import torch.optim as optim

    import argparse
    parser = argparse.ArgumentParser(
        description='cross subject domain adaptation')
    parser.add_argument('--batch-size',
                        type=int,
                        default=50,
                        metavar='N',
                        help='input batch size for training (default: 50)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=50,
                        metavar='N',
                        help='input batch size for testing (default: 50)')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    args.gpuidx = 0
    args.seed = 0
    args.use_tensorboard = False
    args.save_model = False

    optimizer = optim.AdamW(model.parameters(),
                            lr=lr,
                            weight_decay=weight_decay)
    # scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=1)
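    # T_max = n_epochs - 1 matches the number of loop iterations below, so the
    # learning rate anneals to its minimum by the final epoch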
    scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=n_epochs - 1)

    import pandas as pd
    results_columns = ['test_loss', 'test_accuracy']
    df = pd.DataFrame(columns=results_columns)

    for epochidx in range(1, n_epochs):
        print(epochidx)
        train_crop(10, model, device, train_loader, optimizer, scheduler, cuda,
                   args.gpuidx)
        test_loss, test_score = eval_crop(model, device, test_loader)
        results = {'test_loss': test_loss, 'test_accuracy': test_score}
        # DataFrame.append was removed in pandas 2.0; use pd.concat instead
        df = pd.concat([df, pd.DataFrame([results])], ignore_index=True)
        print(results)

    return df
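
# Minimal usage sketch (not from the original source): exp() relies on helper
# functions train_crop() and eval_crop() that are defined elsewhere in the
# surrounding project.
# results_df = exp(subject_id=1)
# print(results_df.tail())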
def test_variable_length_trials_cropped_decoding():
    cuda = False
    set_random_seeds(seed=20210726, cuda=cuda)

    # create fake tuh abnormal dataset
    tuh = _TUHAbnormalMock(path='')
    # fake variable length trials by cropping first recording
    splits = tuh.split([[i] for i in range(len(tuh.datasets))])
    preprocess(
        concat_ds=splits['0'],
        preprocessors=[
            Preprocessor('crop', tmax=300),
        ],
    )
    variable_tuh = BaseConcatDataset(
        [splits[str(i)] for i in range(len(tuh.datasets))])
    # make sure we actually have different length trials
    assert any(np.diff([ds.raw.n_times for ds in variable_tuh.datasets]) != 0)

    # create windows
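    # the mapping below converts the boolean "pathological" target of the mock
    # TUH Abnormal recordings into integer class labels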
    variable_tuh_windows = create_fixed_length_windows(
        concat_ds=variable_tuh,
        window_size_samples=1000,
        window_stride_samples=1000,
        drop_last_window=False,
        mapping={
            True: 1,
            False: 0
        },
    )

    # create train and valid set
    splits = variable_tuh_windows.split(
        [[i] for i in range(len(variable_tuh_windows.datasets))])
    variable_tuh_windows_train = BaseConcatDataset(
        [splits[str(i)] for i in range(len(tuh.datasets) - 1)])
    variable_tuh_windows_valid = BaseConcatDataset(
        [splits[str(len(tuh.datasets) - 1)]])
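    # peek at one window to get its shape; x.shape[0] (the number of channels)
    # is used below to build the model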
    for x, y, ind in variable_tuh_windows_train:
        break
    train_split = predefined_split(variable_tuh_windows_valid)

    # initialize a model
    model = ShallowFBCSPNet(
        in_chans=x.shape[0],
        n_classes=len(tuh.description.pathological.unique()),
    )
    to_dense_prediction_model(model)
    if cuda:
        model.cuda()

    # create and train a classifier
    clf = EEGClassifier(
        model,
        cropped=True,
        criterion=CroppedLoss,
        criterion__loss_function=torch.nn.functional.nll_loss,
        optimizer=torch.optim.Adam,
        batch_size=32,
        callbacks=['accuracy'],
        train_split=train_split,
    )
    clf.fit(variable_tuh_windows_train, y=None, epochs=3)

    # make sure it does what we expect
    np.testing.assert_allclose(
        clf.history[:, 'train_loss'],
        np.array([
            0.689495325088501,
            0.1353449523448944,
            0.006638816092163324,
        ]),
        rtol=1e-1,
        atol=1e-1,
    )

    np.testing.assert_allclose(
        clf.history[:, 'valid_loss'],
        np.array([
            2.925871,
            3.611423,
            4.23494,
        ]),
        rtol=1e-1,
        atol=1e-1,
    )
Example #4
def test_trialwise_decoding():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=False)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel="auto",
                            verbose="WARNING") for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)
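    # scale the signal from volt to microvolt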
    raw.apply_function(lambda x: x * 1000000)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)
    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude="bads")

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    ds = EpochsDataset(epoched)

    train_set = Subset(ds, np.arange(60))
    valid_set = Subset(ds, np.arange(60, len(ds)))

    train_valid_split = predefined_split(valid_set)

    cuda = False
    if cuda:
        device = 'cuda'
    else:
        device = 'cpu'
    set_random_seeds(seed=20170629, cuda=cuda)
    n_classes = 2
    in_chans = train_set[0][0].shape[0]
    input_time_length = train_set[0][0].shape[1]
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_time_length=input_time_length,
        final_conv_length="auto",
    )
    if cuda:
        model.cuda()

    clf = EEGClassifier(
        model,
        cropped=False,
        criterion=torch.nn.NLLLoss,
        optimizer=torch.optim.Adam,
        train_split=train_valid_split,
        optimizer__lr=0.001,
        batch_size=30,
        callbacks=["accuracy"],
        device=device,
    )
    clf.fit(train_set, y=None, epochs=6)

    np.testing.assert_allclose(
        clf.history[:, 'train_loss'],
        np.array([
            1.1114974617958069, 1.0976492166519165, 0.668171226978302,
            0.5880511999130249, 0.7054798305034637, 0.5272344648838043
        ]),
        rtol=1e-4,
        atol=1e-5,
    )
    np.testing.assert_allclose(
        clf.history[:, 'valid_loss'],
        np.array([
            0.8467752933502197, 0.9804958701133728, 0.9134824872016907,
            0.8305345773696899, 0.8263336420059204, 0.8535978198051453
        ]),
        rtol=1e-4,
        atol=1e-5,
    )
    np.testing.assert_allclose(
        clf.history[:, 'train_accuracy'],
        np.array([
            0.7166666666666667, 0.6666666666666666, 0.85, 0.9333333333333333,
            0.9166666666666666, 0.9
        ]),
        rtol=1e-4,
        atol=1e-5,
    )
    np.testing.assert_allclose(
        clf.history[:, 'valid_accuracy'],
        np.array([
            0.6, 0.5666666666666667, 0.5333333333333333, 0.5333333333333333,
            0.6, 0.6666666666666666
        ]),
        rtol=1e-4,
        atol=1e-5,
    )
def test_eeg_classifier():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=False)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel="auto",
                            verbose="WARNING") for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude="bads")

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    # Convert data from volt to microvolt
    # PyTorch expects float32 for inputs and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_window_samples = 450
    n_classes = 2
    in_chans = X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=12,
    )
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_window_samples, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
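    # dimension 2 of the dense-prediction output is the number of predictions
    # the model produces per input window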
    n_preds_per_input = out.cpu().data.numpy().shape[2]

    train_set = create_from_X_y(X[:48],
                                y[:48],
                                drop_last_window=False,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    valid_set = create_from_X_y(X[48:60],
                                y[48:60],
                                drop_last_window=False,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    cropped_cb_train = CroppedTrialEpochScoring(
        "accuracy",
        name="train_trial_accuracy",
        lower_is_better=False,
        on_train=True,
    )

    cropped_cb_valid = CroppedTrialEpochScoring(
        "accuracy",
        on_train=False,
        name="valid_trial_accuracy",
        lower_is_better=False,
    )

    clf = EEGClassifier(
        model,
        cropped=True,
        criterion=CroppedLoss,
        criterion__loss_function=nll_loss,
        optimizer=optim.Adam,
        train_split=predefined_split(valid_set),
        batch_size=32,
        callbacks=[
            ("train_trial_accuracy", cropped_cb_train),
            ("valid_trial_accuracy", cropped_cb_valid),
        ],
    )

    clf.fit(train_set, y=None, epochs=4)

    expected = [{
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 1.6639312505722046
        }, {
            'train_batch_size': 32,
            'train_loss': 2.6161606311798096
        }, {
            'train_batch_size': 32,
            'train_loss': 1.627132773399353
        }, {
            'valid_batch_size': 24,
            'valid_loss': 0.9677614569664001
        }],
        'epoch':
        1,
        'train_batch_count':
        3,
        'train_loss':
        1.9690748850504558,
        'train_loss_best':
        True,
        'train_trial_accuracy':
        0.4791666666666667,
        'train_trial_accuracy_best':
        True,
        'valid_batch_count':
        1,
        'valid_loss':
        0.9677614569664001,
        'valid_loss_best':
        True,
        'valid_trial_accuracy':
        0.5,
        'valid_trial_accuracy_best':
        True
    }, {
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 1.3829222917556763
        }, {
            'train_batch_size': 32,
            'train_loss': 1.3123714923858643
        }, {
            'train_batch_size': 32,
            'train_loss': 1.0109959840774536
        }, {
            'valid_batch_size': 24,
            'valid_loss': 1.9435862302780151
        }],
        'epoch':
        2,
        'train_batch_count':
        3,
        'train_loss':
        1.2354299227396648,
        'train_loss_best':
        True,
        'train_trial_accuracy':
        0.5,
        'train_trial_accuracy_best':
        True,
        'valid_batch_count':
        1,
        'valid_loss':
        1.9435862302780151,
        'valid_loss_best':
        False,
        'valid_trial_accuracy':
        0.5,
        'valid_trial_accuracy_best':
        False
    }, {
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 1.172208547592163
        }, {
            'train_batch_size': 32,
            'train_loss': 0.8899562954902649
        }, {
            'train_batch_size': 32,
            'train_loss': 1.0232216119766235
        }, {
            'valid_batch_size': 24,
            'valid_loss': 0.9585554599761963
        }],
        'epoch':
        3,
        'train_batch_count':
        3,
        'train_loss':
        1.0284621516863506,
        'train_loss_best':
        True,
        'train_trial_accuracy':
        0.5,
        'train_trial_accuracy_best':
        False,
        'valid_batch_count':
        1,
        'valid_loss':
        0.9585554599761963,
        'valid_loss_best':
        True,
        'valid_trial_accuracy':
        0.5,
        'valid_trial_accuracy_best':
        False
    }, {
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 0.9693693518638611
        }, {
            'train_batch_size': 32,
            'train_loss': 0.900641918182373
        }, {
            'train_batch_size': 32,
            'train_loss': 0.8839665651321411
        }, {
            'valid_batch_size': 24,
            'valid_loss': 0.873468816280365
        }],
        'epoch':
        4,
        'train_batch_count':
        3,
        'train_loss':
        0.9179926117261251,
        'train_loss_best':
        True,
        'train_trial_accuracy':
        0.625,
        'train_trial_accuracy_best':
        True,
        'valid_batch_count':
        1,
        'valid_loss':
        0.873468816280365,
        'valid_loss_best':
        True,
        'valid_trial_accuracy':
        0.4166666666666667,
        'valid_trial_accuracy_best':
        False
    }]

    history_without_dur = [{k: v
                            for k, v in h.items() if k != "dur"}
                           for h in clf.history]
    assert_deep_allclose(expected, history_without_dur, atol=1e-3, rtol=1e-3)
Example #6
#net = EEGNetv4(n_chans, class_number, input_window_samples=input_window_samples, final_conv_length='auto', drop_prob=0.5)
#net = deepnet(n_chans,class_number,input_window_samples=wind,final_conv_length='auto',) # 81%

#net = deepnet_resnet(n_chans,n_classes,input_window_samples=input_window_samples,expand=True) # 50%

#net=d2lresnet() # 92%

#net=TSception(208)

#net=TSception(1000,n_chans,3,3,0.5)

img_size = [n_chans, wind]
#net = timm.create_model('visformer_tiny',num_classes=n_classes,in_chans=1,img_size=img_size)
if cuda:
    net.cuda()

lr = 0.005
weight_decay = 1e-10

criterion = torch.nn.CrossEntropyLoss()
#criterion = nn.NLLLoss()
#optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
#optimizer = torch.optim.Adadelta(net.parameters(), lr=lr)
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# Decay LR by a factor of 0.1 every 7 epochs
lr_schedulerr = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)

epoch_num = 100

for epoch in range(epoch_num):
def test_eeg_classifier():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=False)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel="auto",
                            verbose="WARNING") for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude="bads")

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    # Convert data from volt to microvolt
    # PyTorch expects float32 for inputs and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_window_samples = 450
    n_classes = 2
    in_chans = X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=12,
    )
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    # determine output size
    test_input = np_to_th(
        np.ones((2, in_chans, input_window_samples, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]

    train_set = create_from_X_y(X[:48],
                                y[:48],
                                drop_last_window=False,
                                sfreq=100,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    valid_set = create_from_X_y(X[48:60],
                                y[48:60],
                                drop_last_window=False,
                                sfreq=100,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    cropped_cb_train = CroppedTrialEpochScoring(
        "accuracy",
        name="train_trial_accuracy",
        lower_is_better=False,
        on_train=True,
    )

    cropped_cb_valid = CroppedTrialEpochScoring(
        "accuracy",
        on_train=False,
        name="valid_trial_accuracy",
        lower_is_better=False,
    )

    clf = EEGClassifier(
        model,
        cropped=True,
        criterion=CroppedLoss,
        criterion__loss_function=nll_loss,
        optimizer=optim.Adam,
        train_split=predefined_split(valid_set),
        batch_size=32,
        callbacks=[
            ("train_trial_accuracy", cropped_cb_train),
            ("valid_trial_accuracy", cropped_cb_valid),
        ],
    )

    clf.fit(train_set, y=None, epochs=4)

    # Reproduce this exact output by using pprint(history_without_dur) and adjusting
    # indentation of all lines after first
    expectedh = [{
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 1.4175944328308105
        }, {
            'train_batch_size': 32,
            'train_loss': 2.4414331912994385
        }, {
            'train_batch_size': 32,
            'train_loss': 1.476792812347412
        }, {
            'valid_batch_size': 24,
            'valid_loss': 1.2322615385055542
        }],
        'epoch':
        1,
        'train_batch_count':
        3,
        'train_loss':
        1.7786068121592205,
        'train_loss_best':
        True,
        'train_trial_accuracy':
        0.5,
        'train_trial_accuracy_best':
        True,
        'valid_batch_count':
        1,
        'valid_loss':
        1.2322615385055542,
        'valid_loss_best':
        True,
        'valid_trial_accuracy':
        0.5,
        'valid_trial_accuracy_best':
        True
    }, {
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 0.9673743844032288
        }, {
            'train_batch_size': 32,
            'train_loss': 1.218681812286377
        }, {
            'train_batch_size': 32,
            'train_loss': 1.5651403665542603
        }, {
            'valid_batch_size': 24,
            'valid_loss': 1.123423457145691
        }],
        'epoch':
        2,
        'train_batch_count':
        3,
        'train_loss':
        1.250398854414622,
        'train_loss_best':
        True,
        'train_trial_accuracy':
        0.5,
        'train_trial_accuracy_best':
        False,
        'valid_batch_count':
        1,
        'valid_loss':
        1.123423457145691,
        'valid_loss_best':
        True,
        'valid_trial_accuracy':
        0.5,
        'valid_trial_accuracy_best':
        False
    }, {
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 1.1562678813934326
        }, {
            'train_batch_size': 32,
            'train_loss': 1.5787755250930786
        }, {
            'train_batch_size': 32,
            'train_loss': 1.306514859199524
        }, {
            'valid_batch_size': 24,
            'valid_loss': 1.037418007850647
        }],
        'epoch':
        3,
        'train_batch_count':
        3,
        'train_loss':
        1.3471860885620117,
        'train_loss_best':
        False,
        'train_trial_accuracy':
        0.5208333333333334,
        'train_trial_accuracy_best':
        True,
        'valid_batch_count':
        1,
        'valid_loss':
        1.037418007850647,
        'valid_loss_best':
        True,
        'valid_trial_accuracy':
        0.5,
        'valid_trial_accuracy_best':
        False
    }, {
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 1.8480840921401978
        }, {
            'train_batch_size': 32,
            'train_loss': 1.0466501712799072
        }, {
            'train_batch_size': 32,
            'train_loss': 0.9813234210014343
        }, {
            'valid_batch_size': 24,
            'valid_loss': 0.9420649409294128
        }],
        'epoch':
        4,
        'train_batch_count':
        3,
        'train_loss':
        1.2920192281405132,
        'train_loss_best':
        False,
        'train_trial_accuracy':
        0.75,
        'train_trial_accuracy_best':
        True,
        'valid_batch_count':
        1,
        'valid_loss':
        0.9420649409294128,
        'valid_loss_best':
        True,
        'valid_trial_accuracy':
        0.4166666666666667,
        'valid_trial_accuracy_best':
        False
    }]

    history_without_dur = [{k: v
                            for k, v in h.items() if k != "dur"}
                           for h in clf.history]
    assert_deep_allclose(expectedh, history_without_dur, atol=1e-3, rtol=1e-3)
    return clf
Example #8
def test_trialwise_decoding():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(
        subject_id, event_codes, update_path=False
    )

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(
            path, preload=True, stim_channel="auto", verbose="WARNING"
        )
        for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)
    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(
        raw.info, meg=False, eeg=True, stim=False, eog=False, exclude="bads"
    )

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    # Convert data from volt to microvolt
    # PyTorch expects float32 for inputs and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    SignalAndTarget = namedtuple("SignalAndTarget", "X y")

    train_set = SignalAndTarget(X[:60], y=y[:60])
    test_set = SignalAndTarget(X[60:], y=y[60:])

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)
    n_classes = 2
    in_chans = train_set.X.shape[1]
    # final_conv_length = auto ensures we only get a single output in the time dimension
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_time_length=train_set.X.shape[2],
        final_conv_length="auto",
    )
    if cuda:
        model.cuda()

    optimizer = optim.Adam(model.parameters())

    rng = RandomState((2017, 6, 30))
    losses = []
    accuracies = []
    for i_epoch in range(6):
        i_trials_in_batch = get_balanced_batches(
            len(train_set.X), rng, shuffle=True, batch_size=30
        )
        # Set model to training mode
        model.train()
        for i_trials in i_trials_in_batch:
            # Have to add empty fourth dimension to X
            batch_X = train_set.X[i_trials][:, :, :, None]
            batch_y = train_set.y[i_trials]
            net_in = np_to_var(batch_X)
            if cuda:
                net_in = net_in.cuda()
            net_target = np_to_var(batch_y)
            if cuda:
                net_target = net_target.cuda()
            # Remove gradients of last backward pass from all parameters
            optimizer.zero_grad()
            # Compute outputs of the network
            outputs = model(net_in)
            # Compute the loss
            loss = F.nll_loss(outputs, net_target)
            # Do the backpropagation
            loss.backward()
            # Update parameters with the optimizer
            optimizer.step()

        # Print some statistics each epoch
        model.eval()
        print("Epoch {:d}".format(i_epoch))
        for setname, dataset in (("Train", train_set), ("Test", test_set)):
            # Here, we use the entire dataset at once, which is still possible
            # for such small datasets. Otherwise we would have to use batches.
            net_in = np_to_var(dataset.X[:, :, :, None])
            if cuda:
                net_in = net_in.cuda()
            net_target = np_to_var(dataset.y)
            if cuda:
                net_target = net_target.cuda()
            outputs = model(net_in)
            loss = F.nll_loss(outputs, net_target)
            losses.append(float(var_to_np(loss)))
            print("{:6s} Loss: {:.5f}".format(setname, float(var_to_np(loss))))
            predicted_labels = np.argmax(var_to_np(outputs), axis=1)
            accuracy = np.mean(dataset.y == predicted_labels)
            accuracies.append(accuracy * 100)
            print("{:6s} Accuracy: {:.1f}%".format(setname, accuracy * 100))

    np.testing.assert_allclose(
        np.array(losses),
        np.array(
            [
                0.91796708,
                1.2714895,
                0.4999536,
                0.94365239,
                0.39268905,
                0.89928466,
                0.37648854,
                0.8940345,
                0.35774994,
                0.86749417,
                0.35080773,
                0.80767328,
            ]
        ),
        rtol=1e-4,
        atol=1e-5,
    )

    np.testing.assert_allclose(
        np.array(accuracies),
        np.array(
            [
                55.0,
                63.33333333,
                71.66666667,
                63.33333333,
                81.66666667,
                60.0,
                78.33333333,
                63.33333333,
                83.33333333,
                66.66666667,
                80.0,
                66.66666667,
            ]
        ),
        rtol=1e-4,
        atol=1e-5,
    )
Example #9
def test_trialwise_decoding():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=False)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel="auto",
                            verbose="WARNING") for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)
    raw.apply_function(lambda x: x * 1000000)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)
    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude="bads")

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    ds = EpochsDataset(epoched)

    train_set = Subset(ds, np.arange(60))
    valid_set = Subset(ds, np.arange(60, len(ds)))

    train_valid_split = predefined_split(valid_set)

    cuda = False
    if cuda:
        device = 'cuda'
    else:
        device = 'cpu'
    set_random_seeds(seed=20170629, cuda=cuda)
    n_classes = 2
    in_chans = train_set[0][0].shape[0]
    input_window_samples = train_set[0][0].shape[1]
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_window_samples=input_window_samples,
        final_conv_length="auto",
    )
    if cuda:
        model.cuda()

    clf = EEGClassifier(
        model,
        cropped=False,
        criterion=torch.nn.NLLLoss,
        optimizer=torch.optim.Adam,
        train_split=train_valid_split,
        optimizer__lr=0.001,
        batch_size=30,
        callbacks=["accuracy"],
        device=device,
    )
    clf.fit(train_set, y=None, epochs=6)

    np.testing.assert_allclose(
        clf.history[:, 'train_loss'],
        np.array([
            1.501254916191101, 0.8498813807964325, 0.6930762231349945,
            0.7033905684947968, 0.7674900889396667, 0.47585436701774597
        ]),
        rtol=1e-4,
        atol=1e-5,
    )

    np.testing.assert_allclose(
        clf.history[:, 'valid_loss'],
        np.array([
            0.9057853817939758, 1.0028964281082153, 0.85847407579422,
            0.88216233253479, 0.8980739712715149, 0.8764537572860718
        ]),
        rtol=1e-4,
        atol=1e-5,
    )

    np.testing.assert_allclose(
        clf.history[:, 'train_accuracy'],
        np.array([
            0.7666666666666667, 0.7333333333333333, 0.8166666666666667,
            0.8333333333333334, 0.9333333333333333, 0.9333333333333333
        ]),
        rtol=1e-4,
        atol=1e-5,
    )

    np.testing.assert_allclose(
        clf.history[:, 'valid_accuracy'],
        np.array([
            0.5666666666666667,
            0.5666666666666667,
            0.6,
            0.6,
            0.6,
            0.6,
        ]),
        rtol=1e-4,
        atol=1e-5,
    )
Example #10
def test_experiment_class():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id, event_codes)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel='auto',
                            verbose='WARNING') for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude='bads')

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(raw,
                         events,
                         dict(hands=2, feet=3),
                         tmin=1,
                         tmax=4.1,
                         proj=False,
                         picks=eeg_channel_inds,
                         baseline=None,
                         preload=True)

    # Convert data from volt to microvolt
    # PyTorch expects float32 for inputs and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    train_set = SignalAndTarget(X[:60], y=y[:60])
    test_set = SignalAndTarget(X[60:], y=y[60:])

    train_set, valid_set = split_into_two_sets(train_set,
                                               first_set_fraction=0.8)

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = train_set.X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(in_chans=in_chans,
                            n_classes=n_classes,
                            input_time_length=input_time_length,
                            final_conv_length=12).create_network()
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    optimizer = optim.Adam(model.parameters())

    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    print("{:d} predictions per input/trial".format(n_preds_per_input))

    # Iterator is used to iterate over datasets both for training
    # and evaluation
    iterator = CropsFromTrialsIterator(batch_size=32,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)

    # Loss function takes predictions as they come out of the network and the targets
    # and returns a loss
    def loss_function(preds, targets):
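        # average the dense per-crop predictions within each trial (dim=2)
        # before computing the negative log-likelihood loss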
        return F.nll_loss(th.mean(preds, dim=2, keepdim=False), targets)

    # Could be used to apply some constraint on the models, then should be object
    # with apply method that accepts a module
    model_constraint = None
    # Monitors log the training progress
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length),
        RuntimeMonitor(),
    ]
    # Stop criterion determines when the first stop happens
    stop_criterion = MaxEpochs(4)
    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator,
                     loss_function,
                     optimizer,
                     model_constraint,
                     monitors,
                     stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     batch_modifier=None,
                     cuda=cuda)

    # need to setup python logging before to be able to see anything
    logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                        level=logging.DEBUG,
                        stream=sys.stdout)
    exp.run()

    compare_df = pd.read_csv(
        StringIO(
            'train_loss,valid_loss,test_loss,train_sample_misclass,valid_sample_misclass,'
            'test_sample_misclass,train_misclass,valid_misclass,test_misclass\n'
            '14.167170524597168,13.910758018493652,15.945781707763672,0.5,0.5,'
            '0.5333333333333333,0.5,0.5,0.5333333333333333\n'
            '1.1735659837722778,1.4342904090881348,1.8664429187774658,0.4629567736185384,'
            '0.5120320855614973,0.5336007130124778,0.5,0.5,0.5333333333333333\n'
            '1.3168460130691528,1.60431969165802,1.9181344509124756,0.49298128342245995,'
            '0.5109180035650625,0.531729055258467,0.5,0.5,0.5333333333333333\n'
            '0.8465543389320374,1.280307412147522,1.439755916595459,0.4413435828877005,'
            '0.5461229946524064,0.5283422459893048,0.47916666666666663,0.5,'
            '0.5333333333333333\n0.6977059841156006,1.1762590408325195,1.2779350280761719,'
            '0.40290775401069523,0.588903743315508,0.5307486631016043,0.5,0.5,0.5\n'
            '0.7934166193008423,1.1762590408325195,1.2779350280761719,0.4401069518716577,'
            '0.588903743315508,0.5307486631016043,0.5,0.5,0.5\n0.5982189178466797,'
            '0.8581563830375671,0.9598925113677979,0.32032085561497325,0.47660427807486627,'
            '0.4672905525846702,0.31666666666666665,0.5,0.4666666666666667\n0.5044312477111816,'
            '0.7133197784423828,0.8164243102073669,0.2591354723707665,0.45699643493761144,'
            '0.4393048128342246,0.16666666666666663,0.41666666666666663,0.43333333333333335\n'
            '0.4815250039100647,0.6736412644386292,0.8016976714134216,0.23413547237076648,'
            '0.39505347593582885,0.42932263814616756,0.15000000000000002,0.41666666666666663,0.5\n'
        ))

    for col in compare_df:
        np.testing.assert_allclose(np.array(compare_df[col]),
                                   exp.epochs_df[col],
                                   rtol=1e-3,
                                   atol=1e-4)
Example #11
def exp(subject_id):
    test_subj = np.r_[subject_id]
    print('test subj:' + str(test_subj))
    train_subj = np.setdiff1d(np.r_[1:11], test_subj)

    train_set = BaseConcatDataset([splitted[ids] for ids in train_subj])
    valid_set = BaseConcatDataset([splitted[ids] for ids in test_subj])

    # #
    model = ShallowFBCSPNet(
        n_chans,
        n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=30,
    )

    # #
    # embedding_net = Deep4Net_origin(4, 22, input_window_samples)
    # model = FcClfNet(embedding_net)

    print(model)

    # Send model to GPU
    if cuda:
        model.cuda()
    from braindecode.models.util import to_dense_prediction_model, get_output_shape
    to_dense_prediction_model(model)
    ######################################################################
    # Training
    # --------
    #

    ######################################################################
    # In contrast to trialwise decoding, we now have to supply
    # ``cropped=True`` to the EEGClassifier, ``CroppedLoss`` as the
    # criterion, and ``criterion__loss_function`` as the loss function
    # applied to the averaged predictions.
    #

    ######################################################################
    # .. note::
    #    In this tutorial, we use some default parameters that we
    #    have found to work well for motor decoding, however we strongly
    #    encourage you to perform your own hyperparameter optimization using
    #    cross validation on your training data.
    #

    from skorch.callbacks import LRScheduler
    from skorch.helper import predefined_split

    from braindecode import EEGClassifier
    from braindecode.training.losses import CroppedLoss
    from braindecode.training.scoring import trial_preds_from_window_preds

    # # These values we found good for shallow network:
    lr = 0.0625 * 0.01
    weight_decay = 0

    # # For deep4 they should be:
    # lr = 1 * 0.01
    # weight_decay = 0.5 * 0.001

    batch_size = 400
    n_epochs = 100

    clf = EEGClassifier(
        model,
        cropped=True,
        criterion=CroppedLoss,
        criterion__loss_function=torch.nn.functional.nll_loss,
        optimizer=torch.optim.AdamW,
        train_split=predefined_split(valid_set),
        optimizer__lr=lr,
        optimizer__weight_decay=weight_decay,
        iterator_train__shuffle=True,
        batch_size=batch_size,
        callbacks=[
            "accuracy",
            ("lr_scheduler",
             LRScheduler('CosineAnnealingLR', T_max=n_epochs - 1)),
        ],
        device=device,
    )
    # Model training for a specified number of epochs. `y` is None as it is already supplied
    # in the dataset.
    clf.fit(train_set, y=None, epochs=n_epochs)

    ######################################################################
    # Plot Results
    # ------------
    #

    ######################################################################
    # This is again the same code as in trialwise decoding.
    #
    # .. note::
    #     Note that the classification error and loss drop further than in
    #     the trialwise decoding tutorial.
    #

    import matplotlib.pyplot as plt
    from matplotlib.lines import Line2D
    import pandas as pd
    # Extract loss and accuracy values for plotting from history object
    results_columns = [
        'train_loss', 'valid_loss', 'train_accuracy', 'valid_accuracy'
    ]
    df = pd.DataFrame(clf.history[:, results_columns],
                      columns=results_columns,
                      index=clf.history[:, 'epoch'])

    # get percent of misclass for better visual comparison to loss
    df = df.assign(train_misclass=100 - 100 * df.train_accuracy,
                   valid_misclass=100 - 100 * df.valid_accuracy)

    plt.style.use('seaborn')
    fig, ax1 = plt.subplots(figsize=(8, 3))
    df.loc[:, ['train_loss', 'valid_loss']].plot(ax=ax1,
                                                 style=['-', ':'],
                                                 marker='o',
                                                 color='tab:blue',
                                                 legend=False,
                                                 fontsize=14)

    ax1.tick_params(axis='y', labelcolor='tab:blue', labelsize=14)
    ax1.set_ylabel("Loss", color='tab:blue', fontsize=14)

    ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis

    df.loc[:, ['train_misclass', 'valid_misclass']].plot(ax=ax2,
                                                         style=['-', ':'],
                                                         marker='o',
                                                         color='tab:red',
                                                         legend=False)
    ax2.tick_params(axis='y', labelcolor='tab:red', labelsize=14)
    ax2.set_ylabel("Misclassification Rate [%]", color='tab:red', fontsize=14)
    ax2.set_ylim(ax2.get_ylim()[0], 85)  # make some room for legend
    ax1.set_xlabel("Epoch", fontsize=14)

    # build a custom legend (solid line = train, dotted line = valid)
    handles = []
    handles.append(
        Line2D([0], [0],
               color='black',
               linewidth=1,
               linestyle='-',
               label='Train'))
    handles.append(
        Line2D([0], [0],
               color='black',
               linewidth=1,
               linestyle=':',
               label='Valid'))
    plt.legend(handles, [h.get_label() for h in handles], fontsize=14)
    plt.tight_layout()

    return df
Example #12
#
# embedding_net = Deep4Net_origin(4, 22, input_window_samples)
# model = FcClfNet(embedding_net)

model = ShallowFBCSPNet(
    n_chans,
    n_classes,
    input_window_samples=input_window_samples,
    final_conv_length=30,
)

print(model)

# Send model to GPU
if cuda:
    model.cuda(device)

######################################################################
# And now we transform the model with strides into a model that outputs
# dense predictions, so we can use it to obtain predictions for all
# crops.
#

from braindecode.models.util import to_dense_prediction_model, get_output_shape
to_dense_prediction_model(model)

######################################################################
# To know the model's receptive field, we calculate the shape of the model
# output for a dummy input.
#
# dummy_input = torch.ones(
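
# A hedged sketch, since the snippet above is truncated in the source: the
# number of dense predictions per input window can also be obtained with the
# get_output_shape helper imported above (n_chans and input_window_samples are
# assumed to be defined earlier in the original script).
n_preds_per_input = get_output_shape(model, n_chans, input_window_samples)[2]
print("n_preds_per_input:", n_preds_per_input)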
Example #13
def exp(subject_id):

    dataset = MOABBDataset(dataset_name="BNCI2014001", subject_ids=subject_id)

    from braindecode.datautil.preprocess import exponential_moving_standardize
    from braindecode.datautil.preprocess import MNEPreproc, NumpyPreproc, preprocess

    low_cut_hz = 0.  # low cut frequency for filtering
    high_cut_hz = 38.  # high cut frequency for filtering
    # Parameters for exponential moving standardization
    factor_new = 1e-3
    init_block_size = 1000

    preprocessors = [
        # keep only EEG sensors
        MNEPreproc(fn='pick_types', eeg=True, meg=False, stim=False),
        # convert from volt to microvolt, directly modifying the numpy array
        NumpyPreproc(fn=lambda x: x * 1e6),
        # bandpass filter
        MNEPreproc(fn='filter', l_freq=low_cut_hz, h_freq=high_cut_hz),
        # exponential moving standardization
        # NumpyPreproc(fn=exponential_moving_standardize, factor_new=factor_new,
        #     init_block_size=init_block_size)
    ]

    # Transform the data
    preprocess(dataset, preprocessors)

    ######################################################################
    # Create model and compute windowing parameters
    # ---------------------------------------------
    #

    ######################################################################
    # In contrast to trialwise decoding, we first have to create the model
    # before we can cut the dataset into windows. This is because we need to
    # know the receptive field of the network to know how large the window
    # stride should be.
    #

    ######################################################################
    # We first choose the compute/input window size that will be fed to the
    # network during training. This has to be larger than the network's
    # receptive field size and can otherwise be chosen for computational
    # efficiency (see explanations in the beginning of this tutorial). Here we
    # choose 1000 samples, which correspond to 4 seconds at the 250 Hz sampling rate.
    #

    input_window_samples = 1000
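
    # Quick sanity check (an aside, assuming the 250 Hz sampling rate of
    # BNCI2014001 referenced above): 1000 samples / 250 Hz = 4 seconds.
    assert input_window_samples / 250. == 4.0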

    ######################################################################
    # Now we create the model. To enable it to be used in cropped decoding
    # efficiently, we manually set the length of the final convolution layer
    # to some length that makes the receptive field of the ConvNet smaller
    # than ``input_window_samples`` (see ``final_conv_length=30`` in the model
    # definition).
    #

    import torch
    from braindecode.util import set_random_seeds
    from braindecode.models import ShallowFBCSPNet, Deep4Net

    cuda = torch.cuda.is_available()  # check if a GPU is available; if so, use it
    device = 'cuda:1' if cuda else 'cpu'
    if cuda:
        torch.backends.cudnn.benchmark = True
    seed = 20190706  # random seed to make results reproducible
    # Set random seed to be able to reproduce results
    set_random_seeds(seed=seed, cuda=cuda)

    n_classes = 4
    # Extract number of chans from dataset
    n_chans = dataset[0][0].shape[0]

    # model = Deep4Net(
    #     n_chans,
    #     n_classes,
    #     input_window_samples=input_window_samples,
    #     final_conv_length="auto",
    # )
    #
    #
    #
    # embedding_net = Deep4Net_origin(4, 22, input_window_samples)
    # model = FcClfNet(embedding_net)

    model = ShallowFBCSPNet(
        n_chans,
        n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=30,
    )

    print(model)

    # Send model to GPU
    if cuda:
        model.cuda(device)

    ######################################################################
    # And now we transform the model with strides into a model that outputs dense
    # predictions, so we can use it to obtain predictions for all
    # crops.
    #

    from braindecode.models.util import to_dense_prediction_model, get_output_shape
    to_dense_prediction_model(model)

    n_preds_per_input = get_output_shape(model, n_chans,
                                         input_window_samples)[2]
    print("n_preds_per_input : ", n_preds_per_input)
    print(model)
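
    # Aside (a hedged derivation): after ``to_dense_prediction_model`` the
    # network effectively slides over the input with stride 1, so its receptive
    # field is ``input_window_samples - n_preds_per_input + 1`` samples, which
    # must be smaller than the window for cropped decoding.
    print("receptive field (samples): ",
          input_window_samples - n_preds_per_input + 1)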

    ######################################################################
    # Cut the data into windows
    # -------------------------
    #

    ######################################################################
    # In contrast to trialwise decoding, we have to supply an explicit window size and window stride to the
    # ``create_windows_from_events`` function.
    #

    import numpy as np
    from braindecode.datautil.windowers import create_windows_from_events

    trial_start_offset_seconds = -0.5
    # Extract the sampling frequency and check that it is the same in all datasets
    sfreq = dataset.datasets[0].raw.info['sfreq']
    assert all([ds.raw.info['sfreq'] == sfreq for ds in dataset.datasets])

    # Calculate the trial start offset in samples.
    trial_start_offset_samples = int(trial_start_offset_seconds * sfreq)

    # Create windows using the braindecode function for this. It needs parameters
    # that define how trials should be used.
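    # Using ``window_stride_samples=n_preds_per_input`` makes the prediction
    # blocks of consecutive windows line up back-to-back, so together they
    # cover each trial without redundant computation.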
    windows_dataset = create_windows_from_events(
        dataset,
        trial_start_offset_samples=trial_start_offset_samples,
        trial_stop_offset_samples=0,
        window_size_samples=input_window_samples,
        window_stride_samples=n_preds_per_input,
        drop_last_window=False,
        preload=True,
    )

    ######################################################################
    # Split the dataset
    # -----------------
    #
    # This code is the same as in trialwise decoding.
    #

    from braindecode.datasets.base import BaseConcatDataset
    splitted = windows_dataset.split('session')

    train_set = splitted['session_T']
    valid_set = splitted['session_E']

    ######################################################################
    # In contrast to trialwise decoding, we now have to supply
    # ``cropped=True`` to the EEGClassifier, ``CroppedLoss`` as the
    # criterion, and ``criterion__loss_function`` as the loss function
    # applied to the averaged predictions.
    #
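
    ######################################################################
    # Conceptually (a hedged sketch, not braindecode's exact implementation),
    # the cropped loss averages the dense per-crop outputs over the time
    # dimension before handing them to the base loss function:
    #

    def _cropped_loss_sketch(loss_function, preds, targets):
        # preds has shape (batch, n_classes, n_preds_per_input); average the
        # predictions over the crop/time dimension, then apply the base loss
        return loss_function(torch.mean(preds, dim=2), targets)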

    ######################################################################
    # .. note::
    #    In this tutorial, we use some default parameters that we
    #    have found to work well for motor decoding, however we strongly
    #    encourage you to perform your own hyperparameter optimization using
    #    cross validation on your training data.
    #

    from skorch.callbacks import LRScheduler
    from skorch.helper import predefined_split

    from braindecode import EEGClassifier
    from braindecode.training.losses import CroppedLoss
    from braindecode.training.scoring import trial_preds_from_window_preds

    # These values we found to work well for the shallow network:
    lr = 0.0625 * 0.01
    weight_decay = 0

    # For the deep4 network they should be:
    # lr = 1 * 0.01
    # weight_decay = 0.5 * 0.001
    batch_size = 8
    n_epochs = 100

    clf = EEGClassifier(
        model,
        cropped=True,
        criterion=CroppedLoss,
        criterion__loss_function=torch.nn.functional.nll_loss,
        optimizer=torch.optim.AdamW,
        train_split=predefined_split(valid_set),
        optimizer__lr=lr,
        optimizer__weight_decay=weight_decay,
        iterator_train__shuffle=True,
        batch_size=batch_size,
        callbacks=[
            "accuracy",
            ("lr_scheduler",
             LRScheduler('CosineAnnealingLR', T_max=n_epochs - 1)),
        ],
        device=device,
    )
    # Model training for a specified number of epochs. `y` is None as it is already supplied
    # in the dataset.
    clf.fit(train_set, y=None, epochs=n_epochs)

    ######################################################################
    # Plot Results
    # ------------
    #

    ######################################################################
    # This is again the same code as in trialwise decoding.
    #
    # .. note::
    #     Note that the classification error and loss drop further than in the
    #     trialwise decoding tutorial.
    #

    import matplotlib.pyplot as plt
    from matplotlib.lines import Line2D
    import pandas as pd
    # Extract loss and accuracy values for plotting from history object
    results_columns = [
        'train_loss', 'valid_loss', 'train_accuracy', 'valid_accuracy'
    ]
    df = pd.DataFrame(clf.history[:, results_columns],
                      columns=results_columns,
                      index=clf.history[:, 'epoch'])

    # get percent of misclass for better visual comparison to loss
    df = df.assign(train_misclass=100 - 100 * df.train_accuracy,
                   valid_misclass=100 - 100 * df.valid_accuracy)

    plt.style.use('seaborn')
    fig, ax1 = plt.subplots(figsize=(8, 3))
    df.loc[:, ['train_loss', 'valid_loss']].plot(ax=ax1,
                                                 style=['-', ':'],
                                                 marker='o',
                                                 color='tab:blue',
                                                 legend=False,
                                                 fontsize=14)

    ax1.tick_params(axis='y', labelcolor='tab:blue', labelsize=14)
    ax1.set_ylabel("Loss", color='tab:blue', fontsize=14)

    ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis

    df.loc[:, ['train_misclass', 'valid_misclass']].plot(ax=ax2,
                                                         style=['-', ':'],
                                                         marker='o',
                                                         color='tab:red',
                                                         legend=False)
    ax2.tick_params(axis='y', labelcolor='tab:red', labelsize=14)
    ax2.set_ylabel("Misclassification Rate [%]", color='tab:red', fontsize=14)
    ax2.set_ylim(ax2.get_ylim()[0], 85)  # make some room for legend
    ax1.set_xlabel("Epoch", fontsize=14)

    # Build proxy legend handles for the train (solid) and valid (dotted) lines
    handles = []
    handles.append(
        Line2D([0], [0],
               color='black',
               linewidth=1,
               linestyle='-',
               label='Train'))
    handles.append(
        Line2D([0], [0],
               color='black',
               linewidth=1,
               linestyle=':',
               label='Valid'))
    plt.legend(handles, [h.get_label() for h in handles], fontsize=14)
    plt.tight_layout()

    return df
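

######################################################################
# A possible driver for the cropped-decoding experiment above (a hedged
# sketch: ``exp`` is the function defined above, and BNCI2014001 provides
# subjects 1-9).
#

if __name__ == "__main__":
    all_results = {subject_id: exp(subject_id) for subject_id in range(1, 10)}
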
def test_eeg_classifier():
    # 5,6,9,10,13,14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=False)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel="auto",
                            verbose="WARNING") for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude="bads")

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    # Convert data from volt to microvolt
    # Pytorch expects float32 for input and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_window_samples = 450
    n_classes = 2
    in_chans = X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=12,
    )
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_window_samples, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]

    train_set = create_from_X_y(X[:48],
                                y[:48],
                                drop_last_window=False,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    valid_set = create_from_X_y(X[48:60],
                                y[48:60],
                                drop_last_window=False,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    cropped_cb_train = CroppedTrialEpochScoring(
        "accuracy",
        name="train_trial_accuracy",
        lower_is_better=False,
        on_train=True,
    )

    cropped_cb_valid = CroppedTrialEpochScoring(
        "accuracy",
        on_train=False,
        name="valid_trial_accuracy",
        lower_is_better=False,
    )

    clf = EEGClassifier(
        model,
        criterion=CroppedLoss,
        criterion__loss_function=nll_loss,
        optimizer=optim.Adam,
        train_split=predefined_split(valid_set),
        batch_size=32,
        callbacks=[
            ("train_trial_accuracy", cropped_cb_train),
            ("valid_trial_accuracy", cropped_cb_valid),
        ],
    )

    clf.fit(train_set, y=None, epochs=4)

    expected = [
        {
            "batches": [
                {
                    "train_loss": 1.9391239881515503,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 2.895704507827759,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 1.0713893175125122,
                    "train_batch_size": 32
                },
                {
                    "valid_loss": 1.1811838150024414,
                    "valid_batch_size": 24
                },
            ],
            "epoch":
            1,
            "train_batch_count":
            3,
            "valid_batch_count":
            1,
            "train_loss":
            1.9687392711639404,
            "train_loss_best":
            True,
            "valid_loss":
            1.1811838150024414,
            "valid_loss_best":
            True,
            "train_trial_accuracy":
            0.4791666666666667,
            "train_trial_accuracy_best":
            True,
            "valid_trial_accuracy":
            0.5,
            "valid_trial_accuracy_best":
            True,
        },
        {
            "batches": [
                {
                    "train_loss": 1.5488793849945068,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 1.1174801588058472,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 1.1525697708129883,
                    "train_batch_size": 32
                },
                {
                    "valid_loss": 2.202029228210449,
                    "valid_batch_size": 24
                },
            ],
            "epoch":
            2,
            "train_batch_count":
            3,
            "valid_batch_count":
            1,
            "train_loss":
            1.2729764382044475,
            "train_loss_best":
            True,
            "valid_loss":
            2.202029228210449,
            "valid_loss_best":
            False,
            "train_trial_accuracy":
            0.5,
            "train_trial_accuracy_best":
            True,
            "valid_trial_accuracy":
            0.5,
            "valid_trial_accuracy_best":
            False,
        },
        {
            "batches": [
                {
                    "train_loss": 1.0049529075622559,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 1.0266971588134766,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 1.0799436569213867,
                    "train_batch_size": 32
                },
                {
                    "valid_loss": 1.0638500452041626,
                    "valid_batch_size": 24
                },
            ],
            "epoch":
            3,
            "train_batch_count":
            3,
            "valid_batch_count":
            1,
            "train_loss":
            1.0371979077657063,
            "train_loss_best":
            True,
            "valid_loss":
            1.0638500452041626,
            "valid_loss_best":
            True,
            "train_trial_accuracy":
            0.5,
            "train_trial_accuracy_best":
            False,
            "valid_trial_accuracy":
            0.5,
            "valid_trial_accuracy_best":
            False,
        },
        {
            "batches": [
                {
                    "train_loss": 1.0052555799484253,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 0.8479514718055725,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 0.9589881300926208,
                    "train_batch_size": 32
                },
                {
                    "valid_loss": 0.8794112801551819,
                    "valid_batch_size": 24
                },
            ],
            "epoch":
            4,
            "train_batch_count":
            3,
            "valid_batch_count":
            1,
            "train_loss":
            0.9373983939488729,
            "train_loss_best":
            True,
            "valid_loss":
            0.8794112801551819,
            "valid_loss_best":
            True,
            "train_trial_accuracy":
            0.5,
            "train_trial_accuracy_best":
            False,
            "valid_trial_accuracy":
            0.5,
            "valid_trial_accuracy_best":
            False,
        },
    ]

    history_without_dur = [{k: v
                            for k, v in h.items() if k != "dur"}
                           for h in clf.history]
    assert_deep_allclose(expected, history_without_dur, atol=1e-3, rtol=1e-3)
Example #15
0
def exp(subject_id):
    import torch
    test_subj = np.r_[subject_id]

    print('test subj:' + str(test_subj))

    # Hold out part of session_T for validation (a 90/10 train/valid split here)
    train_size = int(0.9 * len(splitted['session_T']))
    test_size = len(splitted['session_T']) - train_size

    # train_set, valid_set = torch.utils.data.random_split(splitted['session_T'], [train_size, test_size])
    train_set = splitted['session_T']
    test_set = splitted['session_E']

    # model = Deep4Net(
    #     n_chans,
    #     n_classes,
    #     input_window_samples=input_window_samples,
    #     final_conv_length="auto",
    # )

    from torch.utils.data import Dataset, ConcatDataset

    crop_size = 1000
    # embedding_net = Deep4Net_origin(n_classes, n_chans, crop_size)
    # model = FcClfNet(embedding_net)

    model = ShallowFBCSPNet(
        n_chans,
        n_classes,
        input_window_samples=input_window_samples,
        final_conv_length='auto',
    )

    from braindecode.models.util import to_dense_prediction_model, get_output_shape
    to_dense_prediction_model(model)

    n_preds_per_input = get_output_shape(model, 22, input_window_samples)[2]
    print("n_preds_per_input : ", n_preds_per_input)
    print(model)


    batch_size = 8
    epochs = 200

    lr = 0.0625 * 0.01
    weight_decay = 0

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
    # valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)

    # Send model to GPU
    if cuda:
        model.cuda()

    from torch.optim import lr_scheduler
    import torch.optim as optim

    import argparse
    parser = argparse.ArgumentParser(description='cross subject domain adaptation')
    parser.add_argument('--batch-size', type=int, default=50, metavar='N',
                        help='input batch size for training (default: 50)')
    parser.add_argument('--test-batch-size', type=int, default=50, metavar='N',
                        help='input batch size for testing (default: 50)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    args.gpuidx = 0
    args.seed = 0
    args.use_tensorboard = False
    args.save_model = False

    optimizer = optim.AdamW(model.parameters(), lr=0.01, weight_decay=0.5 * 0.001)
    # scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=1)
    scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs - 1)

    import pandas as pd
    results_columns = ['test_loss', 'test_accuracy']
    df = pd.DataFrame(columns=results_columns)

    for epochidx in range(1, epochs):
        print(epochidx)
        train_crop(10, model, device, train_loader, optimizer, scheduler, cuda, args.gpuidx)
        test_loss, test_score = eval_crop(model, device, test_loader)
        results = {'test_loss': test_loss, 'test_accuracy': test_score}
        # `DataFrame.append` was removed in recent pandas; use pd.concat instead
        df = pd.concat([df, pd.DataFrame([results])], ignore_index=True)
        print(results)

    return df
def create_example_model(n_channels,
                         n_classes,
                         window_len_samples,
                         kind='shallow',
                         cuda=False):
    """Create model, loss and optimizer.

    Parameters
    ----------
    n_channels : int
        Number of channels in the input.
    n_classes : int
        Number of classes in the output.
    window_len_samples : int
        Window length of the input, in samples.
    kind : str
        'shallow' or 'deep'.
    cuda : bool
        If True, move the model to a CUDA device.

    Returns
    -------
    model : torch.nn.Module
        Model to train.
    loss :
        Loss function
    optimizer :
        Optimizer
    """
    if kind == 'shallow':
        model = ShallowFBCSPNet(n_channels,
                                n_classes,
                                input_window_samples=window_len_samples,
                                n_filters_time=40,
                                filter_time_length=25,
                                n_filters_spat=40,
                                pool_time_length=75,
                                pool_time_stride=15,
                                final_conv_length='auto',
                                split_first_layer=True,
                                batch_norm=True,
                                batch_norm_alpha=0.1,
                                drop_prob=0.5)
    elif kind == 'deep':
        model = Deep4Net(n_channels,
                         n_classes,
                         input_window_samples=window_len_samples,
                         final_conv_length='auto',
                         n_filters_time=25,
                         n_filters_spat=25,
                         filter_time_length=10,
                         pool_time_length=3,
                         pool_time_stride=3,
                         n_filters_2=50,
                         filter_length_2=10,
                         n_filters_3=100,
                         filter_length_3=10,
                         n_filters_4=200,
                         filter_length_4=10,
                         first_pool_mode="max",
                         later_pool_mode="max",
                         drop_prob=0.5,
                         double_time_convs=False,
                         split_first_layer=True,
                         batch_norm=True,
                         batch_norm_alpha=0.1,
                         stride_before_pool=False)
    else:
        raise ValueError("kind must be 'shallow' or 'deep'")

    if cuda:
        model.cuda()

    optimizer = optim.Adam(model.parameters())
    loss = nn.NLLLoss()

    return model, loss, optimizer
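

######################################################################
# A short usage sketch (hypothetical values: 22 EEG channels, 4 classes and
# 1000-sample windows, matching the BCI examples above).
#

example_model, example_loss, example_optimizer = create_example_model(
    22, 4, 1000, kind='shallow', cuda=False)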
Example #17
0
def test_eeg_classifier():
    # 5,6,9,10,13,14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=False)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel="auto",
                            verbose="WARNING") for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude="bads")

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    # Convert data from volt to microvolt
    # Pytorch expects float32 for input and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_window_samples = 450
    n_classes = 2
    in_chans = X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=12,
    )
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_window_samples, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]

    train_set = create_from_X_y(X[:48],
                                y[:48],
                                drop_last_window=False,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    valid_set = create_from_X_y(X[48:60],
                                y[48:60],
                                drop_last_window=False,
                                window_size_samples=input_window_samples,
                                window_stride_samples=n_preds_per_input)

    cropped_cb_train = CroppedTrialEpochScoring(
        "accuracy",
        name="train_trial_accuracy",
        lower_is_better=False,
        on_train=True,
    )

    cropped_cb_valid = CroppedTrialEpochScoring(
        "accuracy",
        on_train=False,
        name="valid_trial_accuracy",
        lower_is_better=False,
    )

    clf = EEGClassifier(
        model,
        criterion=CroppedLoss,
        criterion__loss_function=nll_loss,
        optimizer=optim.Adam,
        train_split=predefined_split(valid_set),
        batch_size=32,
        callbacks=[
            ("train_trial_accuracy", cropped_cb_train),
            ("valid_trial_accuracy", cropped_cb_valid),
        ],
    )

    clf.fit(train_set, y=None, epochs=4)

    expected = [{
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 1.9391239881515503
        }, {
            'train_batch_size': 32,
            'train_loss': 2.895704507827759
        }, {
            'train_batch_size': 32,
            'train_loss': 1.0713887214660645
        }, {
            'valid_batch_size': 24,
            'valid_loss': 1.18110191822052
        }],
        'epoch':
        1,
        'train_batch_count':
        3,
        'train_loss':
        1.9687390724817913,
        'train_loss_best':
        True,
        'train_trial_accuracy':
        0.4791666666666667,
        'train_trial_accuracy_best':
        True,
        'valid_batch_count':
        1,
        'valid_loss':
        1.18110191822052,
        'valid_loss_best':
        True,
        'valid_trial_accuracy':
        0.5,
        'valid_trial_accuracy_best':
        True
    }, {
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 1.6741573810577393
        }, {
            'train_batch_size': 32,
            'train_loss': 0.9984264373779297
        }, {
            'train_batch_size': 32,
            'train_loss': 1.1340471506118774
        }, {
            'valid_batch_size': 24,
            'valid_loss': 2.5375664234161377
        }],
        'epoch':
        2,
        'train_batch_count':
        3,
        'train_loss':
        1.2688769896825154,
        'train_loss_best':
        True,
        'train_trial_accuracy':
        0.5,
        'train_trial_accuracy_best':
        True,
        'valid_batch_count':
        1,
        'valid_loss':
        2.5375664234161377,
        'valid_loss_best':
        False,
        'valid_trial_accuracy':
        0.5,
        'valid_trial_accuracy_best':
        False
    }, {
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 0.8795645833015442
        }, {
            'train_batch_size': 32,
            'train_loss': 1.0339491367340088
        }, {
            'train_batch_size': 32,
            'train_loss': 1.19275963306427
        }, {
            'valid_batch_size': 24,
            'valid_loss': 1.655737042427063
        }],
        'epoch':
        3,
        'train_batch_count':
        3,
        'train_loss':
        1.0354244510332744,
        'train_loss_best':
        True,
        'train_trial_accuracy':
        0.5,
        'train_trial_accuracy_best':
        False,
        'valid_batch_count':
        1,
        'valid_loss':
        1.655737042427063,
        'valid_loss_best':
        False,
        'valid_trial_accuracy':
        0.5,
        'valid_trial_accuracy_best':
        False
    }, {
        'batches': [{
            'train_batch_size': 32,
            'train_loss': 1.1963350772857666
        }, {
            'train_batch_size': 32,
            'train_loss': 0.8621770143508911
        }, {
            'train_batch_size': 32,
            'train_loss': 1.099318265914917
        }, {
            'valid_batch_size': 24,
            'valid_loss': 1.0293445587158203
        }],
        'epoch':
        4,
        'train_batch_count':
        3,
        'train_loss':
        1.0526101191838582,
        'train_loss_best':
        False,
        'train_trial_accuracy':
        0.625,
        'train_trial_accuracy_best':
        True,
        'valid_batch_count':
        1,
        'valid_loss':
        1.0293445587158203,
        'valid_loss_best':
        True,
        'valid_trial_accuracy':
        0.25,
        'valid_trial_accuracy_best':
        False
    }]

    history_without_dur = [{k: v
                            for k, v in h.items() if k != "dur"}
                           for h in clf.history]
    assert_deep_allclose(expected, history_without_dur, atol=1e-3, rtol=1e-3)
Example #18
0
def test_trialwise_decoding():
    # 5,6,9,10,13,14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=False)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel="auto",
                            verbose="WARNING") for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)
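    # Scale the signals from volt to microvolt (factor 1e6)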
    raw.apply_function(lambda x: x * 1000000)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)
    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude="bads")

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    ds = EpochsDataset(epoched)

    train_set = Subset(ds, np.arange(60))
    valid_set = Subset(ds, np.arange(60, len(ds)))

    train_valid_split = predefined_split(valid_set)

    cuda = False
    if cuda:
        device = 'cuda'
    else:
        device = 'cpu'
    set_random_seeds(seed=20170629, cuda=cuda)
    n_classes = 2
    in_chans = train_set[0][0].shape[0]
    input_window_samples = train_set[0][0].shape[1]
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_window_samples=input_window_samples,
        final_conv_length="auto",
    )
    if cuda:
        model.cuda()

    clf = EEGClassifier(
        model,
        cropped=False,
        criterion=torch.nn.NLLLoss,
        optimizer=torch.optim.Adam,
        train_split=train_valid_split,
        optimizer__lr=0.001,
        batch_size=30,
        callbacks=["accuracy"],
        device=device,
    )
    clf.fit(train_set, y=None, epochs=6)

    np.testing.assert_allclose(
        clf.history[:, 'train_loss'],
        np.array([
            1.1114967465400696, 1.0180627405643463, 0.8020123243331909,
            0.8934760391712189, 0.8401200771331787, 0.5898805856704712
        ]),
        rtol=1e-4,
        atol=1e-5,
    )
    np.testing.assert_allclose(
        clf.history[:, 'valid_loss'],
        np.array([
            0.8467752933502197, 1.0855580568313599, 0.873993992805481,
            0.8403236865997314, 0.8534432053565979, 0.8854812383651733
        ]),
        rtol=1e-4,
        atol=1e-5,
    )
    np.testing.assert_allclose(
        clf.history[:, 'train_accuracy'],
        np.array(
            [0.7166666666666667, 0.6666666666666666, 0.8, 0.9, 0.95, 0.95]),
        rtol=1e-4,
        atol=1e-5,
    )
    np.testing.assert_allclose(
        clf.history[:, 'valid_accuracy'],
        np.array([
            0.6, 0.5666666666666667, 0.5666666666666667, 0.5,
            0.5333333333333333, 0.6333333333333333
        ]),
        rtol=1e-4,
        atol=1e-5,
    )
def test_eeg_classifier():
    # 5,6,9,10,13,14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=False)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel="auto",
                            verbose="WARNING") for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude="bads")

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    # Convert data from volt to microvolt
    # Pytorch expects float32 for input and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_time_length=input_time_length,
        final_conv_length=12,
    )
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]

    train_set = CroppedXyDataset(X[:60],
                                 y=y[:60],
                                 input_time_length=input_time_length,
                                 n_preds_per_input=n_preds_per_input)

    cropped_cb_train = CroppedTrialEpochScoring(
        "accuracy",
        name="train_trial_accuracy",
        lower_is_better=False,
        on_train=True,
    )

    cropped_cb_valid = CroppedTrialEpochScoring(
        "accuracy",
        on_train=False,
        name="valid_trial_accuracy",
        lower_is_better=False,
    )

    clf = EEGClassifier(
        model,
        criterion=CroppedNLLLoss,
        optimizer=optim.Adam,
        train_split=TrainTestSplit(
            train_size=0.8,
            input_time_length=input_time_length,
            n_preds_per_input=n_preds_per_input,
        ),
        batch_size=32,
        callbacks=[
            ("train_trial_accuracy", cropped_cb_train),
            ("valid_trial_accuracy", cropped_cb_valid),
        ],
    )

    clf.fit(train_set.X, train_set.y, epochs=4)

    expected = [
        {
            "batches": [
                {
                    "train_loss": 2.0750882625579834,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 3.09424090385437,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 1.079931378364563,
                    "train_batch_size": 32
                },
                {
                    "valid_loss": 2.3208131790161133,
                    "valid_batch_size": 24
                },
            ],
            "epoch":
            1,
            "train_batch_count":
            3,
            "valid_batch_count":
            1,
            "train_loss":
            2.083086848258972,
            "train_loss_best":
            True,
            "valid_loss":
            2.3208131790161133,
            "valid_loss_best":
            True,
            "train_trial_accuracy":
            0.5,
            "train_trial_accuracy_best":
            True,
            "valid_trial_accuracy":
            0.5,
            "valid_trial_accuracy_best":
            True,
        },
        {
            "batches": [
                {
                    "train_loss": 1.827332615852356,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 1.4135494232177734,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 1.1295170783996582,
                    "train_batch_size": 32
                },
                {
                    "valid_loss": 1.4291356801986694,
                    "valid_batch_size": 24
                },
            ],
            "epoch":
            2,
            "train_batch_count":
            3,
            "valid_batch_count":
            1,
            "train_loss":
            1.4567997058232625,
            "train_loss_best":
            True,
            "valid_loss":
            1.4291356801986694,
            "valid_loss_best":
            True,
            "train_trial_accuracy":
            0.5,
            "train_trial_accuracy_best":
            False,
            "valid_trial_accuracy":
            0.5,
            "valid_trial_accuracy_best":
            False,
        },
        {
            "batches": [
                {
                    "train_loss": 1.1495535373687744,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 2.356320381164551,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 0.9548418521881104,
                    "train_batch_size": 32
                },
                {
                    "valid_loss": 2.248246908187866,
                    "valid_batch_size": 24
                },
            ],
            "epoch":
            3,
            "train_batch_count":
            3,
            "valid_batch_count":
            1,
            "train_loss":
            1.4869052569071453,
            "train_loss_best":
            False,
            "valid_loss":
            2.248246908187866,
            "valid_loss_best":
            False,
            "train_trial_accuracy":
            0.5,
            "train_trial_accuracy_best":
            False,
            "valid_trial_accuracy":
            0.5,
            "valid_trial_accuracy_best":
            False,
        },
        {
            "batches": [
                {
                    "train_loss": 1.2157528400421143,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 1.1182057857513428,
                    "train_batch_size": 32
                },
                {
                    "train_loss": 0.9163083434104919,
                    "train_batch_size": 32
                },
                {
                    "valid_loss": 0.9732739925384521,
                    "valid_batch_size": 24
                },
            ],
            "epoch":
            4,
            "train_batch_count":
            3,
            "valid_batch_count":
            1,
            "train_loss":
            1.083422323067983,
            "train_loss_best":
            True,
            "valid_loss":
            0.9732739925384521,
            "valid_loss_best":
            True,
            "train_trial_accuracy":
            0.5,
            "train_trial_accuracy_best":
            False,
            "valid_trial_accuracy":
            0.5,
            "valid_trial_accuracy_best":
            False,
        },
    ]

    history_without_dur = [{k: v
                            for k, v in h.items() if k != "dur"}
                           for h in clf.history]
    assert_deep_allclose(history_without_dur, expected, atol=1e-3, rtol=1e-3)
Example #20
0
def test_post_epoch_train_scoring():
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    n_classes = 2

    class EEGDataSet(Dataset):
        def __init__(self, X, y):
            self.X = X
            if self.X.ndim == 3:
                self.X = self.X[:, :, :, None]
            self.y = y

        def __len__(self):
            return len(self.X)

        def __getitem__(self, idx):
            return self.X[idx], self.y[idx]

    X, y = sklearn.datasets.make_classification(
        40, (3 * 100), n_informative=3 * 50, n_classes=2
    )
    X = X.reshape(40, 3, 100).astype(np.float32)

    in_chans = X.shape[1]

    train_set = EEGDataSet(X, y)
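
    # TestCallback below recomputes accuracy and F1 on the training set after
    # each epoch and checks that they agree with the values recorded by the
    # PostEpochTrainScoring callbacks in ``clf.history``.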

    class TestCallback(Callback):
        def on_epoch_end(self, net, *args, **kwargs):
            preds = net.predict(train_set.X)
            y_true = train_set.y
            np.testing.assert_allclose(
                clf.history[-1]["train_f1"],
                f1_score(y_true, preds),
                rtol=1e-4,
                atol=1e-4,
            )
            np.testing.assert_allclose(
                clf.history[-1]["train_acc"],
                accuracy_score(y_true, preds),
                rtol=1e-4,
                atol=1e-4,
            )

    set_random_seeds(20200114, cuda)

    # final_conv_length = auto ensures
    # we only get a single output in the time dimension
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_window_samples=train_set.X.shape[2],
        pool_time_stride=1,
        pool_time_length=2,
        final_conv_length="auto",
    )
    if cuda:
        model.cuda()

    clf = EEGClassifier(
        model,
        criterion=torch.nn.NLLLoss,
        optimizer=optim.AdamW,
        train_split=None,
        optimizer__lr=0.0625 * 0.01,
        optimizer__weight_decay=0,
        batch_size=64,
        callbacks=[
            (
                "train_accuracy",
                PostEpochTrainScoring(
                    "accuracy", lower_is_better=False, name="train_acc"
                ),
            ),
            (
                "train_f1_score",
                PostEpochTrainScoring(
                    "f1", lower_is_better=False, name="train_f1"
                ),
            ),
            ("test_callback", TestCallback()),
        ],
    )

    clf.fit(train_set, y=None, epochs=4)
Example #21
0
def exp(subject_id):
    import torch
    input_window_samples = 1000

    cuda = torch.cuda.is_available()  # check if GPU is available, if True chooses to use it
    device = 'cuda:0' if cuda else 'cpu'
    if cuda:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    seed = 20190706  # random seed to make results reproducible
    # Set random seed to be able to reproduce results
    random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)

    n_classes = 4

    PATH = '../datasets/'
    with open(PATH + 'bcic_datasets_[0,49].pkl', 'rb') as f:
        data = pickle.load(f)

    import torch

    print('subject:' + str(subject_id))


    # Make train/test split
    tr = []
    val = []
    test_train_split = 0.5

    dataset= data[subject_id]

    dataset_size = len(dataset)
    indices = list(range(dataset_size))

    test_split = int(np.floor(test_train_split * dataset_size))

    train_indices, test_indices = indices[:test_split], indices[test_split:]

    np.random.shuffle(train_indices)
    # Analysis: quick inspection of a sample subject
    sample_data = data[0].dataset
    sample_data.psd()
    from mne.viz import plot_epochs_image
    import mne
    plot_epochs_image(sample_data, picks=['C3','C4'])

    label = sample_data.read_label()

    sample_data.plot_projs_topomap()

    train_sampler = SubsetRandomSampler(train_indices)
    test_sampler = SubsetRandomSampler(test_indices)

    from braindecode.models import ShallowFBCSPNet
    model = ShallowFBCSPNet(
        22,
        n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=30,
    )

    from braindecode.models.util import to_dense_prediction_model, get_output_shape
    to_dense_prediction_model(model)

    n_preds_per_input = get_output_shape(model, 22, input_window_samples)[2]
    print("n_preds_per_input : ", n_preds_per_input)
    print(model)


    # crop_size = 1000
    # model = ShallowNet_dense(n_classes, 22, crop_size)
    # print(model)

    epochs = 100

    # For deep4 they should be:
    lr = 1 * 0.01
    weight_decay = 0.5 * 0.001

    batch_size = 8

    train_set = torch.utils.data.Subset(dataset,indices= train_indices)
    test_set = torch.utils.data.Subset(dataset,indices= test_indices)

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)
    # Send model to GPU
    if cuda:
        model.cuda(device=device)

    from torch.optim import lr_scheduler
    import torch.optim as optim

    import argparse
    parser = argparse.ArgumentParser(description='cross subject domain adaptation')
    parser.add_argument('--batch-size', type=int, default=50, metavar='N',
                        help='input batch size for training (default: 50)')
    parser.add_argument('--test-batch-size', type=int, default=50, metavar='N',
                        help='input batch size for testing (default: 50)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    args.gpuidx = 0
    args.seed = 0
    args.use_tensorboard = False
    args.save_model = False

    lr = 0.0625 * 0.01
    weight_decay = 0
    optimizer = optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
    # scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
    scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs - 1)
    #
    # #test lr
    # lr = []
    # for i in range(200):
    #     scheduler.step()
    #     lr.append(scheduler.get_lr())
    #
    # import matplotlib.pyplot as plt
    # plt.plot(lr)

    import pandas as pd
    results_columns = ['test_loss', 'test_accuracy']
    df = pd.DataFrame(columns=results_columns)

    for epochidx in range(1, epochs):
        print(epochidx)
        train_crop(10, model, device, train_loader, optimizer, scheduler, cuda, args.gpuidx)
        test_loss, test_score = eval_crop(model, device, test_loader)
        results = {'test_loss': test_loss, 'test_accuracy': test_score}
        # `DataFrame.append` was removed in recent pandas; use pd.concat instead
        df = pd.concat([df, pd.DataFrame([results])], ignore_index=True)
        print(results)

    return df
Example #22
0
def test_cropped_decoding():
    # 5,6,9,10,13,14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(
        subject_id, event_codes, update_path=False
    )

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(
            path, preload=True, stim_channel="auto", verbose="WARNING"
        )
        for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)
    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(
        raw.info, meg=False, eeg=True, stim=False, eog=False, exclude="bads"
    )

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )
    # Convert data from volt to microvolt
    # Pytorch expects float32 for input and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_time_length=input_time_length,
        final_conv_length=12,
    )
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    # Perform forward pass to determine how many outputs per input
    n_preds_per_input = get_output_shape(model, in_chans, input_time_length)[2]

    train_set = CroppedXyDataset(X[:60], y[:60],
                                 input_time_length=input_time_length,
                                 n_preds_per_input=n_preds_per_input)
    valid_set = CroppedXyDataset(X[60:], y=y[60:],
                                 input_time_length=input_time_length,
                                 n_preds_per_input=n_preds_per_input)
    train_split = predefined_split(valid_set)

    clf = EEGClassifier(
        model,
        cropped=True,
        criterion=CroppedLoss,
        criterion__loss_function=torch.nn.functional.nll_loss,
        optimizer=optim.Adam,
        train_split=train_split,
        batch_size=32,
        callbacks=['accuracy'],
    )

    clf.fit(train_set, y=None, epochs=4)

    np.testing.assert_allclose(
        clf.history[:, 'train_loss'],
        np.array(
            [
                1.455306,
                1.455934,
                1.210563,
                1.065806
            ]
        ),
        rtol=1e-4,
        atol=1e-5,
    )

    np.testing.assert_allclose(
        clf.history[:, 'valid_loss'],
        np.array(
            [
                2.547288,
                1.51785,
                1.394036,
                1.064355
            ]
        ),
        rtol=1e-4,
        atol=1e-4,
    )
    np.testing.assert_allclose(
        clf.history[:, 'train_accuracy'],
        np.array(
            [
                0.5,
                0.5,
                0.5,
                0.533333
            ]
        ),
        rtol=1e-4,
        atol=1e-5,
    )
    np.testing.assert_allclose(
        clf.history[:, 'valid_accuracy'],
        np.array(
            [
                0.533333,
                0.466667,
                0.533333,
                0.5
            ]
        ),
        rtol=1e-4,
        atol=1e-5,
    )
Example #23
0
def test_cropped_decoding():
    # 5,6,9,10,13,14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=True)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel='auto',
                            verbose='WARNING') for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude='bads')

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(raw,
                         events,
                         dict(hands=2, feet=3),
                         tmin=1,
                         tmax=4.1,
                         proj=False,
                         picks=eeg_channel_inds,
                         baseline=None,
                         preload=True)
    # Convert data from volt to microvolt.
    # PyTorch expects float32 for inputs and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    train_set = SignalAndTarget(X[:60], y=y[:60])
    test_set = SignalAndTarget(X[60:], y=y[60:])

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = train_set.X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(in_chans=in_chans,
                            n_classes=n_classes,
                            input_time_length=input_time_length,
                            final_conv_length=12).create_network()
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    optimizer = optim.Adam(model.parameters())
    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    print("{:d} predictions per input/trial".format(n_preds_per_input))

    iterator = CropsFromTrialsIterator(batch_size=32,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)
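    # The iterator cuts each trial into crops of length input_time_length,
    # spaced n_preds_per_input apart, and yields them in batches of 32.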

    losses = []
    accuracies = []
    for i_epoch in range(4):
        # Set model to training mode
        model.train()
        for batch_X, batch_y in iterator.get_batches(train_set, shuffle=False):
            net_in = np_to_var(batch_X)
            if cuda:
                net_in = net_in.cuda()
            net_target = np_to_var(batch_y)
            if cuda:
                net_target = net_target.cuda()
            # Remove gradients of last backward pass from all parameters
            optimizer.zero_grad()
            outputs = model(net_in)
            # Mean predictions across trial
            # Note that this will give identical gradients to computing
            # a per-prediction loss (at least for the combination of log softmax activation
            # and negative log likelihood loss which we are using here)
            outputs = th.mean(outputs, dim=2, keepdim=False)
            loss = F.nll_loss(outputs, net_target)
            loss.backward()
            optimizer.step()

        # Print some statistics each epoch
        model.eval()
        print("Epoch {:d}".format(i_epoch))
        for setname, dataset in (('Train', train_set), ('Test', test_set)):
            # Collect all predictions and losses
            all_preds = []
            all_losses = []
            batch_sizes = []
            for batch_X, batch_y in iterator.get_batches(dataset,
                                                         shuffle=False):
                net_in = np_to_var(batch_X)
                if cuda:
                    net_in = net_in.cuda()
                net_target = np_to_var(batch_y)
                if cuda:
                    net_target = net_target.cuda()
                outputs = model(net_in)
                all_preds.append(var_to_np(outputs))
                outputs = th.mean(outputs, dim=2, keepdim=False)
                loss = F.nll_loss(outputs, net_target)
                loss = float(var_to_np(loss))
                all_losses.append(loss)
                batch_sizes.append(len(batch_X))
            # Compute the per-input mean loss, weighting each batch by its size
            loss = np.mean(
                np.array(all_losses) * np.array(batch_sizes) /
                np.mean(batch_sizes))
            print("{:6s} Loss: {:.5f}".format(setname, loss))
            losses.append(loss)
            # Assign the predictions to the trials
            preds_per_trial = compute_preds_per_trial_from_crops(
                all_preds, input_time_length, dataset.X)
            # preds per trial are now trials x classes x timesteps/predictions
            # Now mean across timesteps for each trial to get per-trial predictions
            meaned_preds_per_trial = np.array(
                [np.mean(p, axis=1) for p in preds_per_trial])
            predicted_labels = np.argmax(meaned_preds_per_trial, axis=1)
            accuracy = np.mean(predicted_labels == dataset.y)
            accuracies.append(accuracy * 100)
            print("{:6s} Accuracy: {:.1f}%".format(setname, accuracy * 100))
    np.testing.assert_allclose(np.array(losses),
                               np.array([
                                   1.31657708, 1.73548156, 1.02950428,
                                   1.43932164, 0.78677772, 1.12382019,
                                   0.55920881, 0.87277424
                               ]),
                               rtol=1e-4,
                               atol=1e-5)
    np.testing.assert_allclose(np.array(accuracies),
                               np.array([
                                   50., 46.66666667, 50., 46.66666667, 50.,
                                   46.66666667, 66.66666667, 50.
                               ]),
                               rtol=1e-4,
                               atol=1e-5)
        final_conv_length=1,
    )
    optimizer_lr = 0.01
    optimizer_weight_decay = 0.0005
else:
    raise ValueError(f'{model_name} unknown')

new_model = torch.nn.Sequential()
for name, module_ in model.named_children():
    if "softmax" in name:
        continue
    new_model.add_module(name, module_)
model = new_model
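# The rebuilt model is identical except that the final softmax layer has been
# dropped, so the network now outputs raw (pre-softmax) values.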

if cuda:
    model.cuda()

to_dense_prediction_model(model)
n_preds_per_input = get_output_shape(model, n_chans, input_window_samples)[2]

train_set, valid_set = create_compatible_dataset('./data/BCICIV_4_mat/sub1_comp.mat')

# dataset = create_fixed_length_windows(
#     dataset,
#     start_offset_samples=0,
#     stop_offset_samples=0,
#     window_size_samples=input_window_samples,
#     window_stride_samples=n_preds_per_input,
#     drop_last_window=False,
#     drop_bad_windows=True,
# )
def test_trialwise_decoding():
    # 5,6,9,10,13,14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=False)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel="auto",
                            verbose="WARNING") for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)
    raw.apply_function(lambda x: x * 1000000)
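    # Scale the signal from volt to microvolt.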

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)
    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude="bads")

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    ds = EpochsDataset(epoched)

    train_set = Subset(ds, np.arange(60))
    valid_set = Subset(ds, np.arange(60, len(ds)))
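    # First 60 trials for training, the remainder for validation.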

    train_valid_split = predefined_split(valid_set)

    cuda = False
    if cuda:
        device = 'cuda'
    else:
        device = 'cpu'
    set_random_seeds(seed=20170629, cuda=cuda)
    n_classes = 2
    in_chans = train_set[0][0].shape[0]
    input_window_samples = train_set[0][0].shape[1]
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_window_samples=input_window_samples,
        final_conv_length="auto",
    )
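    # final_conv_length="auto" sizes the last conv layer so the network emits
    # a single prediction for the given input_window_samples.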
    if cuda:
        model.cuda()

    clf = EEGClassifier(
        model,
        cropped=False,
        criterion=torch.nn.NLLLoss,
        optimizer=torch.optim.Adam,
        train_split=train_valid_split,
        optimizer__lr=0.001,
        batch_size=30,
        callbacks=["accuracy"],
        device=device,
    )
    clf.fit(train_set, y=None, epochs=6)
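    # y=None because the targets come from the dataset itself:
    # each item of train_set is an (X, y) pair.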

    np.testing.assert_allclose(
        clf.history[:, 'train_loss'],
        np.array([1.623811, 1.141197, 0.730361, 0.5994, 0.738884, 0.419241]),
        rtol=1e-4,
        atol=1e-5,
    )

    np.testing.assert_allclose(
        clf.history[:, 'valid_loss'],
        np.array([
            1.958356261253357, 0.8494758009910583, 0.9321595430374146,
            0.8126973509788513, 0.7426613569259644, 0.7547585368156433
        ]),
        rtol=1e-4,
        atol=1e-5,
    )

    np.testing.assert_allclose(
        clf.history[:, 'train_accuracy'],
        np.array([0.5, 0.8166666666666667, 0.7, 0.8, 0.8666666666666667, 0.9]),
        rtol=1e-4,
        atol=1e-5,
    )

    np.testing.assert_allclose(
        clf.history[:, 'valid_accuracy'],
        np.array([
            0.4666666666666667,
            0.6,
            0.5,
            0.5666666666666667,
            0.6,
            0.6,
        ]),
        rtol=1e-4,
        atol=1e-5,
    )
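    # A minimal sketch of how the fitted classifier could be used afterwards
    # (clf.predict is the standard skorch API; the resulting accuracy will
    # depend on the run):
    #   y_pred = clf.predict(valid_set)
    #   y_true = np.array([valid_set[i][1] for i in range(len(valid_set))])
    #   print("valid accuracy:", (y_pred == y_true).mean())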