Example #1
def test_trialwise_decoding():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(
        subject_id, event_codes, update_path=False
    )

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(
            path, preload=True, stim_channel="auto", verbose="WARNING"
        )
        for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)
    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(
        raw.info, meg=False, eeg=True, stim=False, eog=False, exclude="bads"
    )

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(
        raw,
        events,
        dict(hands=2, feet=3),
        tmin=1,
        tmax=4.1,
        proj=False,
        picks=eeg_channel_inds,
        baseline=None,
        preload=True,
    )

    # Convert data from volt to microvolt
    # PyTorch expects float32 for inputs and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    SignalAndTarget = namedtuple("SignalAndTarget", "X y")

    train_set = SignalAndTarget(X[:60], y=y[:60])
    test_set = SignalAndTarget(X[60:], y=y[60:])

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
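    # For example, to pick the GPU automatically when one is present:
    # cuda = torch.cuda.is_available()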
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)
    n_classes = 2
    in_chans = train_set.X.shape[1]
    # final_conv_length = auto ensures we only get a single output in the time dimension
    model = ShallowFBCSPNet(
        in_chans=in_chans,
        n_classes=n_classes,
        input_time_length=train_set.X.shape[2],
        final_conv_length="auto",
    )
    if cuda:
        model.cuda()

    optimizer = optim.Adam(model.parameters())

    rng = RandomState((2017, 6, 30))
    losses = []
    accuracies = []
    for i_epoch in range(6):
        i_trials_in_batch = get_balanced_batches(
            len(train_set.X), rng, shuffle=True, batch_size=30
        )
        # Set model to training mode
        model.train()
        for i_trials in i_trials_in_batch:
            # Have to add empty fourth dimension to X
            batch_X = train_set.X[i_trials][:, :, :, None]
            batch_y = train_set.y[i_trials]
            net_in = np_to_var(batch_X)
            if cuda:
                net_in = net_in.cuda()
            net_target = np_to_var(batch_y)
            if cuda:
                net_target = net_target.cuda()
            # Remove gradients of last backward pass from all parameters
            optimizer.zero_grad()
            # Compute outputs of the network
            outputs = model(net_in)
            # Compute the loss
            loss = F.nll_loss(outputs, net_target)
            # Do the backpropagation
            loss.backward()
            # Update parameters with the optimizer
            optimizer.step()

        # Print some statistics each epoch
        model.eval()
        print("Epoch {:d}".format(i_epoch))
        for setname, dataset in (("Train", train_set), ("Test", test_set)):
            # Here, we will use the entire dataset at once, which is still possible
            # for such smaller datasets. Otherwise we would have to use batches.
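            # A sketch of batched evaluation (reusing the batching helper from
            # training), in case the dataset did not fit in one pass:
            # for i_trials in get_balanced_batches(len(dataset.X), rng,
            #                                      shuffle=False, batch_size=30):
            #     outputs = model(np_to_var(dataset.X[i_trials][:, :, :, None]))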
            net_in = np_to_var(dataset.X[:, :, :, None])
            if cuda:
                net_in = net_in.cuda()
            net_target = np_to_var(dataset.y)
            if cuda:
                net_target = net_target.cuda()
            outputs = model(net_in)
            loss = F.nll_loss(outputs, net_target)
            losses.append(float(var_to_np(loss)))
            print("{:6s} Loss: {:.5f}".format(setname, float(var_to_np(loss))))
            predicted_labels = np.argmax(var_to_np(outputs), axis=1)
            accuracy = np.mean(dataset.y == predicted_labels)
            accuracies.append(accuracy * 100)
            print("{:6s} Accuracy: {:.1f}%".format(setname, accuracy * 100))

    np.testing.assert_allclose(
        np.array(losses),
        np.array(
            [
                0.91796708,
                1.2714895,
                0.4999536,
                0.94365239,
                0.39268905,
                0.89928466,
                0.37648854,
                0.8940345,
                0.35774994,
                0.86749417,
                0.35080773,
                0.80767328,
            ]
        ),
        rtol=1e-4,
        atol=1e-5,
    )

    np.testing.assert_allclose(
        np.array(accuracies),
        np.array(
            [
                55.0,
                63.33333333,
                71.66666667,
                63.33333333,
                81.66666667,
                60.0,
                78.33333333,
                63.33333333,
                83.33333333,
                66.66666667,
                80.0,
                66.66666667,
            ]
        ),
        rtol=1e-4,
        atol=1e-5,
    )
Example #2
# net = TSception(1000, n_chans, 3, 3, 0.5)

img_size = [n_chans, wind]
# Create the Visformer model used below (the isinstance check later expects it)
net = timm.create_model('visformer_tiny', num_classes=n_classes, in_chans=1, img_size=img_size)
if cuda:
    net.cuda()

lr = 0.005
weight_decay = 1e-10

criterion = torch.nn.CrossEntropyLoss()
#criterion = nn.NLLLoss()
#optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
#optimizer = torch.optim.Adadelta(net.parameters(), lr=lr)
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# Decay LR by a factor of 0.1 every 5 epochs
scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)

epoch_num = 100

for epoch in range(epoch_num):
    print("------ epoch " + str(epoch) + " -----")
    net.train()

    loss_epoch = 0

    running_loss = 0.0
    running_corrects = 0
    for batch, (trainx, trainy) in enumerate(train_loader):
        if isinstance(net, timm.models.visformer.Visformer):
Example #3
def exp(subject_id):
    dataset = MOABBDataset(dataset_name="BNCI2014001", subject_ids=subject_id)

    from braindecode.datautil.preprocess import exponential_moving_standardize
    from braindecode.datautil.preprocess import MNEPreproc, NumpyPreproc, preprocess

    low_cut_hz = 0.  # low cut frequency for filtering
    high_cut_hz = 49.  # high cut frequency for filtering
    # Parameters for exponential moving standardization
    factor_new = 1e-3
    init_block_size = 1000

    preprocessors = [
        # keep only EEG sensors
        MNEPreproc(fn='pick_types', eeg=True, meg=False, stim=False),
        # convert from volt to microvolt, directly modifying the numpy array
        NumpyPreproc(fn=lambda x: x * 1e6),
        # bandpass filter
        MNEPreproc(fn='filter', l_freq=low_cut_hz, h_freq=high_cut_hz),
        # exponential moving standardization
        # NumpyPreproc(fn=exponential_moving_standardize, factor_new=factor_new,
        #     init_block_size=init_block_size)
    ]

    # Transform the data
    preprocess(dataset, preprocessors)

    ######################################################################
    # Create model and compute windowing parameters
    # ---------------------------------------------
    #

    ######################################################################
    # In contrast to trialwise decoding, we first have to create the model
    # before we can cut the dataset into windows. This is because we need to
    # know the receptive field of the network to know how large the window
    # stride should be.
    #
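    # (Concretely: after ``to_dense_prediction_model(model)`` below, the number
    # of predictions the network emits per window,
    # ``get_output_shape(model, n_chans, input_window_samples)[2]``, is exactly
    # the value passed as ``window_stride_samples``.)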

    ######################################################################
    # We first choose the compute/input window size that will be fed to the
    # network during training. This has to be larger than the network's
    # receptive field size and can otherwise be chosen for computational
    # efficiency (see the explanations at the beginning of this tutorial).
    # Here we choose 1000 samples, which is 4 seconds at the 250 Hz sampling
    # rate.
    #

    input_window_samples = 1000

    ######################################################################
    # Now we create the model. To enable it to be used in cropped decoding
    # efficiently, we manually set the length of the final convolution layer
    # to some length that makes the receptive field of the ConvNet smaller
    # than ``input_window_samples`` (see ``final_conv_length=30`` in the model
    # definition).
    #

    import torch
    from braindecode.util import set_random_seeds
    from braindecode.models import ShallowFBCSPNet, Deep4Net

    cuda = torch.cuda.is_available()  # check if GPU is available; if True, use it
    device = 'cuda:1' if cuda else 'cpu'
    if cuda:
        torch.backends.cudnn.benchmark = True
    seed = 20190706  # random seed to make results reproducible
    # Set random seed to be able to reproduce results
    set_random_seeds(seed=seed, cuda=cuda)

    n_classes = 4
    # Extract number of chans from dataset
    n_chans = dataset[0][0].shape[0]

    # model = Deep4Net(
    #     n_chans,
    #     n_classes,
    #     input_window_samples=input_window_samples,
    #     final_conv_length="auto",
    # )
    #
    #
    #
    # embedding_net = Deep4Net_origin(4, 22, input_window_samples)
    # model = FcClfNet(embedding_net)

    model = ShallowFBCSPNet(
        n_chans,
        n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=30,
    )

    print(model)

    # Send model to GPU
    if cuda:
        model.cuda(device)

    ######################################################################
    # And now we transform the model with strides to a model that outputs dense
    # predictions, so we can use it to obtain predictions for all
    # crops.
    #

    from braindecode.models.util import to_dense_prediction_model, get_output_shape
    to_dense_prediction_model(model)

    n_preds_per_input = get_output_shape(model, n_chans,
                                         input_window_samples)[2]
    print("n_preds_per_input : ", n_preds_per_input)
    print(model)

    ######################################################################
    # Cut the data into windows
    # -------------------------
    #

    ######################################################################
    # In contrast to trialwise decoding, we have to supply an explicit window size and window stride to the
    # ``create_windows_from_events`` function.
    #

    import numpy as np
    from braindecode.datautil.windowers import create_windows_from_events

    trial_start_offset_seconds = -0.5
    # Extract the sampling frequency and check that it is the same in all datasets
    sfreq = dataset.datasets[0].raw.info['sfreq']
    assert all([ds.raw.info['sfreq'] == sfreq for ds in dataset.datasets])

    # Calculate the trial start offset in samples.
    trial_start_offset_samples = int(trial_start_offset_seconds * sfreq)

    # Create windows using the braindecode function for this. It needs
    # parameters that define how trials should be cut into windows.
    windows_dataset = create_windows_from_events(
        dataset,
        trial_start_offset_samples=trial_start_offset_samples,
        trial_stop_offset_samples=0,
        window_size_samples=input_window_samples,
        window_stride_samples=n_preds_per_input,
        drop_last_window=False,
        preload=True,
    )

    ######################################################################
    # Split the dataset
    # -----------------
    #
    # This code is the same as in trialwise decoding.
    #

    from braindecode.datasets.base import BaseConcatDataset
    splitted = windows_dataset.split('session')

    train_set = splitted['session_T']
    valid_set = splitted['session_E']

    lr = 0.0625 * 0.01
    weight_decay = 0
    batch_size = 8
    n_epochs = 100

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=batch_size,
                                               shuffle=True)
    # valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False)
    test_loader = torch.utils.data.DataLoader(valid_set,
                                              batch_size=batch_size,
                                              shuffle=False)

    # Send model to GPU
    if cuda:
        model.cuda(device)

    from torch.optim import lr_scheduler
    import torch.optim as optim

    import argparse
    parser = argparse.ArgumentParser(
        description='cross subject domain adaptation')
    parser.add_argument('--batch-size',
                        type=int,
                        default=50,
                        metavar='N',
                        help='input batch size for training (default: 50)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=50,
                        metavar='N',
                        help='input batch size for testing (default: 50)')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    args.gpuidx = 0
    args.seed = 0
    args.use_tensorboard = False
    args.save_model = False

    optimizer = optim.AdamW(model.parameters(),
                            lr=lr,
                            weight_decay=weight_decay)
    # scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=1)
    scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=n_epochs - 1)

    import pandas as pd
    results_columns = ['test_loss', 'test_accuracy']
    df = pd.DataFrame(columns=results_columns)

    for epochidx in range(1, n_epochs):
        print(epochidx)
        train_crop(10, model, device, train_loader, optimizer, scheduler, cuda,
                   args.gpuidx)
        test_loss, test_score = eval_crop(model, device, test_loader)
        results = {'test_loss': test_loss, 'test_accuracy': test_score}
        df = pd.concat([df, pd.DataFrame([results])], ignore_index=True)
        print(results)

    return df
def create_example_model(n_channels,
                         n_classes,
                         window_len_samples,
                         kind='shallow',
                         cuda=False):
    """Create model, loss and optimizer.

    Parameters
    ----------
    n_channels : int
        Number of channels in the input
    n_classes : int
        Number of classes in the output
    window_len_samples : int
        Window length of the input, in samples
    kind : str
        'shallow' or 'deep'
    cuda : bool
        If True, move the model to a CUDA device.

    Returns
    -------
    model : torch.nn.Module
        Model to train.
    loss :
        Loss function
    optimizer :
        Optimizer
    """
    if kind == 'shallow':
        model = ShallowFBCSPNet(n_channels,
                                n_classes,
                                input_window_samples=window_len_samples,
                                n_filters_time=40,
                                filter_time_length=25,
                                n_filters_spat=40,
                                pool_time_length=75,
                                pool_time_stride=15,
                                final_conv_length='auto',
                                split_first_layer=True,
                                batch_norm=True,
                                batch_norm_alpha=0.1,
                                drop_prob=0.5)
    elif kind == 'deep':
        model = Deep4Net(n_channels,
                         n_classes,
                         input_window_samples=window_len_samples,
                         final_conv_length='auto',
                         n_filters_time=25,
                         n_filters_spat=25,
                         filter_time_length=10,
                         pool_time_length=3,
                         pool_time_stride=3,
                         n_filters_2=50,
                         filter_length_2=10,
                         n_filters_3=100,
                         filter_length_3=10,
                         n_filters_4=200,
                         filter_length_4=10,
                         first_pool_mode="max",
                         later_pool_mode="max",
                         drop_prob=0.5,
                         double_time_convs=False,
                         split_first_layer=True,
                         batch_norm=True,
                         batch_norm_alpha=0.1,
                         stride_before_pool=False)
    else:
        raise ValueError(f"Unknown model kind: {kind}")

    if cuda:
        model.cuda()

    optimizer = optim.Adam(model.parameters())
    loss = nn.NLLLoss()

    return model, loss, optimizer
Example #5
def test_cropped_decoding():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id,
                                                    event_codes,
                                                    update_path=True)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel='auto',
                            verbose='WARNING') for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude='bads')

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(raw,
                         events,
                         dict(hands=2, feet=3),
                         tmin=1,
                         tmax=4.1,
                         proj=False,
                         picks=eeg_channel_inds,
                         baseline=None,
                         preload=True)
    # Convert data from volt to microvolt
    # PyTorch expects float32 for inputs and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    train_set = SignalAndTarget(X[:60], y=y[:60])
    test_set = SignalAndTarget(X[60:], y=y[60:])

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = train_set.X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(in_chans=in_chans,
                            n_classes=n_classes,
                            input_time_length=input_time_length,
                            final_conv_length=12).create_network()
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    optimizer = optim.Adam(model.parameters())
    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    print("{:d} predictions per input/trial".format(n_preds_per_input))

    iterator = CropsFromTrialsIterator(batch_size=32,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)

    losses = []
    accuracies = []
    for i_epoch in range(4):
        # Set model to training mode
        model.train()
        for batch_X, batch_y in iterator.get_batches(train_set, shuffle=False):
            net_in = np_to_var(batch_X)
            if cuda:
                net_in = net_in.cuda()
            net_target = np_to_var(batch_y)
            if cuda:
                net_target = net_target.cuda()
            # Remove gradients of last backward pass from all parameters
            optimizer.zero_grad()
            outputs = model(net_in)
            # Mean predictions across trial
            # Note that this will give identical gradients to computing
            # a per-prediction loss (at least for the combination of log softmax activation
            # and negative log likelihood loss which we are using here)
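            # (nll_loss just negates the log-probability of the target class,
            # which is linear in the predictions, so the loss of the mean
            # equals the mean of the per-crop losses and the gradients match.)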
            outputs = th.mean(outputs, dim=2, keepdim=False)
            loss = F.nll_loss(outputs, net_target)
            loss.backward()
            optimizer.step()

        # Print some statistics each epoch
        model.eval()
        print("Epoch {:d}".format(i_epoch))
        for setname, dataset in (('Train', train_set), ('Test', test_set)):
            # Collect all predictions and losses
            all_preds = []
            all_losses = []
            batch_sizes = []
            for batch_X, batch_y in iterator.get_batches(dataset,
                                                         shuffle=False):
                net_in = np_to_var(batch_X)
                if cuda:
                    net_in = net_in.cuda()
                net_target = np_to_var(batch_y)
                if cuda:
                    net_target = net_target.cuda()
                outputs = model(net_in)
                all_preds.append(var_to_np(outputs))
                outputs = th.mean(outputs, dim=2, keepdim=False)
                loss = F.nll_loss(outputs, net_target)
                loss = float(var_to_np(loss))
                all_losses.append(loss)
                batch_sizes.append(len(batch_X))
            # Compute the mean per-input loss (the expression below equals the
            # batch-size-weighted mean: sum(loss_i * n_i) / sum(n_i))
            loss = np.mean(
                np.array(all_losses) * np.array(batch_sizes) /
                np.mean(batch_sizes))
            print("{:6s} Loss: {:.5f}".format(setname, loss))
            losses.append(loss)
            # Assign the predictions to the trials
            preds_per_trial = compute_preds_per_trial_from_crops(
                all_preds, input_time_length, dataset.X)
            # preds per trial are now trials x classes x timesteps/predictions
            # Now mean across timesteps for each trial to get per-trial predictions
            meaned_preds_per_trial = np.array(
                [np.mean(p, axis=1) for p in preds_per_trial])
            predicted_labels = np.argmax(meaned_preds_per_trial, axis=1)
            accuracy = np.mean(predicted_labels == dataset.y)
            accuracies.append(accuracy * 100)
            print("{:6s} Accuracy: {:.1f}%".format(setname, accuracy * 100))
    np.testing.assert_allclose(np.array(losses),
                               np.array([
                                   1.31657708, 1.73548156, 1.02950428,
                                   1.43932164, 0.78677772, 1.12382019,
                                   0.55920881, 0.87277424
                               ]),
                               rtol=1e-4,
                               atol=1e-5)
    np.testing.assert_allclose(np.array(accuracies),
                               np.array([
                                   50., 46.66666667, 50., 46.66666667, 50.,
                                   46.66666667, 66.66666667, 50.
                               ]),
                               rtol=1e-4,
                               atol=1e-5)
# We use [AdamW](https://arxiv.org/abs/1711.05101) to optimize the parameters of our network together with [Cosine Annealing](https://arxiv.org/abs/1608.03983) of the learning rate. We supply some default parameters that we have found to work well for motor decoding, however we strongly encourage you to perform your own hyperparameter optimization using cross validation on your training data.
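# A minimal sketch of that pairing in plain PyTorch (hyperparameters borrowed
# from the Adam call below; treat them as a starting point, not a recommendation):
#
#     from torch.optim import AdamW
#     from torch.optim.lr_scheduler import CosineAnnealingLR
#     optimizer = AdamW(model.parameters(), lr=0.0625 * 0.01, weight_decay=0)
#     scheduler = CosineAnnealingLR(optimizer, T_max=n_epochs - 1)
#     # step the scheduler once per epoch, after the optimizer updates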

# <div class="alert alert-info">
#
# We will now use the Braindecode model class directly to perform the training in a few lines of code. If you instead want to use your own training loop, have a look at the [Trialwise Low-Level Tutorial](./TrialWise_LowLevel.html).
#
# </div>

# In[ ]:

# from braindecode.torch_ext.optimizers import AdamW
from torch.optim import Adam
import torch.nn.functional as F
# optimizer = AdamW(model.parameters(), lr=1*0.01, weight_decay=0.5*0.001) # these are good values for the deep model
optimizer = Adam(model.parameters(), lr=0.0625 * 0.01, weight_decay=0)
model.compile(
    loss=F.nll_loss,
    optimizer=optimizer,
    iterator_seed=1,
)

# ## Run the training

# In[ ]:

n_epochs = 4
model.fit(train_set.X,
          train_set.y,
          n_epochs=n_epochs,
          batch_size=64)
Example #7
def exp(subject_id):
    import torch
    input_window_samples = 1000

    cuda = torch.cuda.is_available()  # check if GPU is available; if True, use it
    device = 'cuda:0' if cuda else 'cpu'
    if cuda:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    seed = 20190706  # random seed to make results reproducible
    # Set random seed to be able to reproduce results
    random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)

    n_classes = 4

    PATH = '../datasets/'
    with open(PATH + 'bcic_datasets_[0,49].pkl', 'rb') as f:
        data = pickle.load(f)


    print('subject:' + str(subject_id))


    # make train/test split
    tr = []
    val = []
    test_train_split = 0.5

    dataset= data[subject_id]

    dataset_size = len(dataset)
    indices = list(range(dataset_size))

    test_split = int(np.floor(test_train_split * dataset_size))

    train_indices, test_indices = indices[:test_split], indices[test_split:]

    np.random.shuffle(train_indices)
    # analysis: inspect a sample recording
    sample_data = data[0].dataset
    sample_data.psd()
    from mne.viz import plot_epochs_image
    import mne
    plot_epochs_image(sample_data, picks=['C3','C4'])

    label = sample_data.read_label()

    sample_data.plot_projs_topomap()

    train_sampler = SubsetRandomSampler(train_indices)
    test_sampler = SubsetRandomSampler(test_indices)

    from braindecode.models import ShallowFBCSPNet
    model = ShallowFBCSPNet(
        22,
        n_classes,
        input_window_samples=input_window_samples,
        final_conv_length=30,
    )

    from braindecode.models.util import to_dense_prediction_model, get_output_shape
    to_dense_prediction_model(model)

    n_preds_per_input = get_output_shape(model, 22, input_window_samples)[2]
    print("n_preds_per_input : ", n_preds_per_input)
    print(model)


    # crop_size =1000
    #
    #
    #
    #
    # model = ShallowNet_dense(n_classes, 22, crop_size)
    #
    # print(model)

    epochs = 100

    # For the deep model (Deep4Net), lr and weight_decay should be:
    lr = 1 * 0.01
    weight_decay = 0.5 * 0.001

    batch_size = 8

    train_set = torch.utils.data.Subset(dataset, indices=train_indices)
    test_set = torch.utils.data.Subset(dataset, indices=test_indices)

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)
    # Send model to GPU
    if cuda:
        model.cuda(device=device)

    from torch.optim import lr_scheduler
    import torch.optim as optim

    import argparse
    parser = argparse.ArgumentParser(description='cross subject domain adaptation')
    parser.add_argument('--batch-size', type=int, default=50, metavar='N',
                        help='input batch size for training (default: 50)')
    parser.add_argument('--test-batch-size', type=int, default=50, metavar='N',
                        help='input batch size for testing (default: 50)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    args.gpuidx = 0
    args.seed = 0
    args.use_tensorboard = False
    args.save_model = False

    lr = 0.0625 * 0.01
    weight_decay = 0
    optimizer = optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
    # scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
    scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs - 1)
    #
    # #test lr
    # lr = []
    # for i in range(200):
    #     scheduler.step()
    #     lr.append(scheduler.get_lr())
    #
    # import matplotlib.pyplot as plt
    # plt.plot(lr)

    import pandas as pd
    results_columns = ['test_loss', 'test_accuracy']
    df = pd.DataFrame(columns=results_columns)

    for epochidx in range(1, epochs):
        print(epochidx)
        train_crop(10, model, device, train_loader, optimizer, scheduler, cuda,
                   args.gpuidx)
        test_loss, test_score = eval_crop(model, device, test_loader)
        results = {'test_loss': test_loss, 'test_accuracy': test_score}
        df = pd.concat([df, pd.DataFrame([results])], ignore_index=True)
        print(results)

    return df
Example #8
def exp(subject_id):
    import torch
    test_subj = np.r_[subject_id]

    print('test subj:' + str(test_subj))

    # 10% validation (the train/valid random_split below is commented out)
    train_size = int(0.9 * len(splitted['session_T']))
    test_size = len(splitted['session_T']) - train_size

    # train_set, valid_set = torch.utils.data.random_split(splitted['session_T'], [train_size, test_size])
    train_set = splitted['session_T']
    test_set = splitted['session_E']

    # model = Deep4Net(
    #     n_chans,
    #     n_classes,
    #     input_window_samples=input_window_samples,
    #     final_conv_length="auto",
    # )

    from torch.utils.data import Dataset, ConcatDataset

    crop_size = 1000
    # embedding_net = Deep4Net_origin(n_classes, n_chans, crop_size)
    # model = FcClfNet(embedding_net)

    model = ShallowFBCSPNet(
        n_chans,
        n_classes,
        input_window_samples=input_window_samples,
        final_conv_length='auto',
    )

    from braindecode.models.util import to_dense_prediction_model, get_output_shape
    to_dense_prediction_model(model)

    n_preds_per_input = get_output_shape(model, 22, input_window_samples)[2]
    print("n_preds_per_input : ", n_preds_per_input)
    print(model)


    batch_size = 8
    epochs = 200

    lr = 0.0625 * 0.01
    weight_decay = 0

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
    # valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)

    # Send model to GPU
    if cuda:
        model.cuda()

    from torch.optim import lr_scheduler
    import torch.optim as optim

    import argparse
    parser = argparse.ArgumentParser(description='cross subject domain adaptation')
    parser.add_argument('--batch-size', type=int, default=50, metavar='N',
                        help='input batch size for training (default: 50)')
    parser.add_argument('--test-batch-size', type=int, default=50, metavar='N',
                        help='input batch size for testing (default: 50)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    args.gpuidx = 0
    args.seed = 0
    args.use_tensorboard = False
    args.save_model = False

    optimizer = optim.AdamW(model.parameters(), lr=0.01, weight_decay=0.5 * 0.001)
    # scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=1)
    scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs - 1)

    import pandas as pd
    results_columns = ['test_loss', 'test_accuracy']
    df = pd.DataFrame(columns=results_columns)

    for epochidx in range(1, epochs):
        print(epochidx)
        train_crop(10, model, device, train_loader, optimizer, scheduler, cuda,
                   args.gpuidx)
        test_loss, test_score = eval_crop(model, device, test_loader)
        results = {'test_loss': test_loss, 'test_accuracy': test_score}
        df = pd.concat([df, pd.DataFrame([results])], ignore_index=True)
        print(results)

    return df
Example #9
def test_experiment_class():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
    subject_id = 1
    event_codes = [5, 6, 9, 10, 13, 14]

    # This will download the files if you don't have them yet,
    # and then return the paths to the files.
    physionet_paths = mne.datasets.eegbci.load_data(subject_id, event_codes)

    # Load each of the files
    parts = [
        mne.io.read_raw_edf(path,
                            preload=True,
                            stim_channel='auto',
                            verbose='WARNING') for path in physionet_paths
    ]

    # Concatenate them
    raw = concatenate_raws(parts)

    # Find the events in this dataset
    events, _ = mne.events_from_annotations(raw)

    # Use only EEG channels
    eeg_channel_inds = mne.pick_types(raw.info,
                                      meg=False,
                                      eeg=True,
                                      stim=False,
                                      eog=False,
                                      exclude='bads')

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(raw,
                         events,
                         dict(hands=2, feet=3),
                         tmin=1,
                         tmax=4.1,
                         proj=False,
                         picks=eeg_channel_inds,
                         baseline=None,
                         preload=True)

    # Convert data from volt to microvolt
    # PyTorch expects float32 for inputs and int64 for labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    train_set = SignalAndTarget(X[:60], y=y[:60])
    test_set = SignalAndTarget(X[60:], y=y[60:])

    train_set, valid_set = split_into_two_sets(train_set,
                                               first_set_fraction=0.8)

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = train_set.X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(in_chans=in_chans,
                            n_classes=n_classes,
                            input_time_length=input_time_length,
                            final_conv_length=12).create_network()
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    optimizer = optim.Adam(model.parameters())

    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    print("{:d} predictions per input/trial".format(n_preds_per_input))

    # Iterator is used to iterate over datasets both for training
    # and evaluation
    iterator = CropsFromTrialsIterator(batch_size=32,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)

    # Loss function takes predictions as they come out of the network and the targets
    # and returns a loss
    def loss_function(preds, targets):
        return F.nll_loss(th.mean(preds, dim=2, keepdim=False), targets)
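    # (As in the cropped examples above, the per-crop log-probabilities are
    # averaged across time before nll_loss, yielding one loss per trial.)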

    # Could be used to apply some constraint on the model; should then be an
    # object with an ``apply`` method that accepts a module
    model_constraint = None
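    # A hypothetical sketch of such a constraint object (not used in this test):
    # class MaxNormConstraint:
    #     def apply(self, module):
    #         for param in module.parameters():
    #             param.data = th.renorm(param.data, 2, 0, maxnorm=2.0)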
    # Monitors log the training progress
    monitors = [
        LossMonitor(),
        MisclassMonitor(col_suffix='sample_misclass'),
        CroppedTrialMisclassMonitor(input_time_length),
        RuntimeMonitor(),
    ]
    # Stop criterion determines when the first stop happens
    stop_criterion = MaxEpochs(4)
    exp = Experiment(model,
                     train_set,
                     valid_set,
                     test_set,
                     iterator,
                     loss_function,
                     optimizer,
                     model_constraint,
                     monitors,
                     stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True,
                     batch_modifier=None,
                     cuda=cuda)

    # need to setup python logging before to be able to see anything
    logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                        level=logging.DEBUG,
                        stream=sys.stdout)
    exp.run()

    compare_df = pd.read_csv(
        StringIO(
            'train_loss,valid_loss,test_loss,train_sample_misclass,valid_sample_misclass,'
            'test_sample_misclass,train_misclass,valid_misclass,test_misclass\n'
            '14.167170524597168,13.910758018493652,15.945781707763672,0.5,0.5,'
            '0.5333333333333333,0.5,0.5,0.5333333333333333\n'
            '1.1735659837722778,1.4342904090881348,1.8664429187774658,0.4629567736185384,'
            '0.5120320855614973,0.5336007130124778,0.5,0.5,0.5333333333333333\n'
            '1.3168460130691528,1.60431969165802,1.9181344509124756,0.49298128342245995,'
            '0.5109180035650625,0.531729055258467,0.5,0.5,0.5333333333333333\n'
            '0.8465543389320374,1.280307412147522,1.439755916595459,0.4413435828877005,'
            '0.5461229946524064,0.5283422459893048,0.47916666666666663,0.5,'
            '0.5333333333333333\n0.6977059841156006,1.1762590408325195,1.2779350280761719,'
            '0.40290775401069523,0.588903743315508,0.5307486631016043,0.5,0.5,0.5\n'
            '0.7934166193008423,1.1762590408325195,1.2779350280761719,0.4401069518716577,'
            '0.588903743315508,0.5307486631016043,0.5,0.5,0.5\n0.5982189178466797,'
            '0.8581563830375671,0.9598925113677979,0.32032085561497325,0.47660427807486627,'
            '0.4672905525846702,0.31666666666666665,0.5,0.4666666666666667\n0.5044312477111816,'
            '0.7133197784423828,0.8164243102073669,0.2591354723707665,0.45699643493761144,'
            '0.4393048128342246,0.16666666666666663,0.41666666666666663,0.43333333333333335\n'
            '0.4815250039100647,0.6736412644386292,0.8016976714134216,0.23413547237076648,'
            '0.39505347593582885,0.42932263814616756,0.15000000000000002,0.41666666666666663,0.5\n'
        ))

    for col in compare_df:
        np.testing.assert_allclose(np.array(compare_df[col]),
                                   exp.epochs_df[col],
                                   rtol=1e-3,
                                   atol=1e-4)