def load_eeg_data(feature_dim_last=True, standardize=True, one_khz=True):
    """
    Load the EEG train and test splits.

    Args:

        feature_dim_last (boolean, optional): If True, swap the time and
            feature dimensions so features come last.

        standardize (boolean, optional): If True, both splits are standardized
            using statistics computed on the training data.

        one_khz (boolean, optional): If True, build the dataset from the
            1000Hz recordings instead of the default 100Hz ones.

    """
    (train_input, train_target), (test_input, test_target) = (
        bci.load(root=DATA_PATH, train=flag, one_khz=one_khz)
        for flag in (True, False)
    )

    if feature_dim_last:
        # (N, C, T) -> (N, T, C)
        train_input, test_input = (
            t.permute(0, 2, 1) for t in (train_input, test_input)
        )

    if standardize:
        # The test split reuses the train mean/std (passed back by
        # standardize_data) rather than its own statistics.
        train_input, mean, std_dev = standardize_data(train_input)
        test_input, _, _ = standardize_data(test_input, mean, std_dev)

    return train_input, train_target, test_input, test_target
Esempio n. 2
0
def import_data(folder='./data_bci',
                use_full_data=False,
                normalize=True,
                cuda=True):
    """Utility function to import the data.

    :argument folder: where to store the data
    :argument use_full_data: boolean to use the 100Hz data (False) or the full
        1KHz data (True)
    :argument normalize: normalize the data with the train mean and std
    :argument cuda: move the tensors to the GPU when one is available
    :returns: (train_input, train_target, test_input, test_target)
    """
    train_input, train_target = bci.load(root=folder, one_khz=use_full_data)
    test_input, test_target = bci.load(root=folder,
                                       train=False,
                                       one_khz=use_full_data)

    # Only move to the GPU when requested *and* actually available.
    if torch.cuda.is_available() and cuda:
        train_input, train_target = train_input.cuda(), train_target.cuda()
        test_input, test_target = test_input.cuda(), test_target.cuda()

    # (Removed two no-op self-assignments that were here in the original.)

    if normalize:
        (train_input, train_target), (test_input, test_target) = \
            normalize_data((train_input, train_target), (test_input, test_target))

    return train_input, train_target, test_input, test_target
Esempio n. 3
0
def generate_data(PATH):
    """
        Fetch the BCI train and test splits from disk.
        Args:
            PATH: path to the directory where the data is stored
    """
    loaded = {flag: bci.load(root=PATH, train=flag) for flag in (True, False)}
    train_input, train_target = loaded[True]
    test_input, test_target = loaded[False]
    logging.info('[+][+][+] data generated')

    return train_input, train_target, test_input, test_target
def getTrainData():
    """Load the BCI training split, standardize it in place, and wrap it in Variables."""
    train_input, train_target = bci.load(root='./data_bci')
    # Standardize in place with the global mean/std of the training inputs.
    mu, sigma = train_input.mean(), train_input.std()
    train_input.sub_(mu).div_(sigma)
    # Legacy pre-0.4 PyTorch autograd wrappers; gradients flow into the input.
    train_input = Variable(train_input, requires_grad=True)
    train_target = Variable(train_target)
    return train_input, train_target
Esempio n. 5
0
def load_dataset(train=True):
    '''Return the standard (100Hz) version of the dataset, wrapped in Variables.
    '''
    inputs, labels = dlc_bci.load('../data', train=train)
    return Variable(inputs), Variable(labels)
Esempio n. 6
0
def import_data(flatten=False, one_khz=False, train_size=270):
    """Load the BCI data, carve out a validation split, and one-hot the targets.

    Args:
        flatten: if True, discard the time structure and treat every
            (channel, time) value as an independent feature.
        one_khz: if True, load the 1000Hz recordings instead of the 100Hz ones.
        train_size: number of samples kept for training; the rest of the
            original training split becomes the validation set.

    Returns:
        (train_data, val_data, test_data) TensorDatasets with one-hot targets.
    """
    # NOTE(review): the original root was './ data_bci' (embedded space) —
    # normalized to './data_bci' to match the sibling loaders.
    train_input, train_target = bci.load(root='./data_bci', one_khz=one_khz)
    print("Original data format: ")
    print(str(type(train_input)), train_input.size())
    print(str(type(train_target)), train_target.size())
    test_input, test_target = bci.load(root='./data_bci', train=False, one_khz=one_khz)
    print(str(type(test_input)), test_input.size())
    print(str(type(test_target)), test_target.size())

    # split traindata into train and validation datasets
    val_input, val_target = train_input[train_size:], train_target[train_size:]
    train_input, train_target = train_input[:train_size], train_target[:train_size]

    # if flatten then "discard" the time information, and just look at every
    # feature at a certain time as a feature
    if flatten:
        train_input = flatten_input_data(train_input)
        val_input = flatten_input_data(val_input)
        test_input = flatten_input_data(test_input)

    train_data = TensorDataset(train_input, train_target)
    val_data = TensorDataset(val_input, val_target)
    test_data = TensorDataset(test_input, test_target)

    # One-hotting targets (2 classes) for every split; this relies on the
    # legacy TensorDataset attributes data_tensor/target_tensor.
    for data in (train_data, val_data, test_data):
        labels = data.target_tensor
        data.target_tensor = torch.LongTensor(labels.size(0), 2).zero_().scatter_(
            1, labels.view(-1, 1), 1)

    print("Modified train_data.data_tensor shape: ", train_data.data_tensor.shape)
    print("Modified train_data.target_tensor shape: ", train_data.target_tensor.shape)
    print("val_data.data_tensor shape: ", val_data.data_tensor.shape)
    return train_data, val_data, test_data
Esempio n. 7
0
 def __init__(self, data_path = './data_bci', train = True, one_khz = False, filter = False, robust_scaler = False, 
             num_samples = 20, shift = 10, force_cpu = False):
     """Load the BCI split and optionally band-filter/scale the inputs.

     Args:
         data_path: directory holding the dataset files.
         train: load the training split (True) or the test split (False).
         one_khz: load the 1000Hz recordings instead of the 100Hz ones.
         filter: if True, run `preprocessing` on the inputs.
         robust_scaler: forwarded to `preprocessing` as ignore_outliers.
         num_samples: unused in this excerpt — presumably consumed further
             down in the class; verify against the full source.
         shift: unused in this excerpt — see note above.
         force_cpu: if True, never move tensors to the GPU.
     """
     # Load data
     self.input, self.target =  dlc_bci.load(root = data_path, one_khz = one_khz, train = train)
     self.train = train
     self.force_cpu = force_cpu
     
     print('Input data loaded (size = {})'.format(self.input.shape))
     print('Target data loaded (size = {})'.format(self.target.shape))
     
     # Filtering: pick the sampling frequency matching the loaded resolution.
     if filter:
         if one_khz:
             fs = 1000
         else:
             fs = 100
         self.input = preprocessing(self.input, ignore_outliers = robust_scaler, fs = fs)
         
         # NOTE(review): only the *filtered* input is moved to the GPU here;
         # with filter=False the data stays on CPU — confirm that is intended.
         if torch.cuda.is_available() and not force_cpu:
             self.input = self.input.cuda()
Esempio n. 8
0
def load_dataset_1000hz(train=True):
    ''' Return the 1000hz version of the dataset.

    The dataset is downsampled. The original size of each sample is 28x500.
    The dataset that is returned is sampled with dilation 10 and stride 1 from
    the original dataset, so each sample becomes 28x50 and there is 10 times
    more data.
    '''
    dataset, target = dlc_bci.load('../data', train=train, one_khz=True)

    # Take every 10th time step starting at each of the 10 possible offsets;
    # each offset contributes one full copy of the labels, in the same order.
    slices = [dataset[:, :, range(offset, dataset.shape[2], 10)]
              for offset in range(10)]
    labels = [target for _ in range(10)]

    downsampled_dataset = Variable(torch.cat(slices))
    downsampled_target = Variable(torch.cat(labels))

    return downsampled_dataset, downsampled_target
Esempio n. 9
0
"""
Created on Mon Apr 30 16:02:13 2018

@author: Bob
"""
import torch
from torch.autograd import Variable
from torch import nn

import Nets
import Validate
import Visualizations as Vis
import dlc_bci as bci

###Fetch the data
# NOTE(review): the original roots read './ data_bci' (stray space) and
# './data˙bci' (non-ASCII dot-above character) — both normalized to
# './data_bci', the path used by the sibling examples.
train_input, train_target = bci.load(root='./data_bci')
print('train_input:', str(type(train_input)), train_input.size())
print('train_target:', str(type(train_target)), train_target.size())
test_input, test_target = bci.load(root='./data_bci', train=False)
print('test_input:', str(type(test_input)), test_input.size())
print('test target:', str(type(test_target)), test_target.size())

# Setting the data to the Variable type (legacy pre-0.4 PyTorch autograd API)
train_input, train_target = Variable(train_input), Variable(train_target)
test_input, test_target = Variable(test_input), Variable(test_target)

# Use cuda if available
if torch.cuda.is_available():
    train_input, train_target = train_input.cuda(), train_target.cuda()
    test_input, test_target = test_input.cuda(), test_target.cuda()
Esempio n. 10
0
import torch.optim as optim
from torch import Tensor
from torch import nn
from torch.autograd import Variable

from utility import *
from models import *

np.random.seed(
    seed=7
)  #fix the random seed in order to be able to reproduce the results in the report

################################################################################################
################################### LOAD THE DATA ##############################################
################################################################################################
# 100Hz version; download=False assumes the files already exist under
# ./data_bci_100Hz — TODO confirm.
train_input_100, train_target_100 = dlc_bci.load(root='./data_bci_100Hz',
                                                 download=False)
test_input_100, test_target_100 = dlc_bci.load(root='./data_bci_100Hz',
                                               download=False,
                                               train=False)

# 1000Hz version (one_khz=True), stored in a separate directory.
train_input_1000, train_target_1000 = dlc_bci.load(root='./data_bci_1000Hz',
                                                   download=False,
                                                   one_khz=True)
test_input_1000, test_target_1000 = dlc_bci.load(root='./data_bci_1000Hz',
                                                 download=False,
                                                 train=False,
                                                 one_khz=True)

# Report the three dimensions of the 100Hz training input tensor.
print("Train input 100 Hz: {:d}x{:d}x{:d}".format(
    *(s for s in train_input_100.size())))
print("Train target 100 Hz: {:d}".format(*(s
Esempio n. 11
0
from models.recurrent_model import RecurrentModel
from models.sequential_autopick import SequentialAutopick
from models.convolutional_model import ConvolutionalModel
import dlc_bci as bci
import argparse
from util.configuration import get_args, get_model, setup_log
from util.data_util import *
from run import run_model, test_model, run_k_fold, train_model
import time
import math
import numpy as np

opt = get_args(argparse.ArgumentParser())
log = setup_log(opt)

# NOTE(review): the identifiers 'train_̇input' / 'train_̇target' below contain
# an invisible combining-dot character (U+0307) after the underscore — almost
# certainly a copy/paste artifact. Left as-is because later (unseen) code may
# reference them; consider a file-wide rename to plain ASCII.
train_̇input, train_̇target = bci.load(root='./data', train=True, store_local=True, one_khz=opt['one_khz'])
test_input, test_target = bci.load(root='./data', train=False, store_local=True, one_khz=opt['one_khz'])

# Number of samples per fold for k-fold cross-validation.
split = math.floor(train_̇input.size()[0]/ opt['k_fold'])

train_dataset = Dataset(opt, train_̇input, train_̇target, log, 'train')
test_dataset = Dataset(opt, test_input, test_target, log, 'test')

# toy_input, toy_target = generate_toy_data()
# toy_dataset = Dataset(toy_input, toy_target, 'train', remove_DC_level=False, normalize=False)

log.info('[Data loaded.]')

# Hyper-parameter choices for the recurrent model run.
opt['model'] = 'Recurrent'
dropout = [0.2, 0.5]
hidden_units = [20]
Esempio n. 12
0
import os
import torch
from torch import Tensor, nn, optim
from torch.nn import functional as F
from torch.autograd import Variable

import matplotlib.pyplot as plt

from dlc_bci import load

root_dir = os.getcwd()

# Load train, test data from the current working directory.
#316x28x50 (samples x channels x time-steps)
train_input, train_target = load(root_dir, train=True)
#100x28x50
test_input, test_target = load(root_dir, train=False)

#train_target = train_target.type(FloatTensor)
#test_target = test_target.type(FloatTensor)

# Wrap in autograd Variables (legacy pre-0.4 PyTorch API).
train_input, train_target = Variable(train_input), Variable(train_target)
test_input, test_target = Variable(test_input), Variable(test_target)

# limit train to 300 for batch gradient
train_input = train_input[:300]
train_target = train_target[:300]

"""
Input: N x 28 x 50
Conv1: N x 32 x 45
def getTestData():
    """Load the BCI test split, standardize it in place, and wrap it in Variables."""
    test_input, test_target = bci.load(root='./data_bci', train=False)
    # Standardize with the test set's own global mean/std (not the train stats).
    mu, sigma = test_input.mean(), test_input.std()
    test_input.sub_(mu).div_(sigma)
    return Variable(test_input), Variable(test_target)
Esempio n. 14
0
##############################################################################
# base parameters
##############################################################################
max_epochs=50
batchsize=2
torch.manual_seed(42)  # reproducible torch-side randomness
np.random.seed(42)  # reproducible numpy-side randomness
verbose = False
##############################################################################
# Load data/target of train/test
##############################################################################
import dlc_bci as bci


train_input , train_target = bci.load(root='./dataset/',one_khz = False)
test_input , test_target = bci.load(root = './dataset/', one_khz = False,train = False )
# k-fold cross validation
# NOTE(review): random_state only has an effect when shuffle=True; recent
# scikit-learn versions reject this combination — verify the sklearn version.
kf = KFold(n_splits=5, random_state=42)

train_dataset=torch.utils.data.TensorDataset(train_input, train_target)
test_dataset=torch.utils.data.TensorDataset(test_input, test_target)

# Base model
class Net(nn.Module):

    def __init__(self,nb_hidden, p_drop=0, nb_hidden2=1000):
        super(Net, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
Esempio n. 15
0
import dlc_bci as bci
import numpy as np
import torch
from torch.autograd import Variable

from neural_1d import Net_1D
from weight_initial import weights_init
from data_augmentation import *
from helper import *

# Import data
train_input_original, train_target_original = bci.load(root='./data_bci')
test_input, test_target = bci.load(root='./data_bci', train=False)

# Data augmentation by sampling and interpolation to form new data.
# NOTE(review): the arguments (50, 0) to interpolation() are opaque from
# here — verify their meaning in data_augmentation.
data_resample = interpolation(train_input_original, 50, 0)
# Append the first 200 resampled samples, paired with the labels of the
# first 200 original samples.
train_input = np.concatenate(
    (np.array(train_input_original), data_resample[0:200]), axis=0)
train_input = torch.Tensor(train_input)
train_target = np.concatenate(
    (np.array(train_target_original), np.array(train_target_original[0:200])),
    axis=0)
train_target = torch.LongTensor(train_target)

# Normalization: the test split reuses the *training* mean/std.
mu, std = train_input.mean(), train_input.std()
train_input.sub_(mu).div_(std)
test_input.sub_(mu).div_(std)

# Convert to Variable (legacy pre-0.4 PyTorch autograd API)
train_input, train_target = Variable(train_input), Variable(train_target)
import torch
from torch import Tensor
from torch.autograd import Variable

import numpy as np
np.random.seed(1)

import dlc_bci as bci
from helpers import *
from modules import *

print("Importing and preprocessing the data...")
# import full data sampled at 1Khz
train_input, train_target = bci.load(root='./data_bci', one_khz=True)
test_input, test_target = bci.load(root='./data_bci',
                                   train=False,
                                   one_khz=True)

# normalization: per-channel statistics expanded back to (channels, time).
# NOTE(review): the spread is computed as .std(2).std(0) — a std of the
# per-sample stds — where a mean of stds (or one flattened std) would be
# more conventional; verify this is intentional.
train_mean = train_input.mean(2).mean(0).unsqueeze(1).expand(
    -1, train_input.size(2))
train_std = train_input.std(2).std(0).unsqueeze(1).expand(
    -1, train_input.size(2))

# NOTE(review): test statistics come from the test set itself rather than
# reusing the train statistics — confirm intent.
test_mean = test_input.mean(2).mean(0).unsqueeze(1).expand(
    -1, test_input.size(2))
test_std = test_input.std(2).std(0).unsqueeze(1).expand(-1, test_input.size(2))

# Center the data in place (the std tensors are presumably applied below).
train_input.sub_(train_mean)
test_input.sub_(test_mean)
Esempio n. 17
0
import torch
from torch import nn
from torch.autograd import Variable

# customized libraries
import dlc_bci as bci
import plot_lib as plib
import preprocess as prep
from nn_models import ConvNet3, LSTM

if __name__ == "__main__":

    print("Loading the dataset......")
    # load dataset (100Hz version)
    tr_input_org, tr_target_org = bci.load("bci", train=True, one_khz=False)
    te_input_org, te_target_org = bci.load("bci", train=False, one_khz=False)

    # L2-normalization along dim=0 (the sample dimension).
    # NOTE(review): normalizing across samples rather than per sample or per
    # channel is unusual — confirm this is intended.
    tr_input_org = torch.nn.functional.normalize(tr_input_org, p=2, dim=0)
    te_input_org = torch.nn.functional.normalize(te_input_org, p=2, dim=0)

    # create outputs with one hot encoding
    tr_target_onehot = prep.convert_to_one_hot_labels(tr_input_org,
                                                      tr_target_org)
    te_target_onehot = prep.convert_to_one_hot_labels(te_input_org,
                                                      te_target_org)

    # convert output to variable (legacy pre-0.4 PyTorch autograd API)
    tr_target_onehot = Variable(tr_target_onehot)
    te_target_onehot = Variable(te_target_onehot)
Esempio n. 18
0
# imports from this same folder
import dlc_bci as bci
import baseline
from nets import MyNet
from nets import MyNet2
from nets import MyNet3
import helpers
from helpers import BCELoss
from helpers import N_true

print('Version of PyTorch:', torch.__version__,', and we used: 0.4.0')


##### Import data
big_data = True # take the high-resolution (1KHz) signal for training
train_input, train_target = bci.load(root ="./data_bci",download=True, one_khz=big_data)
# do the predictions on the low resolution signal
# NOTE(review): train is loaded at 1KHz but test at 100Hz, so the splits have
# different time lengths — confirm downstream code accounts for this.
test_input, test_target = bci.load(root ="./data_bci", train = False, download=True, one_khz=False)
# prints infos about the shapes of the datasets
print(str(type(train_input)), train_input.size())
print(str(type(train_target)), train_target.size())
print(str(type(test_input)), test_input.size())
print(str(type(test_target)), test_target.size())


##### normalize the data (per position, statistics over the sample dimension)
# NOTE(review): torch.std's third positional argument is `unbiased`, not
# `keepdim`, so the std tensor is not keepdim'ed here (broadcasting still
# makes the expression valid) — confirm this matches the intent.
train_input = (train_input - torch.mean(train_input,0,keepdim=True))/torch.std(train_input,0,True)
test_input = (test_input - torch.mean(test_input,0,keepdim=True))/torch.std(test_input,0,True)

##### Make non-neural-network baselines
print('---------- BASELINES ----------')