Exemplo n.º 1
0
def Test_model(Run, act, batch_size, Learning_rate):
    """Load a saved EEGNet/DeepConvNet checkpoint and report test accuracy.

    Args:
        Run: model name; 'EEGNet' selects EEGNet, anything else DeepConvNet.
        act: activation-function identifier used both to build the model and
            to locate the matching weight file.
        batch_size: batch size handed to the dataloader.
        Learning_rate: unused during testing; kept so the signature matches
            the training entry point.

    Side effects: changes cwd into './<model_name>' to find the weights.
    """
    Options = {'data_path': '../lab2', 'model': ['EEGNet', 'DeepConvNet']}
    model_name = Run
    activation = act
    # Build the requested architecture on CPU.
    if (Run == 'EEGNet'):
        model = EEGNet(activation).cpu()
    else:
        model = DeepConvNet(activation).cpu()
    # Weights are stored per-model as '<model>_<activation>.pkl'.
    Path = './' + model_name
    os.chdir(Path)
    PATH = model_name + '_' + activation + '.pkl'  #'.pt'
    model.load_state_dict(torch.load(PATH))
    model.eval()
    loss = nn.CrossEntropyLoss()

    train_loader, test_loader, train_size, test_size = dataloader.read_bci_data(
        Options['data_path'], batch_size)
    # Testing — gradients are not needed, so run under no_grad to save memory.
    test_acc = 0.0
    test_loss = 0.0
    with torch.no_grad():
        for i, data in enumerate(test_loader):
            test_pred = model(data[0].cpu())
            batch_loss = loss(test_pred, data[1].cpu())
            test_acc += np.sum(
                np.argmax(test_pred.cpu().data.numpy(), axis=1) == data[1].numpy())
            test_loss += batch_loss.item()
    test_acc = test_acc / test_size * 100.0
    print('Test Accuracy : {:.2f}%'.format(test_acc))
Exemplo n.º 2
0
def iter_test_act(model_func):
    """Train one model per activation function and collect the histories.

    Returns a dict mapping activation name -> record returned by train_val.
    """
    activations = {
        'Relu': torch.nn.ReLU,
        'Leaky_relu': torch.nn.LeakyReLU,
        'Elu': torch.nn.ELU
    }
    max_epoch = 300
    # Load data
    t_data, t_lbl, v_data, v_lbl = dataloader.read_bci_data('./dataset')
    # Loss function
    criterion = torch.nn.CrossEntropyLoss()
    # One train/val record per activation, keyed by its display name.
    act_rec = {}
    for act_name, act_cls in activations.items():
        print('-' * 77)
        print(f"[ INFO ]Testing activation function: {act_name}")
        net = model_func(activation=act_cls)
        net.to('cuda:0')
        # Fresh Adam optimizer for every model.
        adam = torch.optim.Adam(net.parameters(), lr=1e-3)
        act_rec[act_name] = train_val(net, criterion, adam, max_epoch,
                                      t_data, t_lbl, v_data, v_lbl)

    return act_rec
Exemplo n.º 3
0
def main():
    """Smoke-test EEGNet: print the architecture and one forward pass."""
    import dataloader
    net = EEGNet()
    net.eval()
    print(net)
    samples, _, _, _ = dataloader.read_bci_data('./dataset')
    # Single-sample batch (keeps the leading batch dimension).
    first = torch.Tensor(samples[0:1])
    print(net(first))
Exemplo n.º 4
0
def get_bci_dataloaders():
    """Wrap the BCI train/test splits into a pair of TensorDatasets.

    Returns [train_dataset, test_dataset]. Each label is sliced as
    y[i:i+1], so targets are 1-element tensors with an explicit dim.
    """
    train_x, train_y, test_x, test_y = read_bci_data()
    datasets = []
    for data, labels in ((train_x, train_y), (test_x, test_y)):
        x_tensor = torch.stack(
            [torch.Tensor(data[i]) for i in range(data.shape[0])])
        y_tensor = torch.stack(
            [torch.Tensor(labels[i:i + 1]) for i in range(labels.shape[0])])
        datasets.append(TensorDataset(x_tensor, y_tensor))

    return datasets
Exemplo n.º 5
0
def eval_model(model_func):
    """Load saved weights for every activation variant and print val accuracy."""
    activations = {
        'Relu': torch.nn.ReLU,
        'Leaky_relu': torch.nn.LeakyReLU,
        'Elu': torch.nn.ELU
    }
    # Load data; only the validation split is consumed below.
    t_data, t_lbl, v_data, v_lbl = dataloader.read_bci_data('./dataset')
    for act_name, act_cls in activations.items():
        net = model_func(activation=act_cls)
        weight_path = './weights/' + net.name + net.act_name + '.pt'
        try:
            weight = torch.load(weight_path)
        except FileNotFoundError:
            # No checkpoint for this variant: abort the whole evaluation.
            print(f'You have not saved weights for {net.name}')
            sys.exit()
        net.load_state_dict(weight)
        net.to('cuda:0')
        # Test
        net.eval()
        _, val_acc = inference(net, v_data, v_lbl)
        print(f"{act_name}: validation accuracy: {val_acc:.2%}")
Exemplo n.º 6
0
        )
        self.classify = nn.Sequential(
            nn.Linear(in_features=736, out_features=2, bias=True))

    def forward(self, x):
        """Forward pass: three conv stages, flatten, then the linear classifier."""
        out = self.firstconv(x)
        out = self.depthwiseConv(out)
        out = self.separableConv(out)
        # Flatten to (batch, 32 * 23) = (batch, 736), matching the Linear layer.
        return self.classify(out.view(-1, 32 * 23))


if __name__ == '__main__':
    # Load the four BCI splits and show their shapes before training.
    X_train, y_train, X_test, y_test = dataloader.read_bci_data()
    print("Training data: ", X_train.shape)
    print("Training label: ", y_train.shape)
    print("Testing data: ", X_test.shape)
    print("Testing label: ", y_test.shape)
    print("-" * 50)

    # Wrap the training split; shuffle is disabled, so every epoch sees the
    # samples in the same fixed order.
    train = LoadData(X_train, y_train)
    train_loader = torch.utils.data.DataLoader(
        train,
        batch_size=64,
        shuffle=False,
        drop_last=False,
        pin_memory=True,
    )
    test = LoadData(X_test, y_test)
Exemplo n.º 7
0
    plt.show()
    
def gen_dataset(train_x, train_y, test_x, test_y):
    """Build [train, test] TensorDatasets from raw arrays.

    Labels are sliced as y[i:i+1], so every target is a 1-element tensor.
    """
    datasets = []
    for data, labels in ((train_x, train_y), (test_x, test_y)):
        data_t = torch.stack(
            [torch.Tensor(data[i]) for i in range(data.shape[0])]
        )
        label_t = torch.stack(
            [torch.Tensor(labels[i:i + 1]) for i in range(labels.shape[0])]
        )
        datasets.append(TensorDataset(data_t, label_t))

    return datasets

# Materialize the train/test TensorDatasets from the raw BCI arrays.
train_dataset, test_dataset = gen_dataset(*dataloader.read_bci_data())

class EEGNet(nn.Module):
    def __init__(self, activation=None, dropout=0.25):
        super(EEGNet, self).__init__()
        
        if not activation:
            activation = nn.ELU
        
        self.firstconv = nn.Sequential(
            nn.Conv2d(
                1, 16, kernel_size=(1, 51),
                stride=(1,1), padding=(0,25), bias=False
            ),
            nn.BatchNorm2d(16)
        )
Exemplo n.º 8
0
def main(args):
    """Train the configured network(s) and log per-epoch accuracy.

    Uses argparse fields: batch, load, model, learning_rate, epochs.
    Saves the best-so-far weights to '<model>_lr<lr>_ep<epochs>.pkl' and the
    full accuracy history to a matching .json file.
    """
    train_x, train_y, test_x, test_y = read_bci_data()
    torch_dataset = Data.TensorDataset(
        torch.from_numpy(train_x.astype(np.float32)),
        torch.from_numpy(train_y.astype(np.float32)))
    train_loader = Data.DataLoader(dataset=torch_dataset,
                                   batch_size=args.batch,
                                   shuffle=True)

    net_dict = choose_net(args)
    model_fullname = {'eeg': 'EEGNet', 'dcn': 'DeepConvNet'}
    acc_dict = {}
    # Evaluation-only path: load a checkpoint, report test accuracy, and exit.
    if args.load:
        net_dict['relu'][0].load_state_dict(torch.load(args.load))
        net_dict['relu'][0].eval()
        test_accuracy = cal_accuracy(net_dict['relu'][0], test_x, test_y)
        print('test_acc: {:.4f}%'.format(test_accuracy * 100))
        return
    # net[0]: model, net[1]: optimizer, net[2]: loss_function
    for key, net in net_dict.items():
        acc_dict['train_{}'.format(key)] = []
        acc_dict['test_{}'.format(key)] = []
        optimizer, loss_func = handle_param(args, net[0])
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                         milestones=[50, 150],
                                                         gamma=0.5)
        # After this, net[3] is the scheduler.
        net.extend([optimizer, loss_func, scheduler])
    max_acc = 0
    file_name = '{}_lr{}_ep{}'.format(args.model, args.learning_rate,
                                      args.epochs)
    # start training
    for epoch in range(args.epochs):
        print('-' * 10, 'epoch', epoch + 1, '-' * 10)
        loss_dict = {}
        for key in net_dict.keys():
            loss_dict[key] = []
        # training
        for b_x, b_y in train_loader:
            for key, net in net_dict.items():
                # apply scheduler
                # NOTE(review): scheduler.step() runs once per *batch* and
                # before optimizer.step(); PyTorch expects it once per epoch
                # after the optimizer update, so the LR decays much earlier
                # than the [50, 150] epoch milestones suggest — confirm intent.
                net[3].step()
                output = net[0](b_x)
                loss = net[2](output, b_y.long())
                loss_dict[key].append(loss.data.numpy())
                net[1].zero_grad()
                loss.backward()
                net[1].step()
        # show loss and accuracy
        for key, net in net_dict.items():
            net[0].eval()
            train_accuracy = cal_accuracy(net[0], train_x, train_y)
            test_accuracy = cal_accuracy(net[0], test_x, test_y)
            # Checkpoint whenever test accuracy reaches a new maximum.
            if test_accuracy > max_acc:
                max_acc = test_accuracy
                torch.save(net[0].state_dict(), file_name + '.pkl')
            acc_dict['train_{}'.format(key)].append(train_accuracy)
            acc_dict['test_{}'.format(key)].append(test_accuracy)
            print('---------- {} ({}) ----------'.format(
                model_fullname[args.model], key))
            # NOTE(review): this prints the *maximum* batch loss of the epoch,
            # not the mean — verify that is the intended statistic.
            print(
                'training loss: {:.6f} | train_acc: {:.6f} | test_acc: {:.6f}'.
                format(max(loss_dict[key]), train_accuracy, test_accuracy))
            net[0].train()
    print('max_acc: {}'.format(max_acc))

    # save / show result
    # show_result(range(args.epochs), acc_dict, 'EEG')
    with open(file_name + '.json', 'w') as f:
        json.dump(
            {
                'x': list(range(args.epochs)),
                'y_dict': acc_dict,
                'title': model_fullname[args.model],
            },
            f,
            cls=NumpyEncoder)
Exemplo n.º 9
0
        self.label = torch.from_numpy(label).float()

    def __getitem__(self, index):
        # Return one (sample, label) pair for DataLoader indexing.
        return self.data[index], self.label[index]

    def __len__(self):
        # Dataset size = number of stored samples.
        return len(self.data)


# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hyper-parameters.
batch_size = 128
# NOTE(review): variable name is misspelled ('leanring_rate'); left unchanged
# because later (unseen) code may reference it by this exact name.
leanring_rate = 1e-3
epochs = 1000

train_data, train_label, test_data, test_label = dataloader.read_bci_data()

my_train_data = MyDataset(train_data, train_label)

my_test_data = MyDataset(test_data, test_label)

train_data_loader = torch.utils.data.DataLoader(my_train_data,
                                                batch_size=batch_size,
                                                shuffle=True)

test_data_loader = torch.utils.data.DataLoader(my_test_data,
                                               batch_size=batch_size,
                                               shuffle=True)

# 1-based epoch indices, used as the x-axis when plotting curves.
x_axis = [i + 1 for i in range(epochs)]
Exemplo n.º 10
0
# Command-line options; `parser` itself is created above, outside this excerpt.
parser.add_argument('--epochs',
                    default=3000,
                    type=int,
                    help="number of total epochs to run")
parser.add_argument('--lr',
                    '--learning-rate',
                    default=1e-3,
                    type=float,
                    help="initial learning rate")
parser.add_argument('--checkpoint', type=str, help="name of checkpoint file")
args = parser.parse_args()

CHECKPOINT = args.checkpoint

# Prepare for data
train_X, train_y, test_X, test_y = dataloader.read_bci_data()
train_X = torch.from_numpy(train_X).float()
train_y = torch.from_numpy(np.array(train_y)).long()
test_X = torch.from_numpy(test_X).float()
test_y = torch.from_numpy(np.array(test_y)).long()

# DataLoader
train_dataset = TensorDataset(train_X, train_y)
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size)

# Construct desired model
# NOTE(review): model stays None if args.model matches neither branch below.
model = None
if args.model == "EEGNet":
    model = EEGNet(activation_function=args.activation_function)
elif args.model == "DeepConvNet":
    model = DeepConvNet(activation_function=args.activation_function)
Exemplo n.º 11
0
import numpy as np

# print(torch.cuda.is_available())

# hyper parameters
epochs = 2000
batch_size = 64
lr = 1e-2
# Pre-instantiated activation modules, keyed by display name.
activation_functions = {"ReLU":nn.ReLU(), "LeakyReLU":nn.LeakyReLU(), "ELU":nn.ELU()}

# history: per-activation accuracy curves and the best test accuracy seen.
acc_history   = {"ReLU":{}, "LeakyReLU":{}, "ELU":{}}
best_test_acc = {"ReLU":0 , "LeakyReLU":0 , "ELU":0 }

# load data
train_X, train_Y, test_X, test_Y = read_bci_data()
train_data = DataLoader(list(zip(train_X, train_Y)), batch_size=batch_size, shuffle=True, num_workers=0)
test_data  = DataLoader(list(zip(test_X, test_Y))  , batch_size=batch_size, shuffle=True, num_workers=0)

def evaluate(net, data):
    # Count correct predictions of `net` over the batches in `data`.
    # NOTE(review): no return statement is visible in this excerpt — the
    # function likely continues (e.g. returns acc_count / data_count); confirm.
    acc_count = 0
    data_count = 0
    for idx, data_batch in enumerate(data):
        inputs = data_batch[0].float().cuda(0)
        labels = data_batch[1].long()
        outputs = net(inputs)
        # Predicted class = argmax over the output logits.
        predicts = np.argmax(outputs.cpu().detach().numpy(), axis = 1)
        # NOTE(review): this inner `idx` shadows the enumerate() index above;
        # harmless here since the outer idx is not reused, but confusing.
        for idx in range(len(predicts)):
            if predicts[idx] == labels[idx]:
                acc_count +=1
            data_count+=1
Exemplo n.º 12
0
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as utils

import matplotlib.pyplot as plt
import numpy as np

from dataloader import read_bci_data

train_data, train_label, test_data, test_label = read_bci_data()

# Detect GPU availability; CUDA flag is used by later (unseen) code.
CUDA = False
if torch.cuda.is_available():
    device = torch.device("cuda")
    CUDA = True
    print("Cuda Available !")

tensor_train_data = torch.stack([torch.Tensor(i) for i in train_data]) # transform to torch tensors
tensor_train_label = torch.from_numpy(train_label)

dataset = utils.TensorDataset(tensor_train_data, tensor_train_label) # create your dataset
dataloader = utils.DataLoader(dataset, batch_size=64, shuffle=True, num_workers=4) # create your dataloader

tensor_test_data = torch.stack([torch.Tensor(i) for i in test_data]) # transform to torch tensors
tensor_test_label = torch.from_numpy(test_label)

test_dataset = utils.TensorDataset(tensor_test_data, tensor_test_label) # create your dataset
test_dataloader = utils.DataLoader(test_dataset, batch_size=64, shuffle=False, num_workers=4) # create your dataloader
Exemplo n.º 13
0
def Process(model, activation, batch_size, epoch, Learning_rate):
    """Train and test EEGNet/DeepConvNet, logging per-epoch accuracy to CSV.

    Args:
        model: 'EEGNet' selects EEGNet; any other value selects DeepConvNet.
        activation: activation-function name; embedded in the output filenames.
        batch_size: mini-batch size passed to the dataloaders.
        epoch: number of training epochs.
        Learning_rate: Adam learning rate.

    Side effects:
        Changes cwd to '../<model_name>' and writes
        '<model>_<act>_train.csv', '<model>_<act>_test.csv' and the final
        weights '<model>_<act>.pkl' there.
    """
    Options = {'data_path': './lab2', 'model': ['EEGNet', 'DeepConvNet']}

    # Model initialization
    Run = model  #int(input())
    if (Run == 'EEGNet'):
        model_name = Options['model'][0]
        activation_function = activation
        model = EEGNet(activation_function)
        model.cuda()
    else:
        model_name = Options['model'][1]
        activation_function = activation
        model = DeepConvNet(activation_function)
        model.cuda()

    # Hyper Parameter setting
    Batch_size = batch_size
    num_epochs = epoch
    lr = Learning_rate
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # plot file setting
    train_record = model_name + '_' + activation_function + '_train.csv'
    test_record = model_name + '_' + activation_function + '_test.csv'
    model_weight = model_name + '_' + activation_function + '.pkl'  #'.pt'
    print('Dealing with ' + train_record)
    print('Dealing with ' + test_record)

    # Read file, get the data which are wrapped into dataloader
    train_loader, test_loader, train_size, test_size = dataloader.read_bci_data(
        Options['data_path'], Batch_size)
    os.chdir('../' + model_name)
    # Training & Testing & write results to file
    with open(train_record, 'w', newline='') as csvfile:
        write_train = csv.writer(csvfile)
        with open(test_record, 'w', newline='') as csvFile:
            write_test = csv.writer(csvFile)

            for epoch in range(num_epochs):
                # Training
                train_acc = 0.0
                train_loss = 0.0
                model.train()
                for i, data in enumerate(train_loader):
                    optimizer.zero_grad()
                    #Variable()
                    train_pred = model(data[0].cuda())
                    batch_loss = loss(train_pred, data[1].cuda())
                    batch_loss.backward()
                    optimizer.step()

                    # BUGFIX: predictions must be moved to the CPU before
                    # .numpy(); calling .numpy() on a CUDA tensor raises
                    # TypeError (the original used .cuda() here).
                    train_acc += np.sum(
                        np.argmax(train_pred.cpu().data.numpy(), axis=1) ==
                        data[1].numpy())
                    train_loss += batch_loss.item()
                train_acc = train_acc / train_size * 100.0
                print('# {} epoch, Train Accuracy : {:.2f}%'.format(
                    epoch + 1, train_acc))
                write_train.writerow([epoch, train_acc])

                # Testing
                test_acc = 0.0
                test_loss = 0.0
                model.eval()
                for i, data in enumerate(test_loader):
                    test_pred = model(data[0].cuda())
                    batch_loss = loss(test_pred, data[1].cuda())

                    # BUGFIX: same .cuda() -> .cpu() fix as in the train loop.
                    test_acc += np.sum(
                        np.argmax(test_pred.cpu().data.numpy(), axis=1) ==
                        data[1].numpy())
                    test_loss += batch_loss.item()
                test_acc = test_acc / test_size * 100.0
                print('# {} epoch, Test Accuracy : {:.2f}%'.format(
                    epoch + 1, test_acc))
                write_test.writerow([epoch, test_acc])
    torch.save(model.state_dict(), model_weight)
Exemplo n.º 14
0
import argparse
import dataloader

import numpy as np

from EEGNet import EEGNet

# Argparse
parser = argparse.ArgumentParser(description=f'Runnning EEG Classification')
parser.add_argument('-a', '--activation-function', default="LeakyReLU", type=str, help="desired type of activation function")
parser.add_argument('-m', '--model', default="EEGNet", type=str, help="select model")
parser.add_argument('--checkpoint', default="./checkpoint/EEGNet_demo.pth", type=str, help="name of checkpoint file")
args = parser.parse_args()

# Prepare for data
_, _, test_X, test_y = dataloader.read_bci_data()
test_X = torch.from_numpy(test_X).float()
test_y = torch.from_numpy(np.array(test_y)).long()

# Select model
model = EEGNet(args.activation_function)

# go into evaluation mode (mainly for dropout & batch_normalization)
# NOTE(review): eval() is called before the weights are loaded below; module
# modes survive load_state_dict, but confirm nothing depends on the order.
model.eval()

# Load weight file; checkpoint is a dict holding the weights under 'state_dict'.
model = model.cuda()
checkpoint = torch.load(args.checkpoint)
model.load_state_dict(checkpoint['state_dict'])

test_X = test_X.cuda()
Exemplo n.º 15
0
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print('Accuracy: %f %%' % (100 * correct / total), comment)
    return (100.0 * correct) / total


MAX_EPOCH = 100
LR = 0.01

#path = 'G:/VM_SYNC/Deeplearning/lab2'
#os.chdir('G:/VM_SYNC/Deeplearning/lab2')

# This script runs entirely on CPU.
device = torch.device('cpu')
# a, b = train data/labels; c, d = test data/labels.
a, b, c, d = dataloader.read_bci_data()

train_x = torch.tensor(a, dtype=torch.float, device=device)
train_y = torch.tensor(b, dtype=torch.long, device=device)

test_x = torch.tensor(c, dtype=torch.float, device=device)
test_y = torch.tensor(d, dtype=torch.long, device=device)
test_dataset = Data.TensorDataset(test_x, test_y)
# One large batch (1080) so the whole test set is evaluated in a single pass.
test_loader = Data.DataLoader(dataset=test_dataset,
                              batch_size=1080,
                              shuffle=False,
                              num_workers=0)

#netA = DeepConvNet('ELU')
#netB = EGG('ELU')
#input()
Exemplo n.º 16
0
if __name__ == '__main__':
    # parameters
    lr_step = 200
    args, name = parser()
    LR = args.lr
    EPOCH = args.EPOCH
    #BSIZE = args.BSIZE
    BSIZE = 2000  # batch size is hard-coded here, overriding the CLI value
    GAMMA = args.GAMMA
    MILESTONES = [int(ms) for ms in args.MILESTONES]
    print(name)

    device = torch.device(f'cuda:{args.gpu}')

    # load data
    tr_X, tr_y, ts_X, ts_y = read_bci_data()
    tr_X, tr_y, ts_X, ts_y = torch.from_numpy(tr_X), torch.from_numpy(
        tr_y), torch.from_numpy(ts_X), torch.from_numpy(ts_y)
    train_dataloader = Data.DataLoader(Data.TensorDataset(tr_X, tr_y),
                                       batch_size=BSIZE)
    # Test loader uses a single batch covering the whole test set.
    test_dataloader = Data.DataLoader(Data.TensorDataset(ts_X, ts_y),
                                      batch_size=len(ts_y))

    # model
    loss_func = nn.CrossEntropyLoss()
    if args.MODEL == 'EEG':
        model = EEGNet(args.activate)
    elif args.MODEL == 'deep':
        model = DeepConvNet(args.activate)
    model = model.to(device)
    #print(model)