Exemplo n.º 1
0
from __future__ import print_function
import sys
import numpy as np
import chb

# Report the shapes and label counts of every leave-one-out split
# for the subject named on the command line.
subject = chb.load_dataset(sys.argv[1], tiger=True)
sys.stdout.flush()
num_seizures = subject.get_num()
for seizure in range(1, num_seizures + 1):
    train, trainlab, test, testlab = chb.loo_epoch(subject, seizure)

    print('Seizure %d of %d' % (seizure, num_seizures))
    # Same report for the train split and then the test split.
    for data, labels in ((train, trainlab), (test, testlab)):
        print(data.shape)
        print(labels.shape)
        print(sum(labels))
    sys.stdout.flush()
Exemplo n.º 2
0
from __future__ import print_function

import sys
import os

import numpy as np

import chb

# Print the record shape of every EEG file belonging to the subject
# named on the command line.
subjname = sys.argv[1]

subject = chb.load_dataset(subjname, tiger=True)
sys.stdout.flush()
print(subjname, 'rec shape:\n')
position = 1
for eeg in subject:
    print('%d:\t%s' % (position, eeg.get_rec().shape))
    sys.stdout.flush()
    position += 1
print('-' * 80)
print()
sys.stdout.flush()
Exemplo n.º 3
0
def main(subjname, compressed=True):
    """Normalize a subject's EEG channel layout and save all records as .npz.

    Certain CHB-MIT subjects carry extra/duplicate channels; this drops
    them so every record has a uniform channel count, rewrites the
    subject's metadata pickle when files were removed from the subject,
    and finally dumps all records into '<pth><subjname>.npz'.

    Parameters:
        subjname   -- CHB subject identifier, e.g. 'chb14'
        compressed -- use np.savez_compressed instead of np.savez

    Relies on module-level `pth`, `pickle`, `time`, `os`, `np`, `chb`.
    """
    subject = chb.load_dataset(subjname, tiger=True)
    sys.stdout.flush()

    # Channel rows removed for the 5-channel case; presumably duplicated
    # reference channels in these montages — TODO confirm against the
    # CHB-MIT channel listings.
    dup_channels = [4, 9, 12, 17, 22]

    def _strip(channels, skip=0):
        # Delete the given channel row(s) from every EEG record,
        # skipping the first `skip` files (their montage differs).
        for idx, eeg in enumerate(subject):
            if idx < skip:
                continue
            eeg.add_rec(np.delete(eeg.get_rec(), channels, 0))

    def _rewrite_pickle():
        # Files were deleted from the subject, so rebuild its metadata
        # pickle from the surviving files.
        newsubj = chb.CHBsubj(subject.get_name())
        for eeg in subject:
            newsubj.add_file(eeg.copy_meta())
        pklname = pth + subjname + '.p'
        # Guard the remove so a missing pickle doesn't abort the run.
        if os.path.exists(pklname):
            os.remove(pklname)
        # Context manager closes the handle even if pickling fails
        # (the original leaked the file object from open()).
        with open(pklname, 'wb') as fh:
            pickle.dump(newsubj, fh)

    if subjname in ('chb14', 'chb20', 'chb21', 'chb22'):
        _strip(dup_channels)
    elif subjname in ('chb18', 'chb19'):
        del subject[0]
        _strip(dup_channels)
        _rewrite_pickle()
    elif subjname == 'chb17':
        del subject[-1]
        _strip(dup_channels)
        _rewrite_pickle()
    elif subjname == 'chb16':
        del subject[-2:]
        _strip(dup_channels)
        _rewrite_pickle()
    elif subjname == 'chb11':
        _strip(dup_channels, skip=1)
    elif subjname == 'chb04':
        _strip(23, skip=6)
    elif subjname == 'chb09':
        _strip(23, skip=1)

    print('Saving...')
    savedict = {eeg.get_name(): eeg.get_rec() for eeg in subject}

    # time.clock() was removed in Python 3.8; prefer perf_counter when
    # available but stay runnable on the legacy interpreter.
    timer = getattr(time, 'perf_counter', None) or time.clock
    st = timer()
    savename = pth + subjname + '.npz'
    if os.path.exists(savename):
        os.remove(savename)
    if compressed:
        np.savez_compressed(savename, **savedict)
    else:
        np.savez(savename, **savedict)
    print('Done: %f seconds elapsed.' % (timer() - st))
Exemplo n.º 4
0
def main(subject='chb05',
         num_epochs=10,
         thresh=0.5,
         osr=1,
         usp=0,
         tiger=False,
         tag='test',
         plotter=False):
    """Leave-one-out training over every seizure of one subject.

    For each seizure, a fresh `nw.simple` network is trained on windows
    from the remaining seizures and evaluated on the held-out one. Fold
    model parameters are saved under './outputs/', and the per-fold
    predicted probabilities and true labels are saved to
    '<subject><tag>.npz'.

    Parameters:
        subject    -- CHB subject identifier
        num_epochs -- training epochs per fold
        thresh     -- decision threshold forwarded to nn_test
        osr, usp   -- over-/under-sampling parameters for loowinTrain
        tiger      -- passed through to chb.load_dataset
        tag        -- suffix for output file names
        plotter    -- show the training-error curve after each fold
    """
    # Load the dataset
    subj = chb.load_dataset(subject, tiger=tiger)
    sys.stdout.flush()
    batch_size = 10

    # time.clock() was removed in Python 3.8; prefer perf_counter when
    # available but stay runnable on the legacy interpreter.
    timer = getattr(time, 'perf_counter', None) or time.clock

    num_szr = subj.get_num()
    out_dict = {}
    for szr in range(1, num_szr + 1):
        print('\nLeave-One-Out: %d of %d' % (szr, num_szr))

        input_var = T.tensor4('inputs')
        target_var = T.ivector('targets')
        net = nw.simple(input_var)
        train_fn, val_fn, prob_fn = compile_model(input_var, target_var, net)

        train_err_list = [0] * num_epochs
        val_err_list = [0] * num_epochs

        print('=' * 80)
        print('| epoch\t\t| train loss\t\t| time\t')
        print('=' * 80)

        x_test, y_test = chb.loowinTest(subj, szr)
        for epoch in range(num_epochs):
            st = timer()
            # Fresh generator every epoch: it is exhausted after one pass.
            data = chb.loowinTrain(subj, szr, osr, usp)

            batch_train_errs = []
            for x_train, y_train in data:
                batch_train_errs.append(train_fn(x_train, y_train))
            epoch_train_err = np.mean(batch_train_errs)
            train_err_list[epoch] = epoch_train_err

            en = timer()
            print('| %d \t\t| %.6f\t\t| %.2f s' %
                  (epoch + 1, epoch_train_err, en - st))
            sys.stdout.flush()
        print('-' * 80)

        print('Training Complete.\n')
        if plotter:
            # BUG FIX: the original plotted undefined names `train_err`
            # and `val_err`, raising NameError whenever plotter=True.
            plt.figure()
            plt.plot(range(num_epochs), train_err_list,
                     label='Training error')
            plt.plot(range(num_epochs), val_err_list,
                     label='Validation error')
            plt.title('ConvNet Training')
            plt.xlabel('Epochs')
            plt.ylabel('Error')
            plt.legend()
            plt.show()

        test_err, y_pred, y_prob = nn_test(x_test, y_test, val_fn, prob_fn,
                                           batch_size, thresh)
        out_dict['_'.join(['prob', str(szr)])] = y_prob
        out_dict['_'.join(['true', str(szr)])] = y_test
        np.savez(
            ''.join(
                ['./outputs/', subject, 'model', 'LOO',
                 str(szr), tag, '.npz']),
            *lasagne.layers.get_all_param_values(net['out']))

    np.savez(''.join([subject, tag, '.npz']), **out_dict)
Exemplo n.º 5
0
def main(subject='chb05', num_epochs=10, tiger=False, plotter=False):
    """Leave-one-out training of the `nw.deep1` network for one subject.

    For each seizure: the first batch from the `chb.lgus` generator is
    held out as the test set, the last sample of each of the first 1000
    training batches is diverted into a validation set, and the rest is
    trained on. Per-fold predicted probabilities and true labels are
    saved to '<subject>deep.npz'.

    Parameters:
        subject    -- CHB subject identifier
        num_epochs -- training epochs per fold
        tiger      -- passed through to chb.load_dataset
        plotter    -- show train/validation error curves after each fold
    """
    # Load the dataset
    subj = chb.load_dataset(subject, tiger=tiger)
    sys.stdout.flush()
    batch_size = 10

    # time.clock() was removed in Python 3.8; prefer perf_counter when
    # available but stay runnable on the legacy interpreter.
    timer = getattr(time, 'perf_counter', None) or time.clock

    num_szr = subj.get_num()
    out_dict = {}
    for szr in range(1, num_szr + 1):
        print('\nLeave-One-Out: %d of %d' % (szr, num_szr))

        input_var = T.tensor4('inputs')
        target_var = T.ivector('targets')
        net = nw.deep1(input_var)
        train_fn, val_fn, prob_fn = compile_model(input_var, target_var, net)

        train_err_list = [0] * num_epochs
        val_err_list = [0] * num_epochs

        print('=' * 80)
        print('| epoch \t| train loss\t| val loss\t| time\t')
        print('=' * 80)

        x_test, y_test = 0, 0
        for epoch in range(num_epochs):
            st = timer()
            # make generator
            loo_gen = chb.lgus(subj,
                               szr,
                               batchsec=60,
                               drop_prob=0.8,
                               shuffle=True)
            # The first batch is the held-out test data (same every
            # epoch, so it is only really used after the last one).
            for batch in loo_gen:
                x_test, y_test = batch
                break
            # Validation buffers: one window per batch for 1000 batches.
            x_val = np.zeros((1000, 1, 23, 1280), dtype='float32')
            y_val = np.zeros((1000), dtype='int32')

            batch_train_errs = []
            for idx, batch in enumerate(loo_gen):
                x_train, y_train = batch
                if idx < 1000:
                    # Peel the last window of the batch off into the
                    # validation set; train on the remainder.
                    x_train, x_val[idx] = x_train[:-1], x_train[-1:]
                    y_train, y_val[idx] = y_train[:-1], y_train[-1:]
                err = train_fn(x_train, y_train)
                batch_train_errs.append(err)
            epoch_train_err = np.mean(batch_train_errs)
            train_err_list[epoch] = epoch_train_err

            batch_val_errs = [0] * int(1000 / batch_size)
            for idx, batch in enumerate(
                    iterate_minibatches(x_val, y_val, batch_size)):
                inputs, targets = batch
                err = val_fn(inputs, targets)
                batch_val_errs[idx] = err
            epoch_val_err = np.mean(batch_val_errs)
            val_err_list[epoch] = epoch_val_err

            en = timer()
            print('| %d \t\t| %.6f\t| %.6f\t| %.2f s' %
                  (epoch + 1, epoch_train_err, epoch_val_err, en - st))
        print('-' * 80)

        print('Training Complete.\n')
        if plotter:
            # BUG FIX: the original plotted undefined names `train_err`
            # and `val_err`, raising NameError whenever plotter=True.
            plt.figure()
            plt.plot(range(num_epochs), train_err_list,
                     label='Training error')
            plt.plot(range(num_epochs), val_err_list,
                     label='Validation error')
            plt.title('ConvNet Training')
            plt.xlabel('Epochs')
            plt.ylabel('Error')
            plt.legend()
            plt.show()

        test_err, y_pred, y_prob = nn_test(x_test, y_test, val_fn, prob_fn,
                                           batch_size)
        out_dict['_'.join(['prob', str(szr)])] = y_prob
        out_dict['_'.join(['true', str(szr)])] = y_test

    np.savez(''.join([subject, 'deep.npz']), **out_dict)