# Copyright : (c) UC Regents, Emre Neftci
# Licence : GPLv2
#----------------------------------------------------------------------------- 
from decolle.lenet_decolle_model import LenetDECOLLE
from decolle.utils import parse_args, train, test, accuracy, save_checkpoint, load_model_from_checkpoint, prepare_experiment, write_stats
import datetime, os, socket, tqdm
import numpy as np
import torch
import importlib
args = parse_args('parameters/params.yml')
device = args.device


starting_epoch = 0

params, writer, dirs = prepare_experiment(name='lenet_decolle_{0}'.format(device), args = args)
log_dir = dirs['log_dir']
checkpoint_dir = dirs['checkpoint_dir']

dataset = importlib.import_module(params['dataset'])
create_dataloader = dataset.create_dataloader


verbose = args.verbose

## Load Data
gen_train, gen_test = create_dataloader(chunk_size_train=params['chunk_size_train'],
                                  chunk_size_test=params['chunk_size_test'],
                                  batch_size=params['batch_size'],
                                  dt=params['deltat'], num_workers=params['num_dl_workers'])
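
# A minimal usage sketch (an assumption, not part of the original script):
# draw one probe batch to sanity-check tensor shapes before building a model,
# as the later examples in this listing do.
# data_batch, target_batch = next(iter(gen_train))
# print(data_batch.shape)  # [batch, time, ...spatial dims]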
Example #2
# Imports needed by this example: DECOLLELoss, LIFLayer and cross_entropy_one_hot
# come from the public decolle package; LenetDECOLLE1DMN is assumed to live in a
# project-local module (it is not part of public decolle), so the import below is
# hypothetical.
from decolle.lenet_decolle_model import DECOLLELoss, LIFLayer
from decolle.utils import cross_entropy_one_hot
# from lenet_decolle_model_1d_mn import LenetDECOLLE1DMN  # hypothetical local module


def main():
    # fold = 'C:/Users/stanzarella/data/planned_params/FirstDefinitiveRun/'
    file = 'params_MN_5cl_allcycle_cs150_ov0.5.yml'
    np.set_printoptions(precision=4)
    # args = parse_args('parameters/params.yml')
    # args = parse_args('parameters/params_MN.yml')
    args = parse_args('parameters/' + file)
    # args = parse_args('parameters/params_MN_multipar.yml')
    # args = parse_args('parameters/params_MN_multipar2.yml')
    # args = parse_args('samples/params_to_test/0_a.yml')
    device = args.device

    CV = 1  # number of cross-validation repetitions (the loop index is unused below)

    for cv in range(0, CV):
        starting_epoch = 0

        params, writer, dirs = prepare_experiment(
            name=os.path.splitext(os.path.basename(__file__))[0], args=args)
        log_dir = dirs['log_dir']
        checkpoint_dir = dirs['checkpoint_dir']

        dataset = importlib.import_module(params['dataset'])
        try:
            create_data = dataset.create_data
        except AttributeError:
            create_data = dataset.create_dataloader

        verbose = args.verbose

        ## Load Data

        gen_train, gen_test = create_data(
            root=params['filename'],
            chunk_size_train=params['chunk_size_train'],
            chunk_size_test=params['chunk_size_test'],
            overlap_size_train_perc=params['overlap_size_train_perc'],
            overlap_size_test_perc=params['overlap_size_test_perc'],
            perc_test_norm=params['perc_test_norm'],
            muscle_to_exclude=params['muscle_to_exclude'],
            class_to_include=params['class_to_include'],
            thr_firing_excl_slice=params['thr_firing_excl_slice'],
            batch_size=params['batch_size'],
            dt=params['deltat'],
            num_workers=params['num_dl_workers'])
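
        # The extra keyword arguments (overlap percentages, muscle_to_exclude,
        # class_to_include, ...) appear to be specific to this motor-neuron /
        # EMG dataset loader; see the dataset module named in params['dataset'].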

        # gen_train, gen_test = create_data(root=params['filename'],
        #                                   chunk_size_train=params['chunk_size_train'],
        #                                   chunk_size_test=params['chunk_size_test'],
        #                                   overlap_size_train_perc=params['overlap_size_train_perc'],
        #                                   overlap_size_test_perc=params['overlap_size_test_perc'],
        #                                   muscle_to_exclude=params['muscle_to_exclude'],
        #                                   batch_size=params['batch_size'],
        #                                   dt=params['deltat'],
        #                                   num_workers=params['num_dl_workers'])

        data_batch, target_batch = next(iter(gen_train))
        data_batch = torch.Tensor(data_batch).to(device)
        target_batch = torch.Tensor(target_batch).to(device)
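        # This probe batch is reused below to infer the input shape and to
        # initialize the network parameters (net.init_parameters).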

        input_shape = data_batch.shape[-3:]  # note: the model below uses params['input_shape'] instead

        # Backward compatibility with older parameter files
        if 'dropout' not in params.keys():
            params['dropout'] = [.5]

        if len(params['input_shape']) == 0:
            params['input_shape'] = [0]

        if params['input_shape'][0] == 0:
            params['input_shape'] = data_batch.shape[-len(params['input_shape']):]

        # alpha (membrane) and beta (synaptic) are per-layer decay constants of
        # the LIF neurons; broadcast scalars to one-element lists so the model
        # constructor can treat them uniformly.
        if not isinstance(params['alpha'], list):
            params['alpha'] = [params['alpha']]
        if not isinstance(params['beta'], list):
            params['beta'] = [params['beta']]

        ## Create Model, Optimizer and Loss
        net = LenetDECOLLE1DMN(out_channels=params['out_channels'],
                               Nhid=np.array(params['Nhid']),
                               Mhid=np.array(params['Mhid']),
                               kernel_size=params['kernel_size'],
                               pool_size=params['pool_size'],
                               input_shape=params['input_shape'],
                               alpha=np.array(params['alpha']),
                               alpharp=np.array(params['alpharp']),
                               dropout=params['dropout'],
                               beta=np.array(params['beta']),
                               num_conv_layers=params['num_conv_layers'],
                               num_mlp_layers=params['num_mlp_layers'],
                               lc_ampl=params['lc_ampl'],
                               lif_layer_type=LIFLayer,
                               method=params['learning_method'],
                               with_output_layer=True).to(device)

        if hasattr(params['learning_rate'], '__len__'):
            from decolle.utils import MultiOpt
            opts = []
            for i in range(len(params['learning_rate'])):
                opts.append(
                    torch.optim.Adamax(net.get_trainable_parameters(i),
                                       lr=params['learning_rate'][i],
                                       betas=params['betas']))
            opt = MultiOpt(*opts)
        else:
            opt = torch.optim.Adamax(net.get_trainable_parameters(),
                                     lr=params['learning_rate'],
                                     betas=params['betas'])
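        # Note: with a list-valued 'learning_rate', MultiOpt wraps one Adamax
        # optimizer per trainable-parameter group, letting each locally-trained
        # DECOLLE layer use its own learning rate.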

        reg_l = params['reg_l'] if 'reg_l' in params else None

        if 'loss_scope' in params and params['loss_scope'] == 'crbp':
            from decolle.lenet_decolle_model import CRBPLoss
            loss = torch.nn.SmoothL1Loss(reduction='none')
            decolle_loss = CRBPLoss(net=net, loss_fn=loss, reg_l=reg_l)
        else:
            loss = [torch.nn.SmoothL1Loss() for i in range(len(net))]
            if net.with_output_layer:
                loss[-1] = cross_entropy_one_hot
            decolle_loss = DECOLLELoss(net=net, loss_fn=loss, reg_l=reg_l)
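        # DECOLLELoss applies one readout loss per layer (DECOLLE's local
        # learning); when the model has an output layer, the last loss is a
        # one-hot cross-entropy instead of SmoothL1.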

        ## Initialize
        net.init_parameters(data_batch)

        ## Resume if necessary
        if args.resume_from is not None:
            print("Checkpoint directory " + checkpoint_dir)
            if not os.path.exists(checkpoint_dir) and not args.no_save:
                os.makedirs(checkpoint_dir)
            starting_epoch = load_model_from_checkpoint(checkpoint_dir, net, opt)
            print('Learning rate = {}. Resumed from checkpoint'.format(
                opt.param_groups[-1]['lr']))

        # Printing parameters
        if args.verbose:
            print('Using the following parameters:')
            m = max(len(x) for x in params)
            for k, v in params.items():
                print('{}{} : {}'.format(k, ' ' * (m - len(k)), v))

        print('\n------Starting training with {} DECOLLE layers-------'.format(
            len(net)))

        # --------TRAINING LOOP----------
        if not args.no_train:
            test_acc_hist = []
            for e in range(starting_epoch, params['num_epochs']):
                interval = e // params['lr_drop_interval']
                lr = opt.param_groups[-1]['lr']
                if interval > 0:
                    new_lr = np.array(params['learning_rate']) / (
                        interval * params['lr_drop_factor'])
                else:
                    new_lr = np.array(params['learning_rate'])
                print('Changing learning rate from {} to {}'.format(lr, new_lr))
                opt.param_groups[-1]['lr'] = new_lr

                if (e % params['test_interval']) == 0 and e != 0:
                    print('---------------Epoch {}-------------'.format(e))
                    if not args.no_save:
                        print('---------Saving checkpoint---------')
                        save_checkpoint(e, checkpoint_dir, net, opt)

                    test_loss, test_acc = test(gen_test,
                                               decolle_loss,
                                               net,
                                               params['burnin_steps'],
                                               print_error=True)
                    test_acc_hist.append(test_acc)

                    if not args.no_save:
                        write_stats(e, test_acc, test_loss, writer)
                        np.save(
                            log_dir + '/test_acc.npy',
                            np.array(test_acc_hist),
                        )

                total_loss, act_rate = train(
                    gen_train,
                    decolle_loss,
                    net,
                    opt,
                    e,
                    params['burnin_steps'],
                    online_update=params['online_update'])
                if not args.no_save:
                    for i in range(len(net)):
                        writer.add_scalar('/act_rate/{0}'.format(i),
                                          act_rate[i], e)
                        writer.add_scalar('/total_loss/{0}'.format(i),
                                          total_loss[i], e)
        else:
            # No training requested: restore the model from the last saved checkpoint.
            starting_epoch = load_model_from_checkpoint(checkpoint_dir, net, opt)
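
# Hypothetical entry point (not part of the original listing):
if __name__ == '__main__':
    main()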


Example #3
np.set_printoptions(precision=4)
args = parse_args('parameters/params.yml')
device = args.device

starting_epoch = 0

params, writer, dirs = prepare_experiment(
    name=os.path.splitext(os.path.basename(__file__))[0], args=args)
log_dir = dirs['log_dir']
checkpoint_dir = dirs['checkpoint_dir']

dataset = importlib.import_module(params['dataset'])
try:
    create_data = dataset.create_data
except AttributeError:
    create_data = dataset.create_dataloader

verbose = args.verbose

## Load Data
gen_train, gen_test = create_data(chunk_size_train=params['chunk_size_train'],
                                  chunk_size_test=params['chunk_size_test'],
                                  batch_size=params['batch_size'],
                                  dt=params['deltat'],
                                  num_workers=params['num_dl_workers'])
Example #4
# Imports needed by this example in addition to those at the top of the file
# (sklearn and scipy are assumed to be installed):
from sklearn import svm, metrics
import scipy.io as spio


def main():
    np.set_printoptions(precision=4)
    # args = parse_args('parameters/params.yml')
    args = parse_args('parameters/params_MN.yml')
    # args = parse_args('parameters/params_MN_multipar.yml')
    # args = parse_args('parameters/params_MN_multipar2.yml')
    # args = parse_args('samples/params_to_test/0_a.yml')
    device = args.device

    path_to_save='C:/Users/stanzarella/OneDrive - Fondazione Istituto Italiano Tecnologia/Documents/IIT_Project/2.Codes/FromLiterature/decolle-public/scripts/stats/'

    starting_epoch = 0

    params, writer, dirs = prepare_experiment(name=os.path.splitext(os.path.basename(__file__))[0], args=args)
    # log_dir = dirs['log_dir']
    # checkpoint_dir = dirs['checkpoint_dir']

    dataset = importlib.import_module(params['dataset'])
    try:
        create_data = dataset.create_data
    except AttributeError:
        create_data = dataset.create_dataloader

    verbose = args.verbose

    ## Load Data

    gen_train, gen_test = create_data(root=params['filename'],
                                      chunk_size_train=params['chunk_size_train'],
                                      chunk_size_test=params['chunk_size_test'],
                                      overlap_size_train_perc=params['overlap_size_train_perc'],
                                      overlap_size_test_perc=params['overlap_size_test_perc'],
                                      perc_test_norm=params['perc_test_norm'],
                                      muscle_to_exclude=params['muscle_to_exclude'],
                                      class_to_include=params['class_to_include'],
                                      thr_firing_excl_slice=params['thr_firing_excl_slice'],
                                      batch_size=params['batch_size'],
                                      dt=params['deltat'],
                                      num_workers=params['num_dl_workers'])

    # data_batch, target_batch = next(iter(gen_train))
    # print(data_batch)
    # print(target_batch)
    kernel = 'linear'  # 'rbf'
    Sv = []
    Dv = []
    Tv = []
    TrAccS = []
    TrAccD = []

    # S = np.zeros([data_batch.size()[3], data_batch.size()[0]])
    # D = np.zeros([data_batch.size()[3], data_batch.size()[0]])
    # T = np.zeros([data_batch.size()[3], data_batch.size()[0]])
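
    # SVM baseline: for each training batch, build two feature sets per sample,
    # S (per-neuron spike counts) and D (per-neuron approximate mean inter-spike
    # intervals), plus class labels T decoded from the one-hot targets; fit SVMs
    # on each feature set and evaluate them on the full test set.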
    for e in range(starting_epoch, params['num_epochs']):

        # Train

        S_epo = []
        D_epo = []
        T_epo = []
        TrAccS_epo = []
        TrAccD_epo = []
        TrAccS_epo_test = []
        TrAccD_epo_test = []
        TrAccS_epo_test_cum = []
        TrAccD_epo_test_cum = []
        X_S_cum = []
        # X_D_cum = []
        Y_cum = []
        M = 0
        iter_gen_train = iter(gen_train)
        for data_batch, target_batch in tqdm.tqdm(iter_gen_train, desc='Epoch {}'.format(e)):
            # Track the largest batch size seen so far so S/D/T keep a fixed width.
            M = max(data_batch.size()[0], M)

            # Per-batch feature matrices: rows = neurons, columns = samples.
            S = np.zeros([data_batch.size()[2], M])  # spike counts
            D = np.zeros([data_batch.size()[2], M])  # mean inter-spike intervals
            T = np.zeros([data_batch.size()[2], M])  # class labels

            X_S = []
            X_D = []
            Y = []
            for jjj in range(0, data_batch.size()[0]):  # samples in the batch
                xs = []
                xd = []
                for iii in range(0, data_batch.size()[2]):  # neurons
                    # Spike count: total spikes emitted by neuron iii in sample jjj.
                    S[iii, jjj] = int(data_batch[jjj, :, iii].sum())
                    xs.append(S[iii, jjj])

                    # Approximate mean inter-spike interval (0 for silent neurons).
                    if S[iii, jjj] > 0:
                        D[iii, jjj] = int(np.diff(np.where(data_batch[jjj, :, iii] == 1)).sum() / S[iii, jjj])
                        xd.append(D[iii, jjj])
                    else:
                        xd.append(0)

                    # Class label: index of the one-hot target at this time step.
                    T[iii, jjj] = int(np.array(np.where(target_batch[jjj, iii, :] == 1)))

                X_S.append(xs)
                X_D.append(xd)
                Y.append(T[0, jjj])  # label taken from the first time step
                X_S_cum.append(xs)
                # X_D_cum.append(xd)
                Y_cum.append(T[0, jjj])
            # SVM ---------------------------------------------------------------------------------------------------
            # Training only with this batch
            # Trained with spike counting
            clf_S = svm.SVC(kernel=kernel, C=1000)
            clf_S.fit(X_S, Y)
            y_pred_S = clf_S.predict(X_S)
            train_acc_S = metrics.accuracy_score(Y, y_pred_S)
            # Trained with ISI
            clf_D = svm.SVC(kernel=kernel, C=1000)
            clf_D.fit(X_D, Y)
            y_pred_D = clf_D.predict(X_D)
            train_acc_D = metrics.accuracy_score(Y, y_pred_D)

            # Training with this batch and the previous
            # Trained with spike counting
            clf_S_cum = svm.SVC(kernel=kernel, C=1000)
            clf_S_cum.fit(X_S_cum, Y_cum)
            # y_pred_S = clf_S.predict(X_S)
            # train_acc_S = metrics.accuracy_score(Y, y_pred_S)
            # Trained with ISI
            # clf_D_cum = svm.SVC(kernel='linear', C=1000)
            # clf_D_cum.fit(X_D_cum, Y_cum)
            # y_pred_D = clf_D.predict(X_D)
            # train_acc_D = metrics.accuracy_score(Y, y_pred_D)
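            # The *_cum classifier is refit from scratch on all batches seen so
            # far in this epoch, so its accuracy tracks how the baseline improves
            # as training data accumulates.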

            S_epo.append(S)
            D_epo.append(D)
            T_epo.append(T)
            TrAccS_epo.append(train_acc_S)
            TrAccD_epo.append(train_acc_D)

            ## Test per each train batch with all test batches
            train_acc_S_test = []
            train_acc_D_test = []
            train_acc_S_test_cum = []
            train_acc_D_test_cum = []
            iter_gen_test = iter(gen_test)
            for data_batch_test, target_batch_test in tqdm.tqdm(iter_gen_test, desc='Testing'):
                S_test = []
                D_test = []
                Y_test = []
                for jjj in range(0, data_batch_test.size()[0]):  # Batches
                    xs = []
                    xd = []
                    for iii in range(0, data_batch_test.size()[2]):  # Neurons
                        ss = int(data_batch_test[jjj,:,iii].sum())
                        xs.append(ss)
                        if ss > 0:
                            dd = int(np.diff(np.where(data_batch_test[jjj, :, iii] == 1)).sum() / ss)
                            xd.append(dd)
                        else:
                            xd.append(0)

                        tt = int(np.array(np.where(target_batch_test[jjj, iii, :] == 1)))
                    S_test.append(xs)
                    D_test.append(xd)
                    Y_test.append(tt)
                # Tested with spike counting
                y_pred_S_test = clf_S.predict(S_test)
                y_pred_S_test_cum = clf_S_cum.predict(S_test)
                if len(y_pred_S_test) == len(Y_test):
                    train_acc_S_test.append(metrics.accuracy_score(Y_test, y_pred_S_test))
                if len(y_pred_S_test_cum) == len(Y_test):
                    train_acc_S_test_cum.append(metrics.accuracy_score(Y_test, y_pred_S_test_cum))

                # Tested with ISI
                y_pred_D_test = clf_D.predict(D_test)
                # y_pred_D_test_cum = clf_D_cum.predict(D_test)
                if len(y_pred_D_test) == len(Y_test):
                    train_acc_D_test.append(metrics.accuracy_score(Y_test, y_pred_D_test))
                # if len(y_pred_D_test_cum) == len(Y_test):
                #     train_acc_D_test_cum.append(metrics.accuracy_score(Y_test, y_pred_D_test_cum))

            TrAccS_epo_test.append(train_acc_S_test)
            TrAccD_epo_test.append(train_acc_D_test)
            TrAccS_epo_test_cum.append(train_acc_S_test_cum)
            TrAccD_epo_test_cum.append(train_acc_D_test_cum)

        # SAVE
        struct = {'S': S_epo, 'D': D_epo, 'T': T_epo,
                  'TrAccS': TrAccS_epo, 'TrAccD': TrAccD_epo,
                  'TrAccS_test': TrAccS_epo_test,
                  'TrAccS_test_cum': TrAccS_epo_test_cum,
                  'TrAccD_test': TrAccD_epo_test,
                  'Params': params}
        spio.savemat(path_to_save + 'matlab_matrix_' + str(e) + '_' + kernel.upper() + '.mat', struct)
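

# Hypothetical entry point and analysis sketch (assumptions, not part of the
# original listing): run the experiment, then reload the per-epoch .mat files
# written above with scipy.io.loadmat for offline analysis.
if __name__ == '__main__':
    main()
    # stats = spio.loadmat('stats/matlab_matrix_0_LINEAR.mat')  # hypothetical output path
    # print(stats['TrAccS'])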