Example #1
import numpy as np
from LSTM.lstm_classifier import LSTMDiscriminative
# DataHandler (used below for the train/validation split) is part of the project;
# its import path is not shown in this snippet.


def ecoglstm(lstmsize, fcsize, dropout, optim):
    """Objective for a hyperparameter search: train a discriminative LSTM on the
    ECoG training split and return the negated validation score.

    Each hyperparameter arrives as a one-element sequence, hence the [0] indexing.
    """
    print("Reading data...")
    data = np.load(
        "/storage/hpc_anna/GMiC/Data/ECoGmixed/preprocessed/train_data.npy")
    labels = np.load(
        "/storage/hpc_anna/GMiC/Data/ECoGmixed/preprocessed/train_labels.npy")
    train_data, train_labels, val_data, val_labels = DataHandler.split(
        0.7, data, labels)

    # 10 epochs, batch size 128
    lstmcl = LSTMDiscriminative(lstmsize[0], fcsize[0], dropout[0], optim[0],
                                10, 128)
    model = lstmcl.train(train_data, train_labels)
    result = -lstmcl.test(model, val_data, val_labels)

    print('Result = %f' % result)
    return result
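

# The one-element-list indexing above suggests ecoglstm is meant to be driven by a
# hyperparameter optimizer that wraps each value in a sequence. A minimal usage
# sketch; the concrete values are illustrative assumptions, not taken from the snippet.
score = ecoglstm([512], [128], [0.5], ['adadelta'])
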
import numpy as np
from LSTM.lstm_classifier import LSTMDiscriminative

# parameters
lstmsize = 300
fcsize = 100
dropout = 0.0
optim = 'adadelta'
nepochs = 30
batchsize = 512

# load the dataset (static Fourier features and dynamic preprocessed sequences)
print('Loading the dataset...')
static_train = np.load('/storage/hpc_anna/GMiC/Data/ECoGmixed/fourier/train_data.npy')
dynamic_train = np.load('/storage/hpc_anna/GMiC/Data/ECoGmixed/preprocessed/train_data.npy')
static_val = np.load('/storage/hpc_anna/GMiC/Data/ECoGmixed/fourier/test_data.npy')
dynamic_val = np.load('/storage/hpc_anna/GMiC/Data/ECoGmixed/preprocessed/test_data.npy')
labels_train = np.load('/storage/hpc_anna/GMiC/Data/ECoGmixed/preprocessed/train_labels.npy')
labels_val = np.load('/storage/hpc_anna/GMiC/Data/ECoGmixed/preprocessed/test_labels.npy')
nsamples = dynamic_train.shape[0]

# split the training data into training and test indices
train_idx = np.random.choice(range(0, nsamples), size=int(round(nsamples * 0.7)), replace=False)
test_idx = list(set(range(0, nsamples)) - set(train_idx))

# train the model and report performance on the held-out part of the training set
lstmcl = LSTMDiscriminative(lstmsize, fcsize, dropout, optim, nepochs, batchsize)
model = lstmcl.train(dynamic_train[train_idx], labels_train[train_idx])
print('Discriminative LSTM classifier on dynamic features: %.4f'
      % lstmcl.test(model, dynamic_train[test_idx], labels_train[test_idx]))
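# dynamic_val and labels_val (the held-out ECoG test split) are loaded above but never
# scored; a possible follow-up evaluation, reusing the same test() call, could be:
print('Discriminative LSTM classifier on the held-out test split: %.4f'
      % lstmcl.test(model, dynamic_val, labels_val))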


# Excerpt from the per-fold loop of the dataset-enrichment script: lstmcl, the
# generative models model_pos/model_neg, dynamic_all, labels_all, train_idx,
# predict_idx and the ratios_*/activations_* containers are defined by the
# surrounding (omitted) code.

    # extract per-model MSEs and positive/negative ratios from the generative LSTM pair
    mse_pos, mse_neg = lstmcl.predict_mse(model_pos, model_neg, dynamic_all[predict_idx])
    ratios_generative[predict_idx] = lstmcl.pos_neg_ratios(model_pos, model_neg, dynamic_all[predict_idx])

    # extract activations
    activations_pos = lstmcl.activations(model_pos, dynamic_all[predict_idx])
    activations_neg = lstmcl.activations(model_neg, dynamic_all[predict_idx])
    activations_generative[predict_idx] = np.concatenate((activations_pos[:, -1, :], activations_neg[:, -1, :]), axis=1)

    #
    # Discriminative LSTM
    #
    print("    Extracting ratios and activations from discriminative LSTM...")

    # train the model
    lstmcl = LSTMDiscriminative(d_lstmsize, d_fcsize, d_lstmdropout, d_lstmoptim, d_lstmnepochs, d_lstmbatch)
    model = lstmcl.train(dynamic_all[train_idx], labels_all[train_idx])

    # extract ratios
    ratios_discriminative[predict_idx] = lstmcl.pos_neg_ratios(model, dynamic_all[predict_idx])

    # extract activations
    activations_discriminative[predict_idx] = lstmcl.activations(model, dynamic_all[predict_idx])


#
# Prepare combined datasets for future experiments
#
print('Enriching the datasets...')
# enriched_by_generative_ratios = np.concatenate((static_all, np.matrix(ratios_generative).T), axis=1)
# enriched_by_generative_activations = np.concatenate((static_all, activations_generative), axis=1)
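
# A sketch of the enrichment step that the commented-out lines above point to: the
# static (Fourier) features are concatenated column-wise with the ratios and
# activations collected per fold. static_all is assumed to hold the static features
# for all samples, and the two discriminative variants are assumptions by analogy
# with the generative ones.
enriched_by_generative_ratios = np.concatenate((static_all, np.matrix(ratios_generative).T), axis=1)
enriched_by_generative_activations = np.concatenate((static_all, activations_generative), axis=1)
enriched_by_discriminative_ratios = np.concatenate((static_all, np.matrix(ratios_discriminative).T), axis=1)
enriched_by_discriminative_activations = np.concatenate((static_all, activations_discriminative), axis=1)
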
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from LSTM.lstm_classifier import LSTMDiscriminative

# parameters (same values as in the script above)
lstmsize = 300
fcsize = 100
dropout = 0.0
optim = 'adadelta'
nepochs = 30
batchsize = 512

# load the dataset
print('Loading the dataset...')
static_train = np.load('/storage/hpc_anna/GMiC/Data/ECoGmixed/fourier/train_data.npy')
dynamic_train = np.load('/storage/hpc_anna/GMiC/Data/ECoGmixed/preprocessed/train_data.npy')
static_test = np.load('/storage/hpc_anna/GMiC/Data/ECoGmixed/fourier/test_data.npy')
dynamic_test = np.load('/storage/hpc_anna/GMiC/Data/ECoGmixed/preprocessed/test_data.npy')
labels_train = np.load('/storage/hpc_anna/GMiC/Data/ECoGmixed/preprocessed/train_labels.npy')
labels_test = np.load('/storage/hpc_anna/GMiC/Data/ECoGmixed/preprocessed/test_labels.npy')
nsamples = dynamic_train.shape[0]

# split the training data into training and validation sets
train_idx = np.random.choice(range(0, nsamples), size=int(round(nsamples * 0.7)), replace=False)
val_idx = list(set(range(0, nsamples)) - set(train_idx))
dynamic_val = dynamic_train[val_idx]
labels_val = labels_train[val_idx]
dynamic_train = dynamic_train[train_idx]
labels_train = labels_train[train_idx]

# alternative: let the classifier carve out its own validation split
# lstmcl = LSTMDiscriminative(lstmsize, fcsize, dropout, optim, nepochs, batchsize, validation_split=0.3)
# model = lstmcl.train(dynamic_train, labels_train)
# print('Discriminative LSTM classifier on dynamic features: %.4f' % lstmcl.test(model, dynamic_val, labels_val))

# transpose the validation sequences and one-hot encode the labels so they can be
# passed to the classifier as explicit validation data
tr_dynamic_val = np.transpose(dynamic_val, (0, 2, 1))
enc = OneHotEncoder(sparse=False)
tr_labels_val = enc.fit_transform(np.matrix(labels_val).T)

# train the model and report performance on the validation set
lstmcl = LSTMDiscriminative(lstmsize, fcsize, dropout, optim, nepochs, batchsize,
                            validation_data=(tr_dynamic_val, tr_labels_val))
model = lstmcl.train(dynamic_train, labels_train)
print('Discriminative LSTM classifier on dynamic features: %.4f'
      % lstmcl.test(model, dynamic_val, labels_val))