lstmoptim = 'adadelta'
lstmnepochs = 2 #20
lstmbatchsize = 64


#
# Generate the dataset and run analysis for each parameter combination of interest
#

# tuples in format (#static, #dynamic)
params = [(10000, 10), (1000, 10), (100, 10), (10, 10), (10, 100), (10, 1000), (10, 10000)]
for p in params:
    n_static = p[0]
    n_dynamic = p[1]
    print "Running with parameters n_static = %d, n_dynamic = %d" % (n_static, n_dynamic)
    static_train, dynamic_train, static_val, dynamic_val, labels_train, labels_val = generate_lstm_wins(5000, n_static, n_dynamic, 70)

    # merge train and test
    static_all = np.concatenate((static_train, static_val), axis=0)
    dynamic_all = np.concatenate((dynamic_train, dynamic_val), axis=0)
    labels_all = np.concatenate((labels_train, labels_val), axis=0)
    nsamples = static_all.shape[0]


    # k-fold CV for enrichment

    # prepare where to store the ratios
    ratios_all_hmm = np.empty(len(labels_all))
    ratios_all_lstm = np.empty(len(labels_all))

    # split indices into folds
    predict_idx_list = np.array_split(range(nsamples), nfolds)
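
The remainder of this example is not reproduced on this page. For orientation only, below is a minimal sketch of how fold indices produced by np.array_split are commonly consumed in a leave-one-fold-out loop; leave_fold_out_indices is an illustrative helper, not a function from this repository (where nfolds is defined earlier in the original script, outside this excerpt).

import numpy as np

def leave_fold_out_indices(nsamples, nfolds):
    # Split the sample indices into roughly equal folds and pair each
    # held-out fold with the complementary training indices.
    all_idx = np.arange(nsamples)
    for predict_idx in np.array_split(all_idx, nfolds):
        train_idx = np.setdiff1d(all_idx, predict_idx)
        yield train_idx, predict_idx

# e.g. fit the HMM and LSTM enrichment models on train_idx and fill
# ratios_all_hmm[predict_idx] / ratios_all_lstm[predict_idx] fold by fold
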
Example #3
lstmsize = 256
lstmdropout = 0.0
lstmoptim = 'rmsprop'
lstmnepochs = 20
lstmbatchsize = 32
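# These settings presumably configure a Keras-style LSTM: roughly
# LSTM(lstmsize, dropout=lstmdropout), compiled with optimizer=lstmoptim and
# fit with epochs=lstmnepochs, batch_size=lstmbatchsize; the model definition
# itself is not shown in this excerpt.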

# open file to store results
f = open('../../Results/grid_lstm_wins.csv', 'a')


#
# Load data
#

# generate the dataset
train_static, train_dynamic, test_static, test_dynamic, train_labels, test_labels = generate_lstm_wins(nsamples, nfeatures, nseqfeatures, seqlen) 
train_nsamples = train_static.shape[0]
test_nsamples = test_static.shape[0]

# split training into two halves
train_half = train_nsamples // 2  # integer division so the slice indices stay ints
trainA_static = train_static[:train_half]
trainB_static = train_static[train_half:]
trainA_dynamic = train_dynamic[:train_half]
trainB_dynamic = train_dynamic[train_half:]
trainA_labels = train_labels[:train_half]
trainB_labels = train_labels[train_half:]
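# The two halves presumably support a stacking-style setup: enrichment models
# (e.g. the HMM and LSTM) are fitted on one half and used to score the other,
# so the downstream classifier never sees samples its enrichment features were
# fitted on.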


#
# Train enrichment models