Example 1
def hub():
    # render the hub page with the data and timeline produced by the model
    data, timeline = model.generate_data()
    return render_template('hub.html', data=data, timeline=timeline)
Example 2
PRINT_STEPS = TRAINING_STEPS // 100  # integer division: every_n_steps expects an int

my_dir = os.path.join(os.path.expanduser('~'), 'Desktop', 'sine')

regressor = learn.TensorFlowEstimator(model_fn=lstm_model(TIMESTEPS, RNN_LAYERS,
                                                          DENSE_LAYERS),
                                      n_classes=0,
                                      verbose=2,
                                      steps=TRAINING_STEPS,
                                      optimizer='SGD',
                                      learning_rate=0.001,
                                      batch_size=BATCH_SIZE,
                                      class_weight=[1])

# generate sine-wave data ('seperate' is the helper's own spelling of its keyword)
X, y = generate_data(np.sin, np.linspace(0, 100, 5000), TIMESTEPS, seperate=False)
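# generate_data is defined elsewhere in this script; as a rough sketch of what it
# plausibly does (an assumption inferred from how X and y are used below), it
# windows the series into TIMESTEPS-long inputs and splits them into
# train/val/test dicts:
#
#   def generate_data(fct, x, time_steps, seperate=False):
#       data = fct(x)
#       windows = np.array([data[i:i + time_steps]
#                           for i in range(len(data) - time_steps)])
#       labels = data[time_steps:]
#       n = len(windows)
#       i1, i2 = int(n * 0.8), int(n * 0.9)  # 80/10/10 split (assumed)
#       X = {'train': windows[:i1], 'val': windows[i1:i2], 'test': windows[i2:]}
#       y = {'train': labels[:i1], 'val': labels[i1:i2], 'test': labels[i2:]}
#       return X, y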
# create a validation monitor for the LSTM
validation_monitor = learn.monitors.ValidationMonitor(X['val'], y['val'],
                                                      every_n_steps=PRINT_STEPS,
                                                      early_stopping_rounds=100000)

regressor.fit(X['train'], y['train'], monitors=[validation_monitor], logdir=LOG_DIR)

# based on the training, get the predictions
predicted = regressor.predict(X['test'])

predicted1 = predicted  # keep a copy of the initial predictions
print("INITIAL PRED: ", predicted)

# cycle predictions into the testing input one at a time (see the sketch below)
# i.e.: get the first prediction, insert it into the testing data, rerun predictions
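A minimal sketch of that cycling, not part of the original example: it assumes
X['test'] holds TIMESTEPS-long windows and that regressor.predict returns one
value per window.

# hypothetical recursive-prediction loop implementing the comments above
recursive_pred = []
window = X['test'][0].copy()                 # start from the first test window
for _ in range(len(X['test'])):
    p = regressor.predict(window[np.newaxis, ...])[0]  # predict the next value
    recursive_pred.append(p)
    window = np.roll(window, -1)             # drop the oldest value...
    window[-1] = p                           # ...and append the prediction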
Example 3
                                      optimizer='SGD',
                                      learning_rate=LEARNING_RATE, 
                                      batch_size=BATCH_SIZE,
                                      continue_training=True)

# read the data
from csv import reader  # the snippet relies on csv.reader imported as 'reader'

print("Reading CSV file...")
with open('pub.csv') as f:
    data = list(reader(f.read().splitlines()))

    # get output
    # in 'pub.csv', the standardized impressions are in column 5
    adOps = [float(i[5]) for i in data[1:]]  # skip the header row
    # adOps is already a list of Python floats; the original tf.to_float(adOps)
    # call produced a tensor that was never used, so it is omitted here

X, y = generate_data(adOps, TIMESTEPS, seperate=False)
regressor.fit(X['train'], y['train'])
    
# based on the training, get the predictions
# these initial predictions use measured values in the input,
# not the predicted values; the recursive technique is implemented later
predicted = regressor.predict(X['test'])

# store the initial predictions
predicted1 = predicted
    
# recursive prediction set-up (see the sketch below):
# get the first prediction, insert it into the testing data, rerun predictions
# each prediction is cycled in TIMESTEPS number of times
# i.e.: the first prediction becomes the final value of X1, the second-to-last value of X2, etc.
#      X['test'][i+1][-1] = predicted[i]
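A direct sketch of that indexing (hypothetical; it assumes predicted holds one
value per test window):

# cycle each prediction into the next TIMESTEPS windows, one slot earlier each time:
# predicted[i] -> X['test'][i+1][-1], X['test'][i+2][-2], ..., X['test'][i+TIMESTEPS][-TIMESTEPS]
for i, p in enumerate(predicted):
    for k in range(1, TIMESTEPS + 1):
        if i + k < len(X['test']):
            X['test'][i + k][-k] = p
predicted = regressor.predict(X['test'])  # rerun predictions on the updated windows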
Example 4
# outputs, state = rnn.rnn(thetaNeuron, inputs, init_state)

sess = tf.Session()
writer = tf.train.SummaryWriter("{}/tf".format(os.environ["HOME"]), sess.graph)

#loss = tf.add_n([ tf.nn.l2_loss(target - output) for output, target in zip(outputs, targets) ]) / seq_size / batch_size / net_size

#lr = tf.Variable(0.0, trainable=False)
#tvars = tf.trainable_variables()
#grads_raw = tf.gradients(loss, tvars)
# grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), 5.0)

#optimizer = tf.train.GradientDescentOptimizer(lr)
#train_step = optimizer.apply_gradients(zip(grads_raw, tvars))

inputs_v, targets_v = generate_data(input_size, net_size, seq_size, batch_size,
                                    signal_form)
state_v = np.zeros((batch_size, net_size))
states, outs, wgrads, ugrads, bgrads = [], [], [], [], []
with tf.device("/cpu:0"):
    for idx, (inp, inp_v, targ,
              targ_v) in enumerate(zip(inputs, inputs_v, targets, targets_v)):
        print(idx)
        if idx > 0:
            tf.get_variable_scope().reuse_variables()  # share weights across timesteps
        sess.run(tf.assign(lr, lrate))  # lr and lrate are defined earlier in the original script

        out, new_state = thetaNeuron(inp, state)
        if idx == 0:
            # variables exist only after the first cell call, so initialize them here
            init = tf.initialize_all_variables()
            sess.run(init)
Example 5
TRAINING_STEPS = 5000
BATCH_SIZE = 100
PRINT_STEPS = TRAINING_STEPS // 10  # integer division: every_n_steps expects an int

regressor = learn.TensorFlowEstimator(model_fn=lstm_model(TIMESTEPS, RNN_LAYERS,
                                                          DENSE_LAYERS),
                                      n_classes=0,
                                      verbose=2,
                                      steps=TRAINING_STEPS,
                                      optimizer='SGD',
                                      learning_rate=0.03,
                                      batch_size=BATCH_SIZE,
                                      class_weight=[1])


X, y = generate_data(ecg_template_noisy, TIMESTEPS, seperate=False)

# create a validation monitor for the LSTM
validation_monitor = learn.monitors.ValidationMonitor(X['val'], y['val'],
                                                      every_n_steps=PRINT_STEPS,
                                                      early_stopping_rounds=100000)

regressor.fit(X['train'], y['train'], monitors=[validation_monitor], logdir=LOG_DIR)

# based on the training, get the predictions
predicted = regressor.predict(X['test'])

predicted1 = predicted  # keep a copy of the initial predictions
print("INITIAL PRED: ", predicted)

'''recursive prediction:
Example 6
sess = tf.Session()
writer = tf.train.SummaryWriter("{}/tf".format(os.environ["HOME"]), sess.graph)


# loss = tf.add_n([ tf.nn.l2_loss(target - output) for output, target in zip(outputs, targets) ]) / seq_size / batch_size / net_size

# lr = tf.Variable(0.0, trainable=False)
# tvars = tf.trainable_variables()
# grads_raw = tf.gradients(loss, tvars)
# grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), 5.0)

# optimizer = tf.train.GradientDescentOptimizer(lr)
# train_step = optimizer.apply_gradients(zip(grads_raw, tvars))


inputs_v, targets_v = generate_data(input_size, net_size, seq_size, batch_size, signal_form)
state_v = np.zeros((batch_size, net_size))
states, outs, wgrads, ugrads, bgrads = [], [], [], [], []
with tf.device("/cpu:0"):
    for idx, (inp, inp_v, targ, targ_v) in enumerate(zip(inputs, inputs_v, targets, targets_v)):
        print(idx)
        if idx > 0:
            tf.get_variable_scope().reuse_variables()  # share weights across timesteps
        sess.run(tf.assign(lr, lrate))  # lr and lrate are defined earlier in the original script

        out, new_state = thetaNeuron(inp, state)
        if idx == 0:
            # variables exist only after the first cell call, so initialize them here
            init = tf.initialize_all_variables()
            sess.run(init)

        loss = tf.nn.l2_loss(targ - out)  # per-step L2 loss between target and output
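Wired together, the commented-out lines above would form the intended training
step; a sketch under the same TF 0.x API the snippet already uses:

# hypothetical assembly of the commented-out training step
lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), 5.0)  # clip the global grad norm at 5
optimizer = tf.train.GradientDescentOptimizer(lr)
train_step = optimizer.apply_gradients(zip(grads, tvars))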
Example 7
                    json.dump(res, f)

            print('Done!')
            print()


if __name__ == '__main__':

    np.random.seed(1)
    # Data parameters
    train_size = 100000
    valid_size = 10000

    value_low = -100
    value_high = 100
    min_length = 1
    max_length = 10
    num_epochs = 5

    train_df = generate_data(size=train_size, value_low=value_low, value_high=value_high,
                             min_length=min_length, max_length=max_length)
    valid_df = generate_data(size=valid_size, value_low=value_low, value_high=value_high,
                             min_length=min_length, max_length=max_length)

    data_path = os.path.join(DATA_PATH, '2eve')

    train_df.to_csv(os.path.join(data_path, 'train.csv'))
    valid_df.to_csv(os.path.join(data_path, 'valid.csv'))

    run_validation(data_path, 100, num_epochs, train_df, valid_df)
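generate_data is defined elsewhere in this script; given the parameters it takes
and that its result is written out with to_csv, a plausible minimal version (an
assumption, not the original) is:

import numpy as np
import pandas as pd

def generate_data(size, value_low, value_high, min_length, max_length):
    # hypothetical: one random-length sequence of random integers per row
    lengths = np.random.randint(min_length, max_length + 1, size=size)
    seqs = [np.random.randint(value_low, value_high + 1, size=n) for n in lengths]
    return pd.DataFrame({'sequence': [' '.join(map(str, s)) for s in seqs]})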