Example #1
import tensorflow as tf  # TensorFlow 1.x API


def training(data,
             model_dir,
             num_epoch=20,
             batch_size=200,
             n_steps=100,
             n_inputs=1,
             n_neurons=50,
             n_outputs=1,
             n_layers=3):

    tf.reset_default_graph()

    learning_rate = 0.0001

    X = tf.placeholder(tf.float32, shape=(None, n_steps, n_inputs), name='X')
    y = tf.placeholder(tf.float32, shape=(None, n_steps, n_outputs), name='y')

    # stack n_layers LSTM cells into one multi-layer RNN cell
    cells = [
        tf.contrib.rnn.LSTMCell(num_units=n_neurons)
        for layer in range(n_layers)
    ]
    multicell = tf.contrib.rnn.MultiRNNCell(cells)
    rnn_outputs, states = tf.nn.dynamic_rnn(multicell,
                                            inputs=X,
                                            dtype=tf.float32)
    # flatten the time axis so one dense layer is shared across all steps
    stacked_rnn_outputs = tf.reshape(tensor=rnn_outputs, shape=(-1, n_neurons))
    stacked_outputs = tf.layers.dense(stacked_rnn_outputs,
                                      units=n_outputs,
                                      name='stacked_output')
    outputs = tf.reshape(stacked_outputs,
                         shape=(-1, n_steps, n_outputs),
                         name='output')
    mse = tf.reduce_mean(tf.square(outputs - y), name='mse')
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    training_op = optimizer.minimize(mse)

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    training_loss = []
    epoch_count = []
    with tf.Session() as sess:
        init.run()
        for epoch in range(num_epoch):
            # fetch_batch is an external helper (a sketch follows the function)
            X_train, y_train = fetch_batch(data.reshape(-1, 1), batch_size,
                                           n_steps)
            sess.run(training_op, feed_dict={X: X_train, y: y_train})
            if epoch % 10 == 0:
                loss = sess.run(mse, feed_dict={X: X_train, y: y_train})
                training_loss.append(loss)
                epoch_count.append(epoch)
                if epoch % 100 == 0:
                    print('Epoch:', epoch, ' Training loss:', loss)
        saver.save(sess, model_dir)

    # the session is closed once the with-block exits; only the recorded
    # loss history remains usable by the caller
    return sess, training_loss, epoch_count
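All four examples depend on an external fetch_batch helper that the page does not show. Below is a minimal sketch of what it might look like for this RNN example, assuming it samples random windows from a (T, 1) series and targets the same window shifted one step ahead (the usual next-step prediction setup); the behavior is inferred from the call site, not taken from the original.

import numpy as np

def fetch_batch(data, batch_size, n_steps):
    # hypothetical helper, not part of the original snippet:
    # sample batch_size random windows of length n_steps from a (T, 1)
    # series; y is X shifted one step into the future
    T = data.shape[0]
    starts = np.random.randint(0, T - n_steps - 1, size=batch_size)
    X = np.stack([data[s:s + n_steps] for s in starts])
    y = np.stack([data[s + 1:s + n_steps + 1] for s in starts])
    return X.astype(np.float32), y.astype(np.float32)

With that in place, sess, losses, epochs = training(series, './rnn_model.ckpt') runs end to end on any 1-D NumPy series.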
Example #2
# fragment: assumes the CNN graph of Example #3 below has already been
# built (placeholders X and y, the dense_2 layer, fetch_batch, and the
# values num_epoch, batch_size, l, w, pred_window, directory)
output = tf.layers.dense(dense_2, units=pred_window, name="output")

mse = tf.reduce_mean(tf.square(output - y), name="MSE")

optimizer = tf.train.AdamOptimizer(learning_rate=0.0008)

training_op = optimizer.minimize(mse)

saver = tf.train.Saver()
init = tf.global_variables_initializer()

#%%
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(num_epoch):
        X_batch, y_batch, _ = fetch_batch(train_std, train_date, batch_size,
                                          l, w, pred_window)
        sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        if epoch % 50 == 0:
            train_error = sess.run(mse, feed_dict={X: X_batch, y: y_batch})
            test_x, test_y, _ = fetch_batch(test_std, test_date, 1, l, w,
                                            pred_window)
            test_error = sess.run(mse, feed_dict={X: test_x, y: test_y})
            print("Epoch:", epoch, " Training error:", train_error,
                  " Test error:", test_error)
    saver.save(sess, directory)
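Once saver.save(sess, directory) has written a checkpoint, the model can be reloaded for inference without rebuilding the graph by hand. A minimal sketch, assuming the tensor names used above; in TensorFlow 1.x the output of a dense layer scoped "output" is conventionally named output/BiasAdd:0.

tf.reset_default_graph()
saver = tf.train.import_meta_graph(directory + '.meta')  # rebuild the saved graph
with tf.Session() as sess:
    saver.restore(sess, directory)
    graph = tf.get_default_graph()
    X = graph.get_tensor_by_name('X:0')
    output = graph.get_tensor_by_name('output/BiasAdd:0')
    preds = sess.run(output, feed_dict={X: test_x})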
Example #3
def training(data,
             date,
             model_dir,
             num_epoch=20,
             batch_size=200,
             pred_window=30):

    tf.reset_default_graph()

    # each sample is an l x w single-channel "image" built from the series
    l = 10
    w = 10
    c = 1

    X = tf.placeholder(dtype=tf.float32, shape=(None, l, w, c), name="X")
    y = tf.placeholder(dtype=tf.float32, shape=(None, pred_window), name="y")

    conv_1 = tf.layers.conv2d(X,
                              filters=8,
                              kernel_size=(5, 5),
                              strides=1,
                              padding="same",
                              activation=tf.nn.relu,
                              name="conv_1")
    print(conv_1)  # shape check: (?, 10, 10, 8)
    conv_2 = tf.layers.conv2d(conv_1,
                              filters=16,
                              kernel_size=(5, 5),
                              strides=1,
                              padding="same",
                              activation=tf.nn.relu,
                              name="conv_2")
    print(conv_2)  # shape check: (?, 10, 10, 16)
    # pool_2 = tf.layers.max_pooling2d(conv_2,
    #                                  pool_size=(3, 3),
    #                                  strides=2,
    #                                  name="max_pool_2")

    flatten = tf.layers.flatten(conv_2)
    print(flatten)  # shape check: (?, 1600)
    dense_1 = tf.layers.dense(flatten,
                              units=800,
                              activation=tf.nn.relu,
                              name="dense_1")

    dense_2 = tf.layers.dense(dense_1,
                              units=200,
                              activation=tf.nn.relu,
                              name="dense_2")

    dense_3 = tf.layers.dense(dense_2,  # was dense_1, leaving dense_2 unused
                              units=80,
                              activation=tf.nn.relu,
                              name="dense_3")

    output = tf.layers.dense(dense_3, units=pred_window, name="output")

    mse = tf.reduce_mean(tf.square(output - y), name="MSE")

    optimizer = tf.train.AdamOptimizer(learning_rate=0.0008)

    training_op = optimizer.minimize(mse)

    saver = tf.train.Saver()
    init = tf.global_variables_initializer()

    training_loss = []
    epoch_count = []

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(num_epoch):
            X_batch, y_batch, _ = fetch_batch(data, date, batch_size, l, w,
                                              pred_window)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
            if epoch % 10 == 0:
                loss = sess.run(mse, feed_dict={X: X_batch, y: y_batch})
                training_loss.append(loss)
                epoch_count.append(epoch)
                if epoch % 100 == 0:
                    print('Epoch:', epoch, ' Training loss:', loss)
        saver.save(sess, model_dir)
    return sess, training_loss, epoch_count
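As with Example #1, fetch_batch is external and not shown. A plausible sketch for this CNN variant, assuming each sample reshapes l*w consecutive values of a 1-D series into an (l, w, 1) "image" whose target is the next pred_window values; everything here is inferred from the call sites.

import numpy as np

def fetch_batch(data, date, batch_size, l, w, pred_window):
    # hypothetical helper, not part of the original snippet
    T = len(data)
    window = l * w
    starts = np.random.randint(0, T - window - pred_window, size=batch_size)
    X = np.stack([np.reshape(data[s:s + window], (l, w, 1)) for s in starts])
    y = np.stack([data[s + window:s + window + pred_window] for s in starts])
    dates = [date[s] for s in starts]  # third return value, ignored above
    return X.astype(np.float32), y.astype(np.float32), dates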
Example #4
# fragment: variant of Example #2 without the date argument; assumes the
# same CNN graph and hyperparameters are already defined
output = tf.layers.dense(dense_2, units=pred_window, name="output")

mse = tf.reduce_mean(tf.square(output - y), name="MSE")

optimizer = tf.train.AdamOptimizer(learning_rate=0.001)

training_op = optimizer.minimize(mse)

saver = tf.train.Saver()
init = tf.global_variables_initializer()

#%%
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(num_epoch):
        X_batch, y_batch = fetch_batch(train_std, batch_size, l, w, pred_window)
        sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        if epoch % 50 == 0:
            train_error = sess.run(mse, feed_dict={X: X_batch, y: y_batch})
            test_x, test_y = fetch_batch(test_std, 1, l, w, pred_window)
            test_error = sess.run(mse, feed_dict={X: test_x, y: test_y})
            print("Epoch:", epoch, " Training error:", train_error,
                  " Test error:", test_error)
    saver.save(sess, directory)
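To visualize training, the loss history returned by Example #3's training function can be plotted directly. A short sketch, where series and dates are hypothetical stand-ins for the caller's data:

import matplotlib.pyplot as plt

_, training_loss, epoch_count = training(series, dates, './cnn_model.ckpt',
                                         num_epoch=1000)

plt.figure()
plt.plot(epoch_count, training_loss)
plt.xlabel('epoch')
plt.ylabel('training MSE')
plt.show()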