import numpy as np
import tensorflow as tf
from tensorflow import keras as kr

# NOTE: fr (windowing/forecast helpers), vis (plotting helpers) and init()
# are project modules assumed to be imported at module level in the original
# repo; they are used but not defined in this file.


def neural_networks(prices, days, hidden_neurons, callback, n_days,
                    threshold=0.67, window_size=30, batch_size=64,
                    window_shift=1, nof_epochs=100, lr_rate=1e-6, mom=0.9):
    # Split data into training and test sets
    split, train_prices, train_days, test_prices, test_days = fr.split_data(
        prices, days, threshold=threshold)

    # Reset all internal variables
    init()

    # Create windows on the training data
    train_windows, train_batch = fr.create_windows(train_prices,
                                                   window_size=window_size,
                                                   batch_size=batch_size,
                                                   w_shift=window_shift)

    # Introduce model
    dnn_model = kr.Sequential()
    dnn_model.add(kr.layers.Dense(units=10, input_shape=[window_size],
                                  activation='relu'))  # input layer with ReLU activation
    for neurons in hidden_neurons:
        dnn_model.add(kr.layers.Dense(units=neurons,
                                      activation='relu'))  # hidden layer with ReLU activation
        # dnn_model.add(kr.layers.Dropout(0.1))  # optional dropout to reduce over-fitting
    dnn_model.add(kr.layers.Dense(1))  # output layer
    dnn_model.summary()

    # Choose MSE loss and SGD optimizer
    dnn_model.compile(loss="mse",
                      optimizer=kr.optimizers.SGD(learning_rate=lr_rate,
                                                  momentum=mom))

    if len(callback) == 0:
        history_dnn = dnn_model.fit(train_batch, epochs=4 * nof_epochs)

        # Predictions
        print("Point by point prediction")
        predicted_dnn = fr.point_prediction(dnn_model, prices, split,
                                            window_size)
        mse, mae = fr.evaluate_model(history_dnn, test_prices, predicted_dnn,
                                     test_days, callback)

        print("Predict next n days function")
        predicted_dnn = fr.predict_next_n_days(n_days, dnn_model, prices,
                                               split, window_size)
        mse, mae = fr.evaluate_model(history_dnn, test_prices[:n_days],
                                     predicted_dnn, test_days[:n_days],
                                     callback)
    else:
        # Shorter run with callbacks (e.g. a learning-rate sweep); plot the result
        history_dnn = dnn_model.fit(train_batch, callbacks=callback,
                                    epochs=nof_epochs)
        vis.plot_lr(history_dnn)
        mse, mae = 0, 0

    return mse, mae
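
# Usage sketch for neural_networks (illustrative, not part of the original
# pipeline): a synthetic noisy-trend series stands in for real price data,
# and an empty callback list selects the full training-plus-evaluation branch.
def _example_neural_networks():
    days = np.arange(1000, dtype="float32")
    prices = 50 + 0.05 * days + 5 * np.random.randn(1000).astype("float32")
    # No callbacks -> train for 4 * nof_epochs and run both prediction modes
    return neural_networks(prices, days, hidden_neurons=[10, 10],
                           callback=[], n_days=30)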

def linear_regression(prices, days, callback, n_days, threshold=0.67,
                      window_size=30, batch_size=64, window_shift=1,
                      nof_epochs=100, lr_rate=1e-6, mom=0.9):
    # Split data into training and test sets
    split, train_prices, train_days, test_prices, test_days = fr.split_data(
        prices, days, threshold=threshold)

    # Reset all internal variables
    init()

    # Create windows on the training data
    train_windows, train_batch = fr.create_windows(train_prices,
                                                   window_size=window_size,
                                                   batch_size=batch_size,
                                                   w_shift=window_shift)

    # Introduce model: a single layer with one neuron
    lr_layer = kr.layers.Dense(1, input_shape=[window_size])
    lr_model = kr.models.Sequential([lr_layer])
    lr_model.summary()

    # Choose MSE loss and SGD optimizer
    lr_model.compile(loss="mse",
                     optimizer=kr.optimizers.SGD(learning_rate=lr_rate,
                                                 momentum=mom))

    if len(callback) == 0:
        history_lr = lr_model.fit(train_batch, epochs=4 * nof_epochs)

        print("Parameters")
        print(lr_layer.get_weights())  # learned weights and bias

        # Predictions
        print("Point by point prediction")
        predicted_lr = fr.point_prediction(lr_model, prices, split,
                                           window_size)
        mse, mae = fr.evaluate_model(history_lr, test_prices, predicted_lr,
                                     test_days, callback)

        print("Predict next n days function")
        predicted_lr = fr.predict_next_n_days(n_days, lr_model, prices, split,
                                              window_size)
        mse, mae = fr.evaluate_model(history_lr, test_prices[:n_days],
                                     predicted_lr, test_days[:n_days],
                                     callback)
    else:
        history_lr = lr_model.fit(train_batch, callbacks=callback,
                                  epochs=nof_epochs)
        vis.plot_lr(history_lr)  # pass the history, as the other models do
        mse, mae = 0, 0

    return mse, mae
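
# Usage sketch (an assumption, not from the original repo): sweeping the
# learning rate for linear_regression with a Keras LearningRateScheduler,
# which is the kind of callback the len(callback) != 0 branch expects. The
# schedule below is a standard exponential sweep from 1e-6 upwards.
def _example_linear_regression_lr_sweep(prices, days):
    lr_schedule = kr.callbacks.LearningRateScheduler(
        lambda epoch: 1e-6 * 10 ** (epoch / 20))  # 1e-6 -> 1e-1 over 100 epochs
    return linear_regression(prices, days, callback=[lr_schedule], n_days=30)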

def cnn_lstm(prices, days, cells, callback, n_days, bi_directional=True,
             threshold=0.67, window_size=30, batch_size=64, window_shift=1,
             nof_epochs=100, lr_rate=1e-6, mom=0.9):
    # Split data into training and test sets
    split, train_prices, train_days, test_prices, test_days = fr.split_data(
        prices, days, threshold=threshold)

    # Reset all internal variables
    init()

    # Add a channel dimension for the Conv1D layer (uni-variate series)
    train_prices = tf.expand_dims(train_prices, axis=-1)

    # Create windows on the training data
    train_windows, train_batch = fr.create_windows(train_prices,
                                                   window_size=window_size,
                                                   batch_size=batch_size,
                                                   w_shift=window_shift)

    # Introduce model
    cnn_lstm_model = kr.models.Sequential()
    cnn_lstm_model.add(kr.layers.Conv1D(
        filters=32, kernel_size=5, strides=1, padding='causal',
        activation='relu',
        input_shape=[None, 1]))  # pre-process the input dim for uni-variate analysis
    cells.append(0)  # sentinel consumed by the layer-builder helpers
    if bi_directional:
        cnn_lstm_model = bi_directional_layers(cnn_lstm_model, cells)
    else:
        cnn_lstm_model = uni_directional_layers(cnn_lstm_model, cells)
    cnn_lstm_model.add(kr.layers.Dense(1))  # output layer
    cnn_lstm_model.add(kr.layers.Lambda(lambda x: 200.0 * x))  # scale outputs to the price range
    cnn_lstm_model.summary()

    # Choose the Huber loss, which is less susceptible to outliers and noise
    cnn_lstm_model.compile(loss=kr.losses.Huber(),
                           optimizer=kr.optimizers.SGD(learning_rate=lr_rate,
                                                       momentum=mom),
                           metrics=["mae", "mse"])

    # Predictions
    if len(callback) == 0:
        history_cnn_lstm = cnn_lstm_model.fit(train_batch,
                                              epochs=4 * nof_epochs)

        # print("Point by point prediction")
        # predicted_cnn_lstm = fr.point_prediction(cnn_lstm_model, prices,
        #                                          split, window_size)
        # mse, mae = fr.evaluate_model(history_cnn_lstm, test_prices,
        #                              predicted_cnn_lstm, test_days, callback)
        # print("Predict next n days function")
        # predicted_cnn_lstm = fr.predict_next_n_days(n_days, cnn_lstm_model,
        #                                             prices, split, window_size)
        # mse, mae = fr.evaluate_model(history_cnn_lstm, test_prices[:n_days],
        #                              predicted_cnn_lstm, test_days[:n_days],
        #                              callback)

        print("Model forecast function")
        predicted_cnn_lstm = fr.model_forecast(cnn_lstm_model,
                                               prices[..., np.newaxis],
                                               window_size, batch_size)
        # Keep the last timestep of each window, starting at the train/test split
        predicted_cnn_lstm = predicted_cnn_lstm[split - window_size:-1, -1, 0]
        mse, mae = fr.evaluate_model(history_cnn_lstm, test_prices,
                                     predicted_cnn_lstm, test_days, callback)
    else:
        history_cnn_lstm = cnn_lstm_model.fit(train_batch, epochs=nof_epochs,
                                              callbacks=callback)
        mse, mae = 0, 0

    return mse, mae
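
# Sketch of the layer-builder helpers cnn_lstm relies on (the real
# bi_directional_layers / uni_directional_layers live elsewhere in the repo,
# so this is an assumption about their shape, not the actual code). Reading
# the 0 appended to `cells` as a sentinel, every listed LSTM layer is built
# with return_sequences=True, matching the 3-D output sliced by
# model_forecast above.
def _example_bi_directional_layers(model, cells):
    for cell in cells[:-1]:  # skip the trailing 0 sentinel
        model.add(kr.layers.Bidirectional(
            kr.layers.LSTM(units=cell, return_sequences=True)))
    return model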

def recurrent_nn(prices, days, cells, callback, n_days, threshold=0.67,
                 window_size=30, batch_size=64, window_shift=1,
                 nof_epochs=100, lr_rate=1e-6, mom=0.9):
    # Split data into training and test sets
    split, train_prices, train_days, test_prices, test_days = fr.split_data(
        prices, days, threshold=threshold)

    # Reset all internal variables
    init()

    # Create windows on the training data
    train_windows, train_batch = fr.create_windows(train_prices,
                                                   window_size=window_size,
                                                   batch_size=batch_size,
                                                   w_shift=window_shift)

    # Introduce model
    rnn_model = kr.models.Sequential()
    rnn_model.add(kr.layers.Lambda(
        lambda data: tf.expand_dims(data, axis=-1),
        input_shape=[None]))  # pre-process the input dim for uni-variate analysis
    for cell in cells[:-1]:
        rnn_model.add(kr.layers.SimpleRNN(
            units=cell, return_sequences=True))  # recurrent layer passing its full sequence onward
    rnn_model.add(kr.layers.SimpleRNN(
        units=cells[-1], return_sequences=False))  # final recurrent layer returns only the last output
    rnn_model.add(kr.layers.Dense(1))  # output layer
    rnn_model.add(kr.layers.Lambda(lambda x: 100.0 * x))  # scale outputs to the price range
    rnn_model.summary()

    # Choose the Huber loss, which is less susceptible to outliers and noise
    rnn_model.compile(loss=kr.losses.Huber(),
                      optimizer=kr.optimizers.SGD(learning_rate=lr_rate,
                                                  momentum=mom),
                      metrics=["mae", "mse"])

    # Predictions
    if len(callback) == 0:
        history_rnn = rnn_model.fit(train_batch, epochs=4 * nof_epochs)

        print("Point by point prediction")
        predicted_rnn = fr.point_prediction(rnn_model, prices, split,
                                            window_size)
        mse, mae = fr.evaluate_model(history_rnn, test_prices, predicted_rnn,
                                     test_days, callback)

        print("Predict next n days function")
        predicted_rnn = fr.predict_next_n_days(n_days, rnn_model, prices,
                                               split, window_size)
        mse, mae = fr.evaluate_model(history_rnn, test_prices[:n_days],
                                     predicted_rnn, test_days[:n_days],
                                     callback)
    else:
        history_rnn = rnn_model.fit(train_batch, epochs=nof_epochs,
                                    callbacks=callback)
        vis.plot_lr(history_rnn)
        mse, mae = 0, 0

    return mse, mae
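
# Usage sketch of the two-phase workflow the branches above imply (an
# assumption based on this module, not documented in the original source):
# first sweep learning rates with a callback and read off a good value from
# vis.plot_lr, then retrain without callbacks at the chosen rate.
def _example_recurrent_nn(prices, days):
    lr_schedule = kr.callbacks.LearningRateScheduler(
        lambda epoch: 1e-8 * 10 ** (epoch / 20))
    recurrent_nn(prices, days, cells=[40, 40], callback=[lr_schedule],
                 n_days=30)                       # phase 1: pick a rate
    return recurrent_nn(prices, days, cells=[40, 40], callback=[],
                        n_days=30, lr_rate=5e-7)  # phase 2: full training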