def learn_nn_and_save(training_data: StockData, test_data: StockData, filename_to_save: str):
    """
    Trains the neural network on windows of WINDOW_SIZE consecutive prices, labelled with the
    direction of the following price (+1.0 for a rise, -1.0 otherwise), and saves it to the
    file system. test_data is currently not used.
    """
    network = create_model()
    network.compile(loss='mean_squared_error', optimizer='sgd')

    values = training_data.get_values()
    setCount = len(values) - (WINDOW_SIZE + 1)

    # Inputs: sliding windows of WINDOW_SIZE consecutive prices
    xtrain = []
    for element in range(0, setCount):
        xtrain.append(numpy.array(values[element:WINDOW_SIZE + element]))
    X_TRAIN = numpy.array(xtrain)

    # Targets: +1.0 if the price right after the window rises, -1.0 otherwise
    Y_TRAIN = numpy.empty(setCount, dtype=float)
    offset = WINDOW_SIZE - 1
    for element in range(0, setCount):
        current = values[element + offset]
        next_value = values[element + offset + 1]
        Y_TRAIN[element] = 1.0 if next_value > current else -1.0

    history = network.fit(X_TRAIN, Y_TRAIN, epochs=EPOCHS, batch_size=BATCH_SIZE)
    draw_history(history)

    # Save trained model: separate network structure (stored as JSON) and trained weights (stored as HDF5)
    save_keras_sequential(network, RELATIVE_PATH, filename_to_save)
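# To illustrate the windowing and labelling above in isolation, here is a small,
# self-contained sketch (plain numpy, made-up prices, an illustrative window size of 3)
# that reproduces how X_TRAIN and Y_TRAIN are derived. It is illustrative only and
# not part of the project code.
import numpy

WINDOW_SIZE = 3  # illustrative value; the project defines its own constant

values = [10.0, 11.0, 10.5, 10.7, 11.2, 11.0, 10.8]  # made-up prices
set_count = len(values) - (WINDOW_SIZE + 1)

# Inputs: sliding windows of WINDOW_SIZE consecutive prices
x_train = numpy.array([values[i:WINDOW_SIZE + i] for i in range(set_count)])

# Targets: +1.0 if the price right after the window rises, -1.0 otherwise
offset = WINDOW_SIZE - 1
y_train = numpy.array([1.0 if values[i + offset + 1] > values[i + offset] else -1.0
                       for i in range(set_count)])

print(x_train)  # shape (set_count, WINDOW_SIZE)
print(y_train)  # [ 1.  1. -1.] for the series above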
def learn_nn_and_save(data: StockData, filename_to_save: str):
    """
    Starts the training of the neural network and saves it to the file system

    Args:
        data: The data to train on
        filename_to_save: The filename to save the trained NN to
    """
    dates = data.get_dates()
    prices = data.get_values()

    # Generate training data
    # Build chunks of prices from 100 consecutive days (input_prices) and the 101st day (current_prices_for_plot)
    current_prices_for_plot, input_prices, wanted_results = get_data(prices)

    # Shape and configuration of the network are optimized for binary classification problems,
    # see: https://keras.io/getting-started/sequential-model-guide/
    network = create_model()
    network.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

    # Train the neural network; note that validation is run on the training data itself
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.9, patience=5, min_lr=0.000001, verbose=1)
    history = network.fit(input_prices, wanted_results, epochs=500, batch_size=128, verbose=1,
                          validation_data=(input_prices, wanted_results), shuffle=True, callbacks=[reduce_lr])

    # Evaluate the trained neural network and plot results
    score = network.evaluate(input_prices, wanted_results, batch_size=128, verbose=0)
    logger.debug(f"Test score: {score}")

    # Draw training history
    plt.figure()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.plot(history.history['acc'])
    plt.title('training loss / validation loss / accuracy by epoch')
    plt.ylabel('loss/acc')
    plt.xlabel('epoch')
    plt.legend(['loss', 'val_loss', 'acc'], loc='best')

    # Draw predicted prices against real prices
    plt.figure()
    current_price_prediction = network.predict(input_prices, batch_size=128)

    logger.debug("current_price_prediction:")
    for iteration, prediction in enumerate(current_price_prediction):
        logger.debug(f"iteration {iteration} - output: {prediction}")

    plt.plot(dates[INPUT_SIZE:], current_prices_for_plot, color="black")  # current prices in reality
    plt.plot(dates[INPUT_SIZE:], [calculate_delta(x) for x in current_price_prediction],
             color="green")  # prices predicted by the neural network
    plt.title('current prices / predicted prices by date')
    plt.ylabel('price')
    plt.xlabel('date')
    plt.legend(['current', 'predicted'], loc='best')
    plt.show()

    # Save trained model: separate network structure (stored as JSON) and trained weights (stored as HDF5)
    save_keras_sequential(network, RELATIVE_PATH, filename_to_save)
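# The helper get_data is not shown in this snippet. Based on the comment above, a plausible
# sketch of what it could look like follows. The exact form of wanted_results (here: a 0/1 flag
# for whether the 101st price rises above the 100th, matching the binary_crossentropy loss) and
# the value of INPUT_SIZE (assumed to be 100) are assumptions, not taken from the project.
import numpy as np

INPUT_SIZE = 100  # assumed window length, matching the "100 consecutive days" comment


def get_data(prices):
    """Hypothetical sketch: splits the price series into overlapping 100-day windows."""
    current_prices_for_plot, input_prices, wanted_results = [], [], []
    for i in range(len(prices) - INPUT_SIZE):
        window = prices[i:i + INPUT_SIZE]
        next_price = prices[i + INPUT_SIZE]
        input_prices.append(window)
        current_prices_for_plot.append(next_price)
        wanted_results.append(1.0 if next_price > window[-1] else 0.0)
    return (np.array(current_prices_for_plot),
            np.array(input_prices),
            np.array(wanted_results))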
def learn_nn_and_save(data: StockData, filename_to_save: str):
    """
    Starts the training of the neural network and saves it to the file system

    Args:
        data: The data to train on
        filename_to_save: The filename to save the trained NN to
    """
    dates = data.get_dates()
    prices = data.get_values()

    # Generate training data
    # Build chunks of prices from 100 consecutive days (last_prices) and the 101st day (current_price)
    last_prices, current_price = [], []
    for i in range(0, len(prices) - 100):
        last_prices.append(prices[i:100 + i])
        current_price.append(float(prices[100 + i]))

    network = create_model()
    network.compile(loss='mean_squared_error', optimizer='adam')

    # Train the neural network (Keras expects numpy arrays, not plain Python lists)
    history = network.fit(np.array(last_prices), np.array(current_price), epochs=10, batch_size=128, verbose=1)

    # Evaluate the trained neural network and plot results
    score = network.evaluate(np.array(last_prices), np.array(current_price), batch_size=128, verbose=0)
    logger.debug(f"Test score: {score}")

    # Draw training history
    plt.figure()
    plt.plot(history.history['loss'])
    plt.title('training loss by epoch')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['training loss'], loc='best')

    # Draw predicted prices against real prices
    plt.figure()
    current_price_prediction = network.predict(np.array(last_prices), batch_size=128)
    plt.plot(dates[100:], current_price, color="black")  # current prices in reality
    plt.plot(dates[100:], current_price_prediction, color="green")  # prices predicted by the neural network
    plt.title('current prices / predicted prices by date')
    plt.ylabel('price')
    plt.xlabel('date')
    plt.legend(['current', 'predicted'], loc='best')
    plt.show()

    # Save trained model: separate network structure (stored as JSON) and trained weights (stored as HDF5)
    save_keras_sequential(network, RELATIVE_PATH, filename_to_save)
def as_trend(self, stock_data: StockData):
    """
    Converts a series of stock values into a trend sequence:
    1 for a rise, -1 for a fall, 0 for no change compared to the previous value.
    """
    trends = []
    values = stock_data.get_values()
    for i in range(1, len(values)):
        if values[i - 1] < values[i]:
            trends.append(1)
        elif values[i - 1] > values[i]:
            trends.append(-1)
        else:
            trends.append(0)
    return trends
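# Quick standalone illustration of the trend encoding on a made-up value series
# (StockData is not needed for this sketch):
values = [10.0, 12.0, 12.0, 11.0, 13.0]
trends = []
for previous, current in zip(values, values[1:]):
    if previous < current:
        trends.append(1)    # price rose
    elif previous > current:
        trends.append(-1)   # price fell
    else:
        trends.append(0)    # price unchanged
print(trends)  # [1, 0, -1, 1]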
def doPredict(self, data: StockData) -> float:
    """
    Use the loaded trained neural network to predict the next stock value.

    Args:
        data: historical stock values of a company

    Returns:
        predicted next stock value for that company
    """
    stocks = data.get_values()

    # Take the last MODEL_LENGTH + 1 values to build MODEL_LENGTH relative price changes
    last = stocks[len(stocks) - (MODEL_LENGTH + 1):]
    changes = []
    for j in range(MODEL_LENGTH):
        increase = (last[j + 1] - last[j]) / last[j]
        # Clip relative changes to the range [-1, 1]
        increase = max(-1.0, min(1.0, increase))
        changes.append(increase)

    result = self.model.predict(np.array([changes]))
    value = result[0][0]

    # The network output is a relative change; convert it back into an absolute price
    absolute_last = stocks[-1]
    return absolute_last + absolute_last * value
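# Here the network output is interpreted as a relative change, which is converted back
# into an absolute price by scaling the most recent price. Made-up numbers, shown for
# illustration only (not the real model output):
last_price = 100.8                 # corresponds to data.get_values()[-1]
predicted_change = 0.01            # stand-in for self.model.predict(...)[0][0]
prediction = last_price + last_price * predicted_change
print(prediction)                  # 101.808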
def doPredict(self, data: StockData) -> float:
    """
    Use the loaded trained neural network to predict the next stock value.

    Args:
        data: historical stock values of a company

    Returns:
        predicted next stock value for that company
    """
    # Feed the last WINDOW_SIZE prices into the network; it predicts the price delta
    network_input = numpy.array([data.get_values()[-WINDOW_SIZE:]])
    output = self.model.predict(network_input)
    print("predicted %f for price %f" % (output[0], data.get_last()[1]))
    return output[0] + data.get_last()[1]
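# Unlike the previous predictor, the network output here is treated as an absolute price
# delta that is simply added to the most recent price. Made-up numbers for illustration:
last_price = 100.8       # corresponds to data.get_last()[1]
predicted_delta = 0.6    # stand-in for self.model.predict(network_input)[0]
prediction = predicted_delta + last_price
print(prediction)        # 101.4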
def learn_nn_and_save(training_data: StockData, test_data: StockData, filename_to_save: str):
    """
    Trains the neural network on windows of relative day-to-day price changes, evaluates it
    on the test data and saves it to the file system.
    """
    def build_data_set(stocks):
        # Builds windows of MODEL_LENGTH clipped relative price changes (inputs)
        # and the immediately following relative change (target)
        x, y = [], []
        for i in range(len(stocks) - (MODEL_LENGTH + 2)):
            changes = []
            for j in range(MODEL_LENGTH + 1):
                increase = (stocks[i + j + 1] - stocks[i + j]) / stocks[i + j]
                # Clip relative changes to the range [-1, 1]
                increase = max(-1.0, min(1.0, increase))
                changes.append(increase)
            x.append(changes[:MODEL_LENGTH])
            y.append([changes[MODEL_LENGTH]])
        return np.array(x), np.array(y)

    network = create_model()

    BATCH_SIZE = 100
    EPOCHS = 50

    # Train the neural network on the training data
    np_xtrain, np_ytrain = build_data_set(training_data.get_values())
    network.fit(np_xtrain, np_ytrain, epochs=EPOCHS, batch_size=BATCH_SIZE)

    # Evaluate the trained neural network on the test data
    np_xtest, np_ytest = build_data_set(test_data.get_values())
    score = network.evaluate(np_xtest, np_ytest, batch_size=BATCH_SIZE)

    # Save trained model: separate network structure (stored as JSON) and trained weights (stored as HDF5)
    save_keras_sequential(network, RELATIVE_PATH, filename_to_save)