def lstm_prediction(indicator_file_name, data_input):
    """Load a previously trained LSTM model and run a prediction.

    Args:
        indicator_file_name: Identifier used to resolve the saved .h5 model
            path via ``Configuration.get_file_model_file``.
        data_input: Input array for the network; assumed shaped
            (samples, timesteps, 1) to match training — TODO confirm caller.

    Returns:
        dict with:
            'summary': the model's text summary (one string),
            'predicted_price': the raw prediction array from the model.
    """
    modelh5_path = Configuration.get_file_model_file(indicator_file_name)

    # Restore the trained Recurrent Neural Network from disk.
    regressor = keras.models.load_model(modelh5_path)
    predicted_price = regressor.predict(data_input)

    # BUG FIX: Model.summary() prints to stdout and returns None, so the
    # original str(regressor.summary()) always stored the string 'None'.
    # Capture the printed lines through the print_fn hook instead.
    summary_lines = []
    regressor.summary(print_fn=summary_lines.append)

    result = {
        'summary': '\n'.join(summary_lines),
        'predicted_price': predicted_price
    }
    return result
def initialise_model_lstm(indicator_file_name, timesteps, test_set_input,
                          test_set_output, train_set_input, train_set_output,
                          epochs):
    """Build, train and persist a 4-layer LSTM price-prediction network.

    The model is saved both as a TensorFlow.js artifact (for browser use)
    and as a Keras .h5 file (for Python use), then evaluated on the test set.

    Args:
        indicator_file_name: Identifier used to resolve the model output
            directory and .h5 path via ``Configuration``.
        timesteps: Number of timesteps per input sample; the network input
            shape is (timesteps, 1).
        test_set_input: Validation/test inputs.
        test_set_output: Validation/test targets.
        train_set_input: Training inputs.
        train_set_output: Training targets.
        epochs: Number of training epochs.

    Returns:
        dict with:
            'summary': the model's text summary (one string),
            'history': stringified per-epoch loss history,
            'predicted_price': prediction array for the test inputs.
    """
    keras_model_directory = Configuration.get_directory(indicator_file_name)
    modelh5_path = Configuration.get_file_model_file(indicator_file_name)

    # Define the RNN input shape: one feature per timestep.
    lstm_input_shape = (timesteps, 1)

    # Initialize the Recurrent Neural Network —
    # solving a pattern within a sequence of data.
    regressor = keras.models.Sequential()
    training.update_state(state="PROGRESS", meta={'progress': 70})

    neurons = 50
    batch_size = 32

    # Long Short Term Memory model — a supervised deep neural network that
    # is very good at time-series prediction.
    #
    # Dropout: ensures the network can never rely on any given activation
    # being present, because at any moment it may be squashed (zeroed) —
    # the net is forced to learn a redundant representation of everything.
    #
    # return_sequences=True: emit the output at every timestep so it can
    # be fed to the next stacked LSTM layer.
    regressor.add(
        keras.layers.LSTM(units=neurons,
                          return_sequences=True,
                          input_shape=lstm_input_shape))
    regressor.add(keras.layers.Dropout(0.2))

    # Second LSTM layer with Dropout regularisation.
    regressor.add(keras.layers.LSTM(units=neurons, return_sequences=True))
    regressor.add(keras.layers.Dropout(0.2))

    # Third LSTM layer with Dropout regularisation.
    regressor.add(keras.layers.LSTM(units=neurons, return_sequences=True))
    regressor.add(keras.layers.Dropout(0.2))

    # Fourth (final) LSTM layer: no return_sequences — only the last
    # output is passed on.
    regressor.add(keras.layers.LSTM(units=neurons))
    regressor.add(keras.layers.Dropout(0.2))

    # Output layer: a single dense unit producing the predicted value.
    regressor.add(keras.layers.Dense(units=1))

    # Compile the network; 'adam' searches for the global minimum of the
    # mean-absolute-error loss.
    regressor.compile(optimizer='adam', loss='mean_absolute_error')
    regressor.summary()

    # Baseline evaluation of the untrained model on the test set.
    # BUG FIX: evaluate() returns the loss (MAE) here, not an accuracy —
    # the original message "Restored model, accuracy: ...%" was misleading.
    pre_train_loss = regressor.evaluate(test_set_input, test_set_output)
    print('Untrained model, test loss (MAE): {:.4f}'.format(pre_train_loss))

    # Fit the RNN to the training set; loss should decrease every epoch.
    # NOTE(review): passing both batch_size and steps_per_epoch=5 limits
    # each epoch to 5 batches — confirm this cap is intentional.
    history = regressor.fit(train_set_input,
                            train_set_output,
                            batch_size=batch_size,
                            epochs=epochs,
                            steps_per_epoch=5,
                            validation_data=(test_set_input, test_set_output),
                            verbose=0,
                            callbacks=[KerasFitCallback(training, epochs)])

    # Save the model in a JavaScript-readable format (TensorFlow.js).
    tfjs.converters.save_keras_model(regressor, keras_model_directory)
    # Save the model in a Python-readable format (.h5).
    regressor.save(modelh5_path)

    predicted_price = regressor.predict(test_set_input)

    # BUG FIX: Model.summary() prints and returns None — capture the text
    # via print_fn. Likewise str(history) was only an object repr; the
    # actual per-epoch metrics live in history.history.
    summary_lines = []
    regressor.summary(print_fn=summary_lines.append)

    result = {
        'summary': '\n'.join(summary_lines),
        'history': str(history.history),
        'predicted_price': predicted_price
    }
    return result