# Train the model, keeping the returned History object so the per-epoch
# metrics can be written out afterwards.
history = model.fit(x_train, y_train,
                    batch_size=batchsize, epochs=epochs,
                    validation_data=(x_val, y_val))

# Output training history to csv
history_df = pd.DataFrame(history.history)
history_df.to_csv("training_history.csv")

print("Predicting...")
result = model.predict(x_test)
predicted = pd.DataFrame(result)
predicted.columns = ['predicted_nikkei']
predicted['actual_nikkei'] = y_test
print("Completed Prediction.")
print(predicted.shape)
print(predicted[:10])

# Output predictions to csv
predicted.to_csv("predicted.csv")

# Evaluate on the test set; assuming the model was compiled with loss='mse'
# and no extra metrics, this returns the scalar test MSE.
evaluation_score = model.evaluate(x_test, y_test, batch_size=batchsize, verbose=1)
print("Evaluation score is {}".format(evaluation_score))

# Evaluate the naive benchmark: its prediction is derived from the training
# targets alone, then scored against the test targets with MSE.
dummy_predicted = DummyPredictor().get_evaluation_score(y_train)
dummy_evaluation_score = np.mean((y_test - dummy_predicted) ** 2)
print("Dummy evaluation score is {}".format(dummy_evaluation_score))

# Compare: model MSE as a percentage of the benchmark MSE.
comparison = (evaluation_score / dummy_evaluation_score) * 100
print("This model's MSE is {} percent of the benchmark's (smaller is better).".format(comparison))
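# DummyPredictor is imported elsewhere and its source is not shown in this
# snippet. Below is a minimal sketch of one plausible implementation, assuming
# the benchmark is a mean predictor: get_evaluation_score(y_train) returns the
# mean of the training targets, which NumPy then broadcasts against y_test in
# the MSE computation above. This class body is an illustration under that
# assumption, not the project's actual code.
import numpy as np

class DummyPredictor:
    def get_evaluation_score(self, y_train):
        # Predict the training-target mean for every test sample; NumPy
        # broadcasting handles the elementwise subtraction against y_test.
        return np.mean(y_train)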
import sys

# Avoid writing .pyc files during experiments.
sys.dont_write_bytecode = True

import utils
from data import DataLoader
from lstm_model import LSTMModel
from feed_forward_model import FeedForwardModel

# k-fold cross-validation: train and score one LSTM per fold, then average.
k = 5
results = []
total = 0
for i in range(k):
    dataLoader = DataLoader(src="MNIST_data/dataset")
    [
        noisyTrain, noisySymbolsTrain, targetsTrain,
        noisyValidation, noisySymbolsValidation, targetsValidation,
        noisyTest, noisySymbolsTest, targetsTest,
    ] = dataLoader.getSequencialData(i, 2, 2)

    model = LSTMModel(2, [512, 512], 1568, 20)
    model.train(noisySymbolsTrain, targetsTrain,
                noisySymbolsValidation, targetsValidation, 100, 200)

    # evaluate() is assumed to return [loss, accuracy]; keep the accuracy.
    result = model.evaluate(noisySymbolsTest, targetsTest)
    results.append(result[1])
    total += result[1]

print(results)
print("SCORE: " + str(total / k))
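# LSTMModel is a project-local wrapper (lstm_model.py) whose source is not
# shown here. The sketch below is one plausible Keras-based reading of the
# interface used above; the constructor interpretation
# (n_steps, hidden_sizes, n_features, n_outputs), the argument order of
# train(), and the loss/optimizer choices are all assumptions, not the
# project's actual implementation.
from tensorflow import keras

class LSTMModelSketch:
    def __init__(self, n_steps, hidden_sizes, n_features, n_outputs):
        layers = [keras.Input(shape=(n_steps, n_features))]
        # Stack the LSTM layers; every layer except the last returns full
        # sequences so the next LSTM still receives 3-D input.
        for i, units in enumerate(hidden_sizes):
            layers.append(keras.layers.LSTM(
                units, return_sequences=i < len(hidden_sizes) - 1))
        # Targets are two concatenated one-hot digit blocks, so a sigmoid
        # output with binary cross-entropy is one reasonable choice.
        layers.append(keras.layers.Dense(n_outputs, activation="sigmoid"))
        self.model = keras.Sequential(layers)
        self.model.compile(optimizer="adam", loss="binary_crossentropy",
                           metrics=["accuracy"])

    def train(self, x, y, x_val, y_val, batch_size, epochs):
        # Assumes the two trailing integers in the calls above are
        # (batch_size, epochs); the actual order is not visible here.
        return self.model.fit(x, y, batch_size=batch_size, epochs=epochs,
                              validation_data=(x_val, y_val))

    def evaluate(self, x, y):
        # Returns [loss, accuracy]; the cross-validation loop keeps result[1].
        return self.model.evaluate(x, y)

    def predict(self, x):
        return self.model.predict(x)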
# Second experiment: the "don't care" variant of the split, one extra LSTM
# layer, trained on the raw noisy images instead of the symbol inputs.
[
    noisyTrain, noisySymbolsTrain, targetsTrain,
    noisyValidation, noisySymbolsValidation, targetsValidation,
    noisyTest, noisySymbolsTest, targetsTest,
] = dataLoader.getSequencialDataWithDontCare(0, 2, 2)

model = LSTMModel(2, [512, 512, 512], 1568, 20)
model.train(noisyTrain, targetsTrain, noisyValidation, targetsValidation, 100, 100)
result = model.evaluate(noisyTest, targetsTest)

# Each target encodes a two-digit number as two concatenated one-hot blocks:
# entries [:10] are the tens digit, entries [10:] the ones digit. Count the
# exact matches between decoded predictions and decoded targets.
predictions = model.predict(noisyTest)
count = 0
for index in range(len(predictions)):
    a = utils.argmax(predictions[index][:10])
    b = utils.argmax(predictions[index][10:])
    predicted = a * 10 + b

    a = utils.argmax(targetsTest[index][:10])
    b = utils.argmax(targetsTest[index][10:])
    actual = a * 10 + b

    if actual == predicted:
        count += 1

print("Exact-match accuracy: " + str(count / len(predictions)))
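# utils is a project-local helper module whose source is not shown; the only
# function used above is utils.argmax. It is assumed to return the index of
# the largest element of a sequence, as in this hypothetical one-liner:
def argmax(values):
    # Index of the maximum element; ties go to the first occurrence.
    return max(range(len(values)), key=lambda i: values[i])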