def evaluateAndPlot(loss, model, trainData, testData, numTrainSeqPlot,
                    trainPlotSeqLength, plotPrefix, plotDir):
    """Plot the training loss curve, predictions on random training windows
    and on the test set, then write the final losses to a text file.

    Args:
        loss: per-iteration training losses; ``loss[-1]`` is the final loss.
        model: trained forecaster exposing ``evaluate(seq, returnPred=True)``.
        trainData: training series (indexable, with ``.shape[0]``).
        testData: held-out series evaluated in one pass.
        numTrainSeqPlot: number of random training windows to evaluate/plot.
        trainPlotSeqLength: length of each sampled training window.
        plotPrefix: filename prefix for all outputs.
        plotDir: directory in which plots and the loss file are written.
    """
    plotPath = plotDir + f'/{plotPrefix}_loss.png'
    Plot.plotLoss(loss, savePath=plotPath, saveOnly=True)

    for i in range(numTrainSeqPlot):
        # Sample a random contiguous window from the training data
        idx = np.random.randint(0, trainData.shape[0] - trainPlotSeqLength)
        seq = trainData[idx:idx + trainPlotSeqLength]
        evalLoss, Ypred = model.evaluate(seq, returnPred=True)
        # One-step-ahead forecast: ground truth is the window shifted by one
        Ytrue = seq[1:]
        print(f'Train Eval Loss: {evalLoss}')
        plotPath = plotDir + f'/{plotPrefix}_train{i}.png'
        Plot.plotPredTrue(Ypred, Ytrue, 'Train Data',
                          savePath=plotPath, saveOnly=True)

    testLoss, Ypred = model.evaluate(testData, returnPred=True)
    Ytrue = testData[1:]
    print(f'Test Eval Loss: {testLoss}')
    plotPath = plotDir + f'/{plotPrefix}_test.png'
    Plot.plotPredTrue(Ypred, Ytrue, 'Test Data',
                      savePath=plotPath, saveOnly=True)

    # 'with' guarantees the handle is closed even if write() raises
    # (the original open/write/close leaked the handle on error)
    lossPath = plotDir + f'/{plotPrefix}_loss'
    with open(lossPath, 'w') as fl:
        fl.write(f'Final Train Loss: {loss[-1]}\n' + f'Test Loss: {testLoss}')
def tryModelMultiSeq(trainSequences, testData, plotPrefix, plotDir,
                     numTrainSeqPlot=5):
    """Train a small LstmForecast on multiple training sequences, then plot
    the loss curve, predictions on random training sequences and on the test
    set, and save the final losses to a text file.

    Args:
        trainSequences: list of training sequences fed to ``model.train``.
        testData: held-out series, evaluated in one pass.
        plotPrefix: filename prefix for all outputs.
        plotDir: directory in which plots and the loss file are written.
        numTrainSeqPlot: number of random training sequences to evaluate and
            plot (default 5, matching the previous hard-coded value).
    """
    model = LstmForecast(
        forecastHorizon=1,
        stateSize=10,
        activation='tanh',
        numRnnLayers=1
    )
    loss = model.train(
        trainSequences=trainSequences,
        numIterations=15,
        optimizer=tf.keras.optimizers.Adam(
            learning_rate=tf.keras.optimizers.schedules.ExponentialDecay(
                0.08, 40, 0.98
            )
        )
    )
    plotPath = plotDir + f'/{plotPrefix}_loss.png'
    Plot.plotLoss(loss, savePath=plotPath, saveOnly=True)

    # randint already returns an iterable array; no need to wrap in list()
    sampledIndices = np.random.randint(0, len(trainSequences),
                                       size=(numTrainSeqPlot,))
    for i, idx in enumerate(sampledIndices):
        seq = trainSequences[idx]
        evalLoss, Ypred = model.evaluate(seq, returnPred=True)
        # One-step-ahead forecast: ground truth is the sequence shifted by one
        Ytrue = seq[1:]
        print(f'Training Eval Loss: {evalLoss}')
        plotPath = plotDir + f'/{plotPrefix}_train{i}.png'
        Plot.plotPredTrue(
            Ypred, Ytrue, 'Train Data', savePath=plotPath, saveOnly=True
        )

    testLoss, Ypred = model.evaluate(testData, returnPred=True)
    Ytrue = testData[1:]
    print(f'Test Eval Loss: {testLoss}')
    plotPath = plotDir + f'/{plotPrefix}_test.png'
    Plot.plotPredTrue(Ypred, Ytrue, 'Test Data',
                      savePath=plotPath, saveOnly=True)

    # 'with' guarantees the handle is closed even if write() raises
    lossPath = plotDir + f'/{plotPrefix}_loss'
    with open(lossPath, 'w') as fl:
        fl.write(
            f'Final Train Loss: {loss[-1]}\n'
            + f'Test Loss: {testLoss}'
        )
def main():
    """Train a 3-layer LstmForecast on synthetic 'long_term' data broken
    into fixed-length sequences, then plot predictions on random training
    sequences and on the held-out test split.
    """
    n = 21500
    trainN = 21000
    seqLength = 500
    numSeqPlot = 5

    trainData, testData = Utility.trainTestSplit(
        StandardGenerator('long_term').generate(n),
        trainN
    )
    trainSequences = Utility.breakSeq(trainData, seqLength=seqLength)

    model = LstmForecast(
        forecastHorizon=1,
        stateSize=50,
        activation='tanh',
        numRnnLayers=3
    )
    model.model.summary()

    loss = model.train(
        trainSequences=trainSequences,
        numIterations=15,
        optimizer=tf.keras.optimizers.Adam(
            learning_rate=tf.keras.optimizers.schedules.ExponentialDecay(
                0.01, 20, 0.96
            )
        )
    )
    Plot.plotLoss(loss)

    for i in range(numSeqPlot):
        idx = np.random.randint(0, len(trainSequences))
        evalLoss, Ypred = model.evaluate(trainSequences[idx], returnPred=True)
        # One-step-ahead forecast: ground truth is the sequence shifted by one
        Ytrue = trainSequences[idx][1:]
        # Report the loss (was previously computed but silently discarded)
        print(f'Train Eval Loss: {evalLoss}')
        Plot.plotPredTrue(Ypred, Ytrue, 'On Train')

    evalLoss, Ypred = model.evaluate(testData, returnPred=True)
    Ytrue = testData[1:]
    print(f'Test Eval Loss: {evalLoss}')
    Plot.plotPredTrue(Ypred, Ytrue, 'On Test')
def main():
    """Fit a DeepNN one-step forecaster on a synthetic 'long_term' series
    and visualize its training loss and test-set predictions.
    """
    totalPoints = 20200
    numTrain = 20000
    seqLen = 500

    # Generator yields a 1-D series; add a column axis for the model
    series = np.expand_dims(
        StandardGenerator('long_term').generate(totalPoints), axis=1
    )
    trainPart, testPart = Utility.trainTestSplit(series, numTrain)
    sequences = Utility.breakTrainSeq(trainPart, None, seqLen)

    horizon = 1
    lag = 30
    forecaster = DeepNN(
        forecastHorizon=horizon,
        lag=lag,
        numUnitsPerLayer=10,
        numLayers=2,
        numTargetVariables=1,
        numExoVariables=0
    )

    lossHistory = forecaster.train(
        trainSequences=sequences,
        numIterations=20,
        optimizer=tf.keras.optimizers.Adam(
            learning_rate=tf.keras.optimizers.schedules.ExponentialDecay(
                0.1, 25, 0.97
            )
        ),
        verboseLevel=2,
        returnLosses=True
    )
    Plot.plotLoss(lossHistory)

    testLoss, predictions = forecaster.evaluate(
        testPart,
        returnPred=True
    )
    # First lag+horizon observations have no prediction; drop them
    actual = testPart[lag + horizon:, :]
    print(f'Eval Loss: {testLoss}')
    Plot.plotPredTrue(predictions, actual)
def main():
    """Train an ExtremeTime2 forecaster on a synthetic 'long_term' series,
    then evaluate and plot it on random training windows and the test split.
    """
    totalPoints = 21500
    numTrain = 21000
    windowLen = 500
    numWindowsToPlot = 5

    trainPart, testPart = Utility.trainTestSplit(
        StandardGenerator('long_term').generate(totalPoints),
        numTrain
    )

    forecaster = ExtremeTime2(
        forecastHorizon=1,
        memorySize=20,
        windowSize=10,
        embeddingSize=10,
        contextSize=10
    )

    lossHistory = forecaster.train(
        targetSeries=trainPart,
        sequenceLength=windowLen,
        numIterations=10,
        optimizer=tf.keras.optimizers.Adam(
            learning_rate=tf.keras.optimizers.schedules.ExponentialDecay(
                0.01, 50, 0.99
            )
        ),
        verboseLevel=1,
        returnLosses=True
    )
    Plot.plotLoss(lossHistory)

    for _ in range(numWindowsToPlot):
        # Evaluate on a random contiguous window of the training series
        start = np.random.randint(0, numTrain - windowLen)
        window = trainPart[start:start + windowLen]
        windowLoss, predictions = forecaster.evaluate(window, returnPred=True)
        # One-step-ahead forecast: truth is the window shifted by one
        actual = window[1:]
        print(f'Train Eval Loss: {windowLoss}')
        Plot.plotPredTrue(predictions, actual, 'On Train')

    testLoss, predictions = forecaster.evaluate(testPart, returnPred=True)
    actual = testPart[1:]
    print(f'Test Eval Loss: {testLoss}')
    Plot.plotPredTrue(predictions, actual, 'On Test')