def predict(modelpath, UNTRAINED_MODEL=False):
    """Run the RNN over the train/validation/test sets and return predictions.

    Args:
        modelpath: Path to a saved trained model; ignored when
            UNTRAINED_MODEL is True.
        UNTRAINED_MODEL: When True, build a fresh untrained RNN from the
            module-level hyperparameter constants instead of loading one.

    Returns:
        Tuple ``(trainPred, validatePred, testPred, groundTruth)`` where the
        three prediction arrays have been de-standardized via
        ``scaler.inverse_transform`` and ``groundTruth`` is the concatenation
        of the usable (non-padded) slices of all three datasets.
    """
    if UNTRAINED_MODEL:
        rnn = RNN(HIDDEN_NODES, LOOKBACK, WINDOW_SIZE, SAMPLERATE, 1)
    else:
        rnn = loadTrainedModel(modelpath)

    trainingSet, validationSet, scaler = setup()
    testSet = readDataset(TEST_SET)

    # The original code had two near-identical branches: when
    # sampleRate < windowSize it passed windowSize as the generators'
    # sampleRate (and batchLength), otherwise sampleRate itself.  That is
    # simply the larger of the two, so the duplication collapses to max().
    effectiveRate = max(rnn.sampleRate, rnn.windowSize)
    batchLength = effectiveRate  # NOTE(review): or sampleRate * windowSize?
    trainGen, validateGen, testGen = (
        DataGenerator(dataset, scaler, windowSize=rnn.windowSize,
                      lookback=rnn.lookBack, sampleRate=effectiveRate)
        for dataset in (trainingSet, validationSet, testSet)
    )

    # Usable sample count excludes the generator's warm-up region at the
    # start (minIndex) and its final partial step at the end (maxStepIndex).
    trainingSetTrueSize = TRAINING_DATASIZE - trainGen.maxStepIndex - trainGen.minIndex
    validationSetTrueSize = VALIDATION_DATASIZE - validateGen.maxStepIndex - validateGen.minIndex
    testSetTrueSize = TEST_DATASIZE - testGen.maxStepIndex - testGen.minIndex
    trainStep = int(trainingSetTrueSize / batchLength)
    validateStep = int(validationSetTrueSize / batchLength)
    testStep = int(testSetTrueSize / batchLength)
    if DEBUG:
        print(
            f"trainStep: {trainStep}, validationStep: {validateStep}, testStep: {testStep}"
        )

    def _timedPredict(gen, steps, label, dataset):
        # One timed predict_generator pass over `gen`; `label` is
        # "training" / "validation" / "test" so the debug strings match
        # the original copy-pasted blocks byte-for-byte.
        start = time.time()
        pred = rnn.model.predict_generator(gen.generator(returnLabel=False), steps)
        end = time.time()
        if DEBUG:
            print(
                f"Time to make {pred.shape} {label} predictions: {end - start:.3f}, {label} dataset shape {dataset.shape}"
            )
        return pred

    # Model predictions
    trainPred = _timedPredict(trainGen, trainStep, "training", trainingSet)
    validatePred = _timedPredict(validateGen, validateStep, "validation", validationSet)
    testPred = _timedPredict(testGen, testStep, "test", testSet)

    # Undo the standardization on the predictions
    trainPred = scaler.inverse_transform(trainPred)
    validatePred = scaler.inverse_transform(validatePred)
    testPred = scaler.inverse_transform(testPred)

    # Sampling like this
    # | - minIndex - |                  | - maxStepIndex - |
    # [ .......... {      TRUE SIZE      } .............. ]
    trainingTruth = trainingSet[trainGen.minIndex:-trainGen.maxStepIndex].ravel()
    validationTruth = validationSet[validateGen.minIndex:-validateGen.maxStepIndex].ravel()
    testTruth = testSet[testGen.minIndex:-testGen.maxStepIndex].ravel()
    if DEBUG:
        print(
            f"trainingTruth shape: {trainingTruth.shape}, validationTruth shape: {validationTruth.shape}, testTruth shape: {testTruth.shape}"
        )

    groundTruth = np.block([trainingTruth, validationTruth, testTruth])
    return trainPred, validatePred, testPred, groundTruth