# Backpropagate the analysis gradient through the feature layers for the
# current time step `t` (dAL[:3] is the feature-layer slice of the analysis
# gradient; dAL[3:] is the context-layer slice — see the loop below).
# NOTE(review): this fragment was recovered from a collapsed one-line source;
# the nesting below is inferred from variable usage (the ts-indexed statements
# must sit inside the time-step loop) — confirm against the original file.
dFLOa = FeatureLayerOut.Grad(dAL[:3], t)
FeatureLayerIn.Grad(dFLOa, t)

# Hoist the repeated data-block lookup out of the loop header and the
# display print below (same dict entry each time).
block = data[str(i)]

# Unravel the context layer with respect to the analysis input to compute
# the gradient, walking the stored time steps newest-to-oldest.
for ts in reversed(range(len(block['context']))):
    dCLO = ContextOut.Grad(dAL[3:], ts)
    dCLI = ContextIn.Grad(dCLO, ts)
    # Propagate the context gradient for this time step back into the
    # feature layers, accumulating onto the gradients already computed
    # at the analysis stage above.
    dFLOc = FeatureLayerOut.Grad(dCLI, ts)
    FeatureLayerIn.Grad(dFLOc, ts)

# On a display epoch, show the analysis result for this step, the sample,
# and the target it should have matched.  f-string output is identical to
# the original str(...) + " " + str(...) concatenation.
if epoch % display == 0:
    print(f"{block['analysis'][t]} {sample[t]} {block['target'][t]}")

# Apply the accumulated gradients to every layer of this data block.
# The original wrote the same rate inconsistently as 0.1 / .1; one named
# constant makes the shared value explicit.
LEARNING_RATE = 0.1
FeatureLayerIn.update(learning_rate=LEARNING_RATE)
FeatureLayerOut.update(learning_rate=LEARNING_RATE)
ContextIn.update(learning_rate=LEARNING_RATE)
ContextOut.update(learning_rate=LEARNING_RATE)
AnalysisLayer.update(learning_rate=LEARNING_RATE)

# Print an epoch banner on display epochs to help read the output,
# then advance the epoch counter.
if epoch % display == 0:
    print(f"Epoch: {epoch}")
    print()
epoch += 1