class FeedbackModel(LearningModel):
    """Two-region HTM model with feedback from the general region.

    Structure:
        WordEncoder -> WordSP -> WordTM
        ActionEncoder -> ActionSP -> ActionTM
        WordTM, ActionTM -> GeneralSP -> GeneralTM
    """

    def __init__(self, wordEncoder, actionEncoder, trainingSet,
            modulesParams=None):
        """
        @param wordEncoder
        @param actionEncoder
        @param trainingSet: A module containing the trainingData, all of its
            categories and the inputIdx dict that maps each index in
            categories to an input name.
        @param modulesParams: (default: None) Optional dict of per-module
            parameter dicts; see initModules for the expected keys.
        """
        super(FeedbackModel, self).__init__(wordEncoder, actionEncoder,
            trainingSet, modulesParams)

        self.initModules(trainingSet.categories, trainingSet.inputIdx)

        # Logical wiring of the network: each key feeds the module named by
        # its value (None marks the output end of the chain).
        self.structure = {
            'wordInput': 'wordEnc',
            'wordEnc': 'wordSP',
            'wordSP': 'wordTM',
            'wordTM': 'generalSP',
            ###
            'actionInput': 'actionEnc',
            'actionEnc': 'actionSP',
            'actionSP': 'actionTM',
            'actionTM': 'generalSP',
            ###
            'generalSP': 'generalTM',
            'generalTM': None
        }
        # Module name -> module instance, mirroring self.structure.
        self.modules = {
            'generalTM': self.generalTM,
            #'generalSP': self.generalSP,
            'wordTM': self.wordTM,
            'wordSP': self.wordSP,
            'wordEnc': self.wordEncoder,
            'actionTM': self.actionTM,
            'actionSP': self.actionSP,
            'actionEnc': self.actionEncoder
        }

        #self.layer = Layer(self.structure, self.modules, self.classifier)

    def initModules(self, categories, inputIdx):
        """Instantiate the SP/TM modules, the classifier and the start-point
        overlap tracker.

        External parameters from self.modulesParams are used only when they
        provide an entry for every module; otherwise the defaults apply.
        """
        modulesNames = {'wordSP', 'wordTM', 'actionSP', 'actionTM',
            'generalTM'}

        if (self.modulesParams is not None) and\
                (set(self.modulesParams) == modulesNames):
            # NOTE(review): update() copies the DEFAULT params over the
            # externally supplied ones, so on shared keys the defaults win.
            # Confirm this precedence is intended.
            self.modulesParams['wordSP'].update(self.defaultWordSPParams)
            self.modulesParams['wordTM'].update(self.defaultWordTMParams)
            self.modulesParams['actionSP'].update(self.defaultActionSPParams)
            self.modulesParams['actionTM'].update(self.defaultActionTMParams)

            self.wordSP = SpatialPooler(**self.modulesParams['wordSP'])
            self.wordTM = TemporalMemory(**self.modulesParams['wordTM'])
            self.actionSP = SpatialPooler(**self.modulesParams['actionSP'])
            self.actionTM = TemporalMemory(**self.modulesParams['actionTM'])

            # The general TM has 2 rows of columns, each wide enough to hold
            # the cells of the larger of the two lower regions (processInput
            # offsets action cells by one row width).
            defaultGeneralTMParams = {
                'columnDimensions': (2, max(self.wordTM.numberOfCells(),
                    self.actionTM.numberOfCells())),
                'seed': self.tmSeed
            }

            self.modulesParams['generalTM'].update(defaultGeneralTMParams)

            self.generalTM = TemporalMemory(**self.modulesParams['generalTM'])
            print("Using external Parameters!")

        else:
            self.wordSP = SpatialPooler(**self.defaultWordSPParams)
            self.wordTM = TemporalMemory(**self.defaultWordTMParams)
            self.actionSP = SpatialPooler(**self.defaultActionSPParams)
            self.actionTM = TemporalMemory(**self.defaultActionTMParams)
            print("External parameters invalid or not found, using"\
                " the default ones")

            defaultGeneralTMParams = {
                'columnDimensions': (2, max(self.wordTM.numberOfCells(),
                    self.actionTM.numberOfCells())),
                'seed': self.tmSeed
            }

            self.generalTM = TemporalMemory(**defaultGeneralTMParams)

        # Single classifier shared by both regions; it predicts 1-3 steps
        # ahead and is gated at compute time by conditionFunc.
        self.classifier = CLAClassifierCond(steps=[1, 2, 3], alpha=0.1,
            actValueAlpha=0.3, verbosity=0)

        self.startPointOverlap = CommonOverlap('==', 1,
            self.actionTM.columnDimensions, threshold=0.5)

    def processInput(self, sentence, actionSeq, wordSDR=None,
                     actionSDR=None, verbosity=0, learn=True):
        """Run one sentence and its action sequence through the network.

        @param sentence: iterable of words fed to region 1 (word side).
            NOTE(review): assumed non-empty — retVal and region1Predicting
            are set inside the word loop; an empty sentence would raise
            NameError below. Confirm with callers.
        @param actionSeq: iterable of actions fed to region 1 (action side).
        @param wordSDR, actionSDR: optional preallocated output buffers for
            the spatial poolers (allocated here when None).
        @param verbosity: > 0 prints best predictions, > 3 prints classifier
            detail.
        @param learn: forwarded to the SP/TM/classifier compute calls.
        @return list with the classifier's most probable value per step.
        """

        if wordSDR is None:
            wordSDR = numpy.zeros(self.wordSP.getColumnDimensions(),
                dtype=numpy.uint8)
        if actionSDR is None:
            actionSDR = numpy.zeros(self.actionSP.getColumnDimensions(),
                dtype=numpy.uint8)

        # Width of one row of the general TM; used to offset action cells
        # into the second row's index range.
        nCellsFromSentence = self.generalTM.columnDimensions[1]
        sentenceActiveCells = set()
        actionSeqActiveCells = set()
        recordNum = 0

        # Feed the words from the sentence to the region 1
        for word in sentence:
            encodedWord = self.wordEncoder.encode(word)
            self.wordSP.compute(encodedWord, learn, wordSDR)
            self.wordTM.compute(set(numpy.where(wordSDR > 0)[0]), learn)
            # True when the word TM produced any prediction for this word;
            # the value from the LAST word gates the feedback path below.
            region1Predicting = (self.wordTM.predictiveCells != set())
            sentenceActiveCells.update(self.wordTM.getActiveCells())

            #print("{} - {}".format(word, ))
            retVal = self.classifier.compute(recordNum=recordNum,
                patternNZ=self.wordTM.getActiveCells(),
                classification={
                    'bucketIdx': self.wordEncoder.getBucketIndices(word)[0],
                    'actValue': word
                },
                learn=learn,
                infer=True,
                conditionFunc=lambda x: x.endswith("-event"))

            recordNum += 1

        # Best prediction per step, taken from the classifier output of the
        # last word in the sentence.
        bestPredictions = []

        for step in retVal:
            if step == 'actualValues':
                continue
            higherProbIndex = numpy.argmax(retVal[step])
            bestPredictions.append(retVal['actualValues'][higherProbIndex])

        if region1Predicting:
            # Feed the sentence to the region 2
            self.generalTM.compute(sentenceActiveCells, learn)

            generalPrediction = set(self.generalTM.mapCellsToColumns(
                self.generalTM.predictiveCells).keys())

            # Normalize predictions so cells stay in the actionTM
            # range.
            generalPrediction = set([i - nCellsFromSentence
                for i in generalPrediction
                if i >= nCellsFromSentence])

#            columnsPrediction = numpy.zeros(
#                self.actionSP.getNumColumns(),
#                dtype=numpy.uint8
#            )
#            columnsPrediction[self.actionTM.mapCellsToColumns(
#                generalPrediction).keys()] = 1
#            self.startPointOverlap.updateCounts(columnsPrediction)
#
#        if len(actionSeq) <= 0:
#
#            assert region1Predicting, "Region 1 is not predicting, consider "\
#                "training the model for a longer time"
#            predictedValues = []
#
#            firstColumns = numpy.where(numpy.bitwise_and(columnsPrediction > 0,
#                self.startPointOverlap.commonElements))
#
#            predictedEnc = numpy.zeros(self.actionEncoder.getWidth(),
#                dtype=numpy.uint8)
#            predictedEnc[
#                [self.actionSP._mapColumn(col) for col in firstColumns]] = 1
#            predictedValues.append(self.actionEncoder.decode(predictedEnc))
#
#            print(firstColumns)
#
#            self.actionTM.predictiveCells.update(generalPrediction)
#            self.actionTM.compute(firstColumns, learn)
#
#            predictedColumns = self.actionTM.mapCellsToColumns(
#                self.actionTM.predictiveCells).keys()[0]

        for action in actionSeq:
            encodedAction = self.actionEncoder.encode(action)
            # Use the predicted cells from region 2 to bias the
            # activity of cells in region 1.
            if region1Predicting:
                self.actionTM.predictiveCells.update(generalPrediction)
            self.actionSP.compute(encodedAction, learn, actionSDR)
            self.actionTM.compute(set(numpy.where(actionSDR > 0)[0]), learn)
            # Offset action cells into the general TM's second row range.
            actionActiveCells = [i + nCellsFromSentence
                for i in self.actionTM.getActiveCells()]
            actionSeqActiveCells.update(actionActiveCells)
            # recordNum continues from the word loop so the classifier sees
            # one continuous record stream; action buckets are placed after
            # the word encoder's width so they never collide with word ones.
            self.classifier.compute(recordNum=recordNum,
                patternNZ=actionActiveCells,
                classification={
                    'bucketIdx': self.wordEncoder.getWidth() +
                        self.actionEncoder.getBucketIndices(action)[0],
                    'actValue': action
                },
                learn=learn,
                infer=True,
                conditionFunc=lambda x: x.endswith("-event"))
            recordNum += 1

        if region1Predicting:
            # Always learn the action sequence in region 2, regardless of
            # the learn flag.
            self.generalTM.compute(actionSeqActiveCells, True)

        if verbosity > 0:
            print('Best Predictions: ' + str(bestPredictions))

            if verbosity > 3:
                print(" | CLAClassifier best predictions for step1: ")
                top = sorted(retVal[1].tolist(), reverse=True)[:3]

                for prob in top:
                    probIndex = retVal[1].tolist().index(prob)
                    print(str(retVal['actualValues'][probIndex]) + " - " +
                        str(prob))

                print(" | CLAClassifier best predictions for step2: ")
                top = sorted(retVal[2].tolist(), reverse=True)[:3]

                for prob in top:
                    probIndex = retVal[2].tolist().index(prob)
                    print(str(retVal['actualValues'][probIndex]) + " - " +
                        str(prob))

                print("")

            print("---------------------------------------------------")
            print("")

        return bestPredictions

    def train(self, numIterations, trainingData=None,
              maxTime=-1, verbosity=0):
        """
        @param numIterations
        @param trainingData
        @param maxTime: (default: -1) Training stops if maxTime (in
            minutes) is exceeded. Note that this may interrupt an ongoing
            train iteration. -1 means no time restriction.
        @param verbosity: (default: 0) How verbose to be about the
            process. 0 doesn't print anything.
        """
        startTime = time.time()
        maxTimeReached = False
        recordNum = 0

        if trainingData is None:
            trainingData = self.trainingData

        wordSDR = numpy.zeros(self.wordSP.getColumnDimensions(),
            dtype=numpy.uint8)
        actionSDR = numpy.zeros(self.actionSP.getColumnDimensions(),
            dtype=numpy.uint8)
        #generalSDR = numpy.zeros(self.generalSP.getColumnDimensions(),
        #    dtype=numpy.uint8)
        # NOTE(review): generalInput is allocated but never used below.
        generalInput = numpy.zeros(self.generalTM.numberOfColumns(),
            dtype=numpy.uint8)

        for iteration in xrange(numIterations):
            print("Iteration " + str(iteration))

            for sentence, actionSeq in trainingData:
                self.processInput(sentence, actionSeq, wordSDR, actionSDR)
                self.reset()
                recordNum += 1

                if maxTime > 0:
                    elapsedMinutes = (time.time() - startTime) * (1.0 / 60.0)

                    if elapsedMinutes > maxTime:
                        maxTimeReached = True
                        print("maxTime reached, training stoped at iteration "\
                            "{}!".format(self.iterationsTrained))
                        break

            if maxTimeReached:
                break

            self.iterationsTrained += 1

    def inputSentence(self, sentence, verbosity=1, learn=False):
        """Process a sentence alone (empty action sequence) and return the
        classifier's best predictions."""
        return self.processInput(sentence, [], verbosity=verbosity,
            learn=learn)
class FeedbackModel(LearningModel):
    """HTM model in which a general (top) region feeds predictions back
    into the action region.

    Structure:
        WordEncoder -> WordSP -> WordTM
        ActionEncoder -> ActionSP -> ActionTM
        WordTM, ActionTM -> GeneralSP -> GeneralTM
    """

    def __init__(self, wordEncoder, actionEncoder, trainingSet,
            modulesParams=None):
        """
        @param wordEncoder
        @param actionEncoder
        @param trainingSet: A module containing the trainingData, all of its
            categories and the inputIdx dict that maps each index in
            categories to an input name.
        @param modulesParams: (default: None) Optional per-module parameter
            dicts keyed by module name; see initModules.
        """
        super(FeedbackModel, self).__init__(wordEncoder, actionEncoder,
            trainingSet, modulesParams)

        self.initModules(trainingSet.categories, trainingSet.inputIdx)

        # Dataflow description: key -> name of the module it feeds.
        self.structure = {
            'wordInput': 'wordEnc',
            'wordEnc': 'wordSP',
            'wordSP': 'wordTM',
            'wordTM': 'generalSP',
            ###
            'actionInput': 'actionEnc',
            'actionEnc': 'actionSP',
            'actionSP': 'actionTM',
            'actionTM': 'generalSP',
            ###
            'generalSP': 'generalTM',
            'generalTM': None
        }
        # Lookup table from module name to the instantiated module.
        self.modules = {
            'generalTM': self.generalTM,
            #'generalSP': self.generalSP,
            'wordTM': self.wordTM,
            'wordSP': self.wordSP,
            'wordEnc': self.wordEncoder,
            'actionTM': self.actionTM,
            'actionSP': self.actionSP,
            'actionEnc': self.actionEncoder
        }

        #self.layer = Layer(self.structure, self.modules, self.classifier)

    def initModules(self, categories, inputIdx):
        """Build the spatial poolers, temporal memories, classifier and
        overlap tracker, preferring external parameters when a complete
        set (one entry per module) was supplied."""
        modulesNames = {'wordSP', 'wordTM', 'actionSP', 'actionTM',
            'generalTM'}

        if (self.modulesParams is not None) and\
                (set(self.modulesParams) == modulesNames):
            # NOTE(review): these update() calls write the defaults on top
            # of the external values, so defaults take precedence on any
            # shared key — verify that is the intended merge direction.
            self.modulesParams['wordSP'].update(self.defaultWordSPParams)
            self.modulesParams['wordTM'].update(self.defaultWordTMParams)
            self.modulesParams['actionSP'].update(self.defaultActionSPParams)
            self.modulesParams['actionTM'].update(self.defaultActionTMParams)

            self.wordSP = SpatialPooler(**self.modulesParams['wordSP'])
            self.wordTM = TemporalMemory(**self.modulesParams['wordTM'])
            self.actionSP = SpatialPooler(**self.modulesParams['actionSP'])
            self.actionTM = TemporalMemory(**self.modulesParams['actionTM'])

            # Two rows of columns: processInput keeps word cells in the
            # first row's index range and shifts action cells into the
            # second row's range.
            defaultGeneralTMParams = {
                'columnDimensions': (2, max(self.wordTM.numberOfCells(),
                    self.actionTM.numberOfCells())),
                'seed': self.tmSeed
            }

            self.modulesParams['generalTM'].update(defaultGeneralTMParams)

            self.generalTM = TemporalMemory(**self.modulesParams['generalTM'])
            print("Using external Parameters!")

        else:
            self.wordSP = SpatialPooler(**self.defaultWordSPParams)
            self.wordTM = TemporalMemory(**self.defaultWordTMParams)
            self.actionSP = SpatialPooler(**self.defaultActionSPParams)
            self.actionTM = TemporalMemory(**self.defaultActionTMParams)
            print("External parameters invalid or not found, using"\
                " the default ones")

            defaultGeneralTMParams = {
                'columnDimensions': (2, max(self.wordTM.numberOfCells(),
                    self.actionTM.numberOfCells())),
                'seed': self.tmSeed
            }

            self.generalTM = TemporalMemory(**defaultGeneralTMParams)

        # One classifier for both words and actions, predicting 1, 2 and 3
        # steps ahead; conditionFunc filters at compute time.
        self.classifier = CLAClassifierCond(steps=[1, 2, 3], alpha=0.1,
            actValueAlpha=0.3, verbosity=0)

        self.startPointOverlap = CommonOverlap('==', 1,
            self.actionTM.columnDimensions, threshold=0.5)

    def processInput(self, sentence, actionSeq, wordSDR=None,
                     actionSDR=None, verbosity=0, learn=True):
        """Feed one sentence and its action sequence through both regions.

        @param sentence: words for the word pathway of region 1.
            NOTE(review): retVal/region1Predicting are only assigned inside
            the word loop, so an empty sentence raises NameError — confirm
            callers never pass one.
        @param actionSeq: actions for the action pathway of region 1.
        @param wordSDR, actionSDR: optional reusable SP output buffers.
        @param verbosity: > 0 prints the best predictions; > 3 adds
            per-step classifier detail.
        @param learn: whether the SP/TM/classifier calls should learn.
        @return list of the most probable classifier value for each step.
        """

        if wordSDR is None:
            wordSDR = numpy.zeros(self.wordSP.getColumnDimensions(),
                dtype=numpy.uint8)
        if actionSDR is None:
            actionSDR = numpy.zeros(self.actionSP.getColumnDimensions(),
                dtype=numpy.uint8)

        # Width of one row of the general TM; action cells are offset by
        # this amount so the two regions never overlap.
        nCellsFromSentence = self.generalTM.columnDimensions[1]
        sentenceActiveCells = set()
        actionSeqActiveCells = set()
        recordNum = 0

        # Feed the words from the sentence to the region 1
        for word in sentence:
            encodedWord = self.wordEncoder.encode(word)
            self.wordSP.compute(encodedWord, learn, wordSDR)
            self.wordTM.compute(set(numpy.where(wordSDR > 0)[0]), learn)
            # Whether the word TM predicted anything on this step; the last
            # word's value gates the region-2 feedback below.
            region1Predicting = (self.wordTM.predictiveCells != set())
            sentenceActiveCells.update(self.wordTM.getActiveCells())

            #print("{} - {}".format(word, ))
            retVal = self.classifier.compute(recordNum=recordNum,
                patternNZ=self.wordTM.getActiveCells(),
                classification={
                    'bucketIdx': self.wordEncoder.getBucketIndices(word)[0],
                    'actValue': word
                },
                learn=learn,
                infer=True,
                conditionFunc=lambda x: x.endswith("-event"))

            recordNum += 1

        # Most probable value per prediction step, taken from the last
        # word's classifier output.
        bestPredictions = []

        for step in retVal:
            if step == 'actualValues':
                continue
            higherProbIndex = numpy.argmax(retVal[step])
            bestPredictions.append(retVal['actualValues'][higherProbIndex])

        if region1Predicting:
            # Feed the sentence to the region 2
            self.generalTM.compute(sentenceActiveCells, learn)

            generalPrediction = set(self.generalTM.mapCellsToColumns(
                self.generalTM.predictiveCells).keys())

            # Normalize predictions so cells stay in the actionTM
            # range.
            generalPrediction = set([i - nCellsFromSentence
                for i in generalPrediction
                if i >= nCellsFromSentence])

#            columnsPrediction = numpy.zeros(
#                self.actionSP.getNumColumns(),
#                dtype=numpy.uint8
#            )
#            columnsPrediction[self.actionTM.mapCellsToColumns(
#                generalPrediction).keys()] = 1
#            self.startPointOverlap.updateCounts(columnsPrediction)
#
#        if len(actionSeq) <= 0:
#
#            assert region1Predicting, "Region 1 is not predicting, consider "\
#                "training the model for a longer time"
#            predictedValues = []
#
#            firstColumns = numpy.where(numpy.bitwise_and(columnsPrediction > 0,
#                self.startPointOverlap.commonElements))
#
#            predictedEnc = numpy.zeros(self.actionEncoder.getWidth(),
#                dtype=numpy.uint8)
#            predictedEnc[
#                [self.actionSP._mapColumn(col) for col in firstColumns]] = 1
#            predictedValues.append(self.actionEncoder.decode(predictedEnc))
#
#            print(firstColumns)
#
#            self.actionTM.predictiveCells.update(generalPrediction)
#            self.actionTM.compute(firstColumns, learn)
#
#            predictedColumns = self.actionTM.mapCellsToColumns(
#                self.actionTM.predictiveCells).keys()[0]

        for action in actionSeq:
            encodedAction = self.actionEncoder.encode(action)
            # Use the predicted cells from region 2 to bias the
            # activity of cells in region 1.
            if region1Predicting:
                self.actionTM.predictiveCells.update(generalPrediction)
            self.actionSP.compute(encodedAction, learn, actionSDR)
            self.actionTM.compute(set(numpy.where(actionSDR > 0)[0]), learn)
            # Shift action cells into the general TM's second-row range.
            actionActiveCells = [i + nCellsFromSentence
                for i in self.actionTM.getActiveCells()]
            actionSeqActiveCells.update(actionActiveCells)
            # Action buckets are placed after the word encoder's width so
            # word and action bucket indices never collide; recordNum keeps
            # counting on from the word loop.
            self.classifier.compute(recordNum=recordNum,
                patternNZ=actionActiveCells,
                classification={
                    'bucketIdx': self.wordEncoder.getWidth() +
                        self.actionEncoder.getBucketIndices(action)[0],
                    'actValue': action
                },
                learn=learn,
                infer=True,
                conditionFunc=lambda x: x.endswith("-event"))
            recordNum += 1

        if region1Predicting:
            # The action sequence is always learned in region 2 (learn flag
            # intentionally hard-coded to True here).
            self.generalTM.compute(actionSeqActiveCells, True)

        if verbosity > 0:
            print('Best Predictions: ' + str(bestPredictions))

            if verbosity > 3:
                print(" | CLAClassifier best predictions for step1: ")
                top = sorted(retVal[1].tolist(), reverse=True)[:3]

                for prob in top:
                    probIndex = retVal[1].tolist().index(prob)
                    print(str(retVal['actualValues'][probIndex]) + " - " +
                        str(prob))

                print(" | CLAClassifier best predictions for step2: ")
                top = sorted(retVal[2].tolist(), reverse=True)[:3]

                for prob in top:
                    probIndex = retVal[2].tolist().index(prob)
                    print(str(retVal['actualValues'][probIndex]) + " - " +
                        str(prob))

                print("")

            print("---------------------------------------------------")
            print("")

        return bestPredictions

    def train(self, numIterations, trainingData=None,
              maxTime=-1, verbosity=0):
        """
        @param numIterations
        @param trainingData
        @param maxTime: (default: -1) Training stops if maxTime (in
            minutes) is exceeded. Note that this may interrupt an ongoing
            train iteration. -1 means no time restriction.
        @param verbosity: (default: 0) How verbose to be about the
            process. 0 doesn't print anything.
        """
        startTime = time.time()
        maxTimeReached = False
        recordNum = 0

        if trainingData is None:
            trainingData = self.trainingData

        wordSDR = numpy.zeros(self.wordSP.getColumnDimensions(),
            dtype=numpy.uint8)
        actionSDR = numpy.zeros(self.actionSP.getColumnDimensions(),
            dtype=numpy.uint8)
        #generalSDR = numpy.zeros(self.generalSP.getColumnDimensions(),
        #    dtype=numpy.uint8)
        # NOTE(review): generalInput is never read after this allocation.
        generalInput = numpy.zeros(self.generalTM.numberOfColumns(),
            dtype=numpy.uint8)

        for iteration in xrange(numIterations):
            print("Iteration " + str(iteration))

            for sentence, actionSeq in trainingData:
                self.processInput(sentence, actionSeq, wordSDR, actionSDR)
                self.reset()
                recordNum += 1

                if maxTime > 0:
                    elapsedMinutes = (time.time() - startTime) * (1.0 / 60.0)

                    if elapsedMinutes > maxTime:
                        maxTimeReached = True
                        print("maxTime reached, training stoped at iteration "\
                            "{}!".format(self.iterationsTrained))
                        break

            if maxTimeReached:
                break

            self.iterationsTrained += 1

    def inputSentence(self, sentence, verbosity=1, learn=False):
        """Run inference on a lone sentence (no actions) and return the
        best predictions."""
        return self.processInput(sentence, [], verbosity=verbosity,
            learn=learn)
def testNumberOfColumns(self):
    """numberOfColumns() must equal the product of the column dimensions."""
    dims = [64, 64]
    tm = TemporalMemory(columnDimensions=dims, cellsPerColumn=32)
    self.assertEqual(tm.numberOfColumns(), dims[0] * dims[1])
def testNumberOfColumns(self):
    """The column count follows columnDimensions, not cellsPerColumn."""
    tm = TemporalMemory(cellsPerColumn=32, columnDimensions=[64, 64])
    expected = 64 * 64
    self.assertEqual(tm.numberOfColumns(), expected)
tm = TM( columnDimensions=(50, ), cellsPerColumn=2, initialPermanence=0.5, connectedPermanence=0.5, minThreshold=8, maxNewSynapseCount=20, permanenceIncrement=0.1, permanenceDecrement=0.0, activationThreshold=8, ) # Step 2: create input vectors to feed to the temporal pooler. Each input vector # must be numberOfCols wide. Here we create a simple sequence of 5 vectors # representing the sequence A -> B -> C -> D -> E x = numpy.zeros((5, tm.numberOfColumns()), dtype="uint32") x[0, 0:10] = 1 # Input SDR representing "A", corresponding to columns 0-9 x[1, 10:20] = 1 # Input SDR representing "B", corresponding to columns 10-19 x[2, 20:30] = 1 # Input SDR representing "C", corresponding to columns 20-29 x[3, 30:40] = 1 # Input SDR representing "D", corresponding to columns 30-39 x[4, 40:50] = 1 # Input SDR representing "E", corresponding to columns 40-49 # Step 3: send this simple sequence to the temporal memory for learning # We repeat the sequence 10 times for i in range(10): # Send each letter in the sequence in order for j in range(5): activeColumns = set([i for i, j in zip(count(), x[j]) if j == 1]) # The compute method performs one step of learning and/or inference. Note:
tm = TM(columnDimensions = (50,), cellsPerColumn=2, initialPermanence=0.5, connectedPermanence=0.5, minThreshold=10, maxNewSynapseCount=20, permanenceIncrement=0.1, permanenceDecrement=0.0, activationThreshold=8, ) # Step 2: create input vectors to feed to the temporal pooler. Each input vector # must be numberOfCols wide. Here we create a simple sequence of 5 vectors # representing the sequence A -> B -> C -> D -> E x = numpy.zeros((5, tm.numberOfColumns()), dtype="uint32") x[0, 0:10] = 1 # Input SDR representing "A", corresponding to columns 0-9 x[1, 10:20] = 1 # Input SDR representing "B", corresponding to columns 10-19 x[2, 20:30] = 1 # Input SDR representing "C", corresponding to columns 20-29 x[3, 30:40] = 1 # Input SDR representing "D", corresponding to columns 30-39 x[4, 40:50] = 1 # Input SDR representing "E", corresponding to columns 40-49 # Step 3: send this simple sequence to the temporal memory for learning # We repeat the sequence 10 times for i in range(10): # Send each letter in the sequence in order for j in range(5): activeColumns = set([i for i, j in zip(count(), x[j]) if j == 1])
# Encode a random vector at every (x, y) grid cell and collect the encodings.
# NOTE(review): Vec, tm, UnitEncoder, plt, xRange, yRange, EncodeVector and
# DecodeVector are defined outside this chunk; Vec is presumably an empty
# numpy array before this loop — confirm upstream.
for x in range(xRange):
    for y in range(yRange):
        V = EncodeVector(random.randint(-100, 100),
                         random.randint(-100, 100))
        u, v = DecodeVector(V)
        #print "(%d, %d) = v(%d, %d)" % (x, y, u, v)
        #plt.quiver(x, y, u, v, pivot='mid', scale=10, units='dots', width=1)
        Vec = numpy.append(Vec, V)

# Feed the whole grid's concatenated encoding to the TM without learning.
activeColumns = set([j for j, k in zip(count(), Vec) if k == 1])
tm.compute(activeColumns, learn = False)

# Map active/predictive cells back to their columns ("Indeces" spelling is
# preserved from the original).
activeColumnsIndeces = [tm.columnForCell(i) for i in tm.getActiveCells()]
predictedColumnIndeces = [tm.columnForCell(i) for i in tm.getPredictiveCells()]

# Dense 0/1 column-state vectors for the active and predicted columns.
actColState = [1 if i in activeColumnsIndeces else 0
               for i in range(tm.numberOfColumns())]
predColState = [1 if i in predictedColumnIndeces else 0
                for i in range(tm.numberOfColumns())]

# Decode each grid cell's predicted vector and plot it; unpredictable cells
# (decoder sentinel -999) are drawn in red.
z = 0
# NOTE(review): z is never advanced inside the visible loop, so every cell
# reads the same slice [0 : UnitEncoder.getWidth() * 2] — looks like a bug
# or the increment lives outside this chunk; confirm.
for x in range(xRange):
    for y in range(yRange):
        AV = actColState[z: z + UnitEncoder.getWidth() * 2]
        PV = predColState[z: z + UnitEncoder.getWidth() * 2]
        PV = numpy.asarray( PV )
        u, v = DecodeVector( PV )

        if u != -999 and v != -999:
            print "(%d, %d) = v(%d, %d)" % (x, y, u, v)
            plt.quiver(x, y, u, v, pivot='mid', scale=10, units='dots',
                       width=1)
        else:
            print "(%d, %d) = v(unpredictable)" % (x, y)
            plt.quiver(x, y, 100, 100, pivot='mid', scale=10, units='dots',
                       width=1, color='r')
tm = TM(columnDimensions = (50,), cellsPerColumn=2, initialPermanence=0.5, connectedPermanence=0.5, minThreshold=8, maxNewSynapseCount=20, permanenceIncrement=0.1, permanenceDecrement=0.0, activationThreshold=8, ) # Step 2: create input vectors to feed to the temporal memory. Each input vector # must be numberOfCols wide. Here we create a simple sequence of 5 vectors # representing the sequence A -> B -> C -> D -> E x = numpy.zeros((5, tm.numberOfColumns()), dtype="uint32") x[0, 0:10] = 1 # Input SDR representing "A", corresponding to columns 0-9 x[1, 10:20] = 1 # Input SDR representing "B", corresponding to columns 10-19 x[2, 20:30] = 1 # Input SDR representing "C", corresponding to columns 20-29 x[3, 30:40] = 1 # Input SDR representing "D", corresponding to columns 30-39 x[4, 40:50] = 1 # Input SDR representing "E", corresponding to columns 40-49 # Step 3: send this simple sequence to the temporal memory for learning # We repeat the sequence 10 times for i in range(10): # Send each letter in the sequence in order for j in range(5): activeColumns = set([i for i, j in zip(count(), x[j]) if j == 1])