def runTMtrainingPhase(experiment):
  """
  Train only the Temporal Memory on the generated sequences, printing
  per-pass bursting-column stats and the TM's default metrics at the end.

  NOTE(review): reads module-level names (trainingPasses, consoleVerbosity,
  generatedSequences, labeledSequences, _SHOW_PROGRESS_INTERVAL) rather than
  taking them as parameters -- confirm they are defined at module scope
  before this is called.

  @param experiment: experiment object exposing runNetworkOnSequences,
                     getBurstingColumnsStats and a monitored TM (.tm)
  """
  # Train only the Temporal Memory on the generated sequences
  if trainingPasses > 0:
    print "\nTraining Temporal Memory..."
    if consoleVerbosity > 0:
      print "\nPass\tBursting Columns Mean\tStdDev\tMax"
    for i in xrange(trainingPasses):
      experiment.runNetworkOnSequences(
          generatedSequences,
          labeledSequences,
          tmLearn=True,
          upLearn=None,
          verbosity=consoleVerbosity,
          progressInterval=_SHOW_PROGRESS_INTERVAL)
      if consoleVerbosity > 0:
        stats = experiment.getBurstingColumnsStats()
        print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
      # Reset the TM monitor mixin's records accrued during this training pass
      # experiment.tm.mmClearHistory()
    # History is never cleared above, so these metrics span all passes
    print
    print MonitorMixinBase.mmPrettyPrintMetrics(
        experiment.tm.mmGetDefaultMetrics())
    print
def runTMtrainingPhase(experiment):
  """
  Train only the Temporal Memory on the generated sequences, printing
  per-pass bursting-column stats and the TM's default metrics at the end.

  NOTE(review): duplicate of the other runTMtrainingPhase definition in this
  file; the later definition shadows the earlier at import time.

  NOTE(review): reads module-level names (trainingPasses, consoleVerbosity,
  generatedSequences, labeledSequences, _SHOW_PROGRESS_INTERVAL) -- confirm
  they exist at module scope before this is called.

  @param experiment: experiment object exposing runNetworkOnSequences,
                     getBurstingColumnsStats and a monitored TM (.tm)
  """
  # Train only the Temporal Memory on the generated sequences
  if trainingPasses > 0:
    print "\nTraining Temporal Memory..."
    if consoleVerbosity > 0:
      print "\nPass\tBursting Columns Mean\tStdDev\tMax"
    for i in xrange(trainingPasses):
      experiment.runNetworkOnSequences(
          generatedSequences,
          labeledSequences,
          tmLearn=True,
          upLearn=None,
          verbosity=consoleVerbosity,
          progressInterval=_SHOW_PROGRESS_INTERVAL)
      if consoleVerbosity > 0:
        stats = experiment.getBurstingColumnsStats()
        print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
      # Reset the TM monitor mixin's records accrued during this training pass
      # experiment.tm.mmClearHistory()
    print
    print MonitorMixinBase.mmPrettyPrintMetrics(
        experiment.tm.mmGetDefaultMetrics())
    print
def trainTwoPass(runner, exhaustiveAgents, completeSequenceLength, verbosity):
  """
  Two-phase training: first train the temporal memory alone, then freeze it
  and train the temporal pooler, printing combined TP+TM metrics after each
  phase.

  @param runner: experiment runner exposing generateSequences / feedLayers
                 and monitored tm / tp instances
  @param exhaustiveAgents: agents used to generate the training sequences
  @param completeSequenceLength: (int) length of one complete sequence;
         multiplied by the module-level TWOPASS_TM_TRAINING_REPS /
         TWOPASS_TP_TRAINING_REPS constants
  @param verbosity: (int) console verbosity passed through to the runner
  """
  print "Training temporal memory..."
  sequences = runner.generateSequences(
      completeSequenceLength * TWOPASS_TM_TRAINING_REPS,
      exhaustiveAgents,
      verbosity=verbosity)
  runner.feedLayers(sequences,
                    tmLearn=True,
                    tpLearn=False,
                    verbosity=verbosity,
                    showProgressInterval=SHOW_PROGRESS_INTERVAL)
  print
  print MonitorMixinBase.mmPrettyPrintMetrics(
      runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
  print
  print "Training temporal pooler..."
  # Clear monitor history so pooler-phase metrics exclude phase 1
  runner.tm.mmClearHistory()
  runner.tp.mmClearHistory()
  sequences = runner.generateSequences(
      completeSequenceLength * TWOPASS_TP_TRAINING_REPS,
      exhaustiveAgents,
      verbosity=verbosity)
  runner.feedLayers(sequences,
                    tmLearn=False,
                    tpLearn=True,
                    verbosity=verbosity,
                    showProgressInterval=SHOW_PROGRESS_INTERVAL)
  print
  print MonitorMixinBase.mmPrettyPrintMetrics(
      runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
  print
def runTestPhase(runner, randomAgents, numWorlds, numElements,
                 completeSequenceLength, verbosity):
  """
  Run the trained network (all learning off) on randomly-generated test
  sequences; print the TP data overlap at verbosity >= 2 and the combined
  TP+TM metrics.

  NOTE(review): completeSequenceLength / NUM_TEST_SEQUENCES is Python 2
  integer division -- confirm truncation is intended.

  @param runner: experiment runner with monitored tm / tp instances
  @param randomAgents: agents used to generate the test sequences
  @param numWorlds: (int) used only for the console banner
  @param numElements: (int) used only for the console banner
  @param completeSequenceLength: (int) total test length, split across
         NUM_TEST_SEQUENCES sequences
  @param verbosity: (int) console verbosity
  """
  print "Testing (worlds: {0}, elements: {1})...".format(
    numWorlds, numElements)
  # Drop training-phase monitoring history before measuring test behavior
  runner.tm.mmClearHistory()
  runner.tp.mmClearHistory()
  sequences = runner.generateSequences(
      completeSequenceLength / NUM_TEST_SEQUENCES,
      randomAgents,
      verbosity=verbosity,
      numSequences=NUM_TEST_SEQUENCES)
  runner.feedLayers(sequences,
                    tmLearn=False,
                    tpLearn=False,
                    verbosity=verbosity,
                    showProgressInterval=SHOW_PROGRESS_INTERVAL)
  print "Done testing.\n"
  if verbosity >= 2:
    print "Overlap:"
    print
    print runner.tp.mmPrettyPrintDataOverlap()
    print
  print MonitorMixinBase.mmPrettyPrintMetrics(
      runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
  print
def trainTwoPass(runner, exhaustiveAgents, completeSequenceLength, verbosity):
  """
  Two-phase training: first train the temporal memory alone, then freeze it
  and train the temporal pooler, printing combined TP+TM metrics after each
  phase.

  NOTE(review): duplicate of the other trainTwoPass definition in this file;
  the later definition shadows the earlier at import time.

  @param runner: experiment runner exposing generateSequences / feedLayers
                 and monitored tm / tp instances
  @param exhaustiveAgents: agents used to generate the training sequences
  @param completeSequenceLength: (int) length of one complete sequence
  @param verbosity: (int) console verbosity passed through to the runner
  """
  print "Training temporal memory..."
  sequences = runner.generateSequences(
      completeSequenceLength * TWOPASS_TM_TRAINING_REPS,
      exhaustiveAgents,
      verbosity=verbosity)
  runner.feedLayers(sequences,
                    tmLearn=True,
                    tpLearn=False,
                    verbosity=verbosity,
                    showProgressInterval=SHOW_PROGRESS_INTERVAL)
  print
  print MonitorMixinBase.mmPrettyPrintMetrics(
      runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
  print
  print "Training temporal pooler..."
  # Clear monitor history so pooler-phase metrics exclude phase 1
  runner.tm.mmClearHistory()
  runner.tp.mmClearHistory()
  sequences = runner.generateSequences(
      completeSequenceLength * TWOPASS_TP_TRAINING_REPS,
      exhaustiveAgents,
      verbosity=verbosity)
  runner.feedLayers(sequences,
                    tmLearn=False,
                    tpLearn=True,
                    verbosity=verbosity,
                    showProgressInterval=SHOW_PROGRESS_INTERVAL)
  print
  print MonitorMixinBase.mmPrettyPrintMetrics(
      runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
  print
def trainTemporalMemory(experiment, inputSequences, inputCategories,
                        trainingPasses, consoleVerbosity):
  """
  Train the Temporal Memory for the given number of passes, accumulating a
  per-pass bursting-columns table that is printed once at the end.

  @param experiment: experiment exposing runNetworkOnSequences,
                     getBurstingColumnsStats, and monitored tm / up
  @param inputSequences: input patterns (None-terminated sequences)
  @param inputCategories: matching category labels per element
  @param trainingPasses: (int) number of passes over the input
  @param consoleVerbosity: (int) >0 prints the stats table,
                           >1 also prints per-pass TM metrics
  """
  burstingColsString = ""
  for i in xrange(trainingPasses):
    experiment.runNetworkOnSequences(
        inputSequences,
        inputCategories,
        tmLearn=True,
        upLearn=None,
        classifierLearn=False,
        verbosity=consoleVerbosity,
        progressInterval=_SHOW_PROGRESS_INTERVAL)
    if consoleVerbosity > 1:
      print
      print MonitorMixinBase.mmPrettyPrintMetrics(
          experiment.tm.mmGetDefaultMetrics())
      print
    stats = experiment.getBurstingColumnsStats()
    burstingColsString += "{0}\t{1}\t{2}\t{3}\n".format(
        i, stats[0], stats[1], stats[2])
    # Clear monitor history so each pass's stats are independent
    experiment.tm.mmClearHistory()
    experiment.up.mmClearHistory()
  if consoleVerbosity > 0:
    print "\nTemporal Memory Bursting Columns stats..."
    print "Pass\tMean\t\tStdDev\t\tMax"
    print burstingColsString
def trainTemporalMemory(experiment, inputSequences, inputCategories,
                        trainingPasses, consoleVerbosity):
  """
  Train the Temporal Memory for the given number of passes, accumulating a
  per-pass bursting-columns table that is printed once at the end.

  NOTE(review): duplicate of the other trainTemporalMemory definition in
  this file; the later definition shadows the earlier at import time.

  @param experiment: experiment exposing runNetworkOnSequences,
                     getBurstingColumnsStats, and monitored tm / up
  @param inputSequences: input patterns (None-terminated sequences)
  @param inputCategories: matching category labels per element
  @param trainingPasses: (int) number of passes over the input
  @param consoleVerbosity: (int) >0 prints the stats table,
                           >1 also prints per-pass TM metrics
  """
  burstingColsString = ""
  for i in xrange(trainingPasses):
    experiment.runNetworkOnSequences(
        inputSequences,
        inputCategories,
        tmLearn=True,
        upLearn=None,
        classifierLearn=False,
        verbosity=consoleVerbosity,
        progressInterval=_SHOW_PROGRESS_INTERVAL)
    if consoleVerbosity > 1:
      print
      print MonitorMixinBase.mmPrettyPrintMetrics(
          experiment.tm.mmGetDefaultMetrics())
      print
    stats = experiment.getBurstingColumnsStats()
    burstingColsString += "{0}\t{1}\t{2}\t{3}\n".format(
        i, stats[0], stats[1], stats[2])
    # Clear monitor history so each pass's stats are independent
    experiment.tm.mmClearHistory()
    experiment.up.mmClearHistory()
  if consoleVerbosity > 0:
    print "\nTemporal Memory Bursting Columns stats..."
    print "Pass\tMean\t\tStdDev\t\tMax"
    print burstingColsString
def _printInfo(self): if VERBOSITY >= 2: print MonitorMixinBase.mmPrettyPrintTraces( self.tp.mmGetDefaultTraces(verbosity=3) + self.tm.mmGetDefaultTraces(verbosity=3), breakOnResets=self.tm.mmGetTraceResets()) print if VERBOSITY >= 1: print MonitorMixinBase.mmPrettyPrintMetrics( self.tp.mmGetDefaultMetrics() + self.tm.mmGetDefaultMetrics()) print
def feedLayers(self,
               sequences,
               tmLearn=True,
               tpLearn=None,
               verbosity=0,
               showProgressInterval=None):
  """
  Feed the given sequences to the HTM algorithms.

  @param sequences: 4-tuple of (sensorSequence, motorSequence,
                    sensorimotorSequence, sequenceLabels), all of equal
                    length, fed element-by-element via feedTransition
  @param tmLearn:   (bool) Either False, or True
  @param tpLearn:   (None,bool) Either None, False, or True. If None,
                    temporal pooler will be skipped.
  @param verbosity: (int) >= 2 prints the default TM (and, if the TP ran,
                    TP) traces after the feed
  @param showProgressInterval: (int) Prints progress every N iterations,
                               where N is the value of this param
  """
  (sensorSequence,
   motorSequence,
   sensorimotorSequence,
   sequenceLabels) = sequences
  currentTime = time.time()
  for i in xrange(len(sensorSequence)):
    sensorPattern = sensorSequence[i]
    motorPattern = motorSequence[i]
    sensorimotorPattern = sensorimotorSequence[i]
    sequenceLabel = sequenceLabels[i]
    self.feedTransition(sensorPattern, motorPattern, sensorimotorPattern,
                        tmLearn=tmLearn, tpLearn=tpLearn,
                        sequenceLabel=sequenceLabel)
    if (showProgressInterval is not None and
        i > 0 and
        i % showProgressInterval == 0):
      print ("Fed {0} / {1} elements of the sequence "
             "in {2:0.2f} seconds.".format(
                 i, len(sensorSequence), time.time() - currentTime))
      # Timer restarts each report, so times shown are per-interval
      currentTime = time.time()
  if verbosity >= 2:
    # Print default TM traces
    traces = self.tm.mmGetDefaultTraces(verbosity=verbosity)
    print MonitorMixinBase.mmPrettyPrintTraces(
        traces,
        breakOnResets=self.tm.mmGetTraceResets())
    if tpLearn is not None:
      # Print default TP traces
      traces = self.tp.mmGetDefaultTraces(verbosity=verbosity)
      print MonitorMixinBase.mmPrettyPrintTraces(
          traces,
          breakOnResets=self.tp.mmGetTraceResets())
    print
def trainOnline(runner, exhaustiveAgents, completeSequenceLength, trainingRepetitions, verbosity): print "Training temporal memory and temporal pooler..." sequences = runner.generateSequences(completeSequenceLength * trainingRepetitions, exhaustiveAgents, verbosity=verbosity) runner.feedLayers(sequences, tmLearn=True, tpLearn=True, verbosity=verbosity, showProgressInterval=SHOW_PROGRESS_INTERVAL) print print MonitorMixinBase.mmPrettyPrintMetrics(runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics()) print
def trainOnline(runner, exhaustiveAgents, completeSequenceLength, reps, verbosity): print "Training temporal memory and temporal pooler..." sequences = runner.generateSequences(completeSequenceLength * reps, exhaustiveAgents, verbosity=verbosity) runner.feedLayers(sequences, tmLearn=True, tpLearn=True, verbosity=verbosity, showProgressInterval=SHOW_PROGRESS_INTERVAL) print print MonitorMixinBase.mmPrettyPrintMetrics( runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics()) print
def runTestPhase(runner, randomAgents, numWorlds, numElements,
                 completeSequenceLength, verbosity):
  """
  Run the trained network (all learning off) on randomly-generated test
  sequences; print the TP data overlap at verbosity >= 2 and the combined
  TP+TM metrics.

  NOTE(review): duplicate of the other runTestPhase(runner, ...) definition
  in this file; the later definition shadows the earlier at import time.

  NOTE(review): completeSequenceLength / NUM_TEST_SEQUENCES is Python 2
  integer division -- confirm truncation is intended.
  """
  print "Testing (worlds: {0}, elements: {1})...".format(numWorlds,
                                                         numElements)
  # Drop training-phase monitoring history before measuring test behavior
  runner.tm.mmClearHistory()
  runner.tp.mmClearHistory()
  sequences = runner.generateSequences(
      completeSequenceLength / NUM_TEST_SEQUENCES,
      randomAgents,
      verbosity=verbosity,
      numSequences=NUM_TEST_SEQUENCES)
  runner.feedLayers(sequences,
                    tmLearn=False,
                    tpLearn=False,
                    verbosity=verbosity,
                    showProgressInterval=SHOW_PROGRESS_INTERVAL)
  print "Done testing.\n"
  if verbosity >= 2:
    print "Overlap:"
    print
    print runner.tp.mmPrettyPrintDataOverlap()
    print
  print MonitorMixinBase.mmPrettyPrintMetrics(
      runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
  print
def runNetworkOnSequences(self, inputSequences, inputCategories, tmLearn=True,
                          upLearn=None, classifierLearn=False, verbosity=0,
                          progressInterval=None):
  """
  Runs Union Temporal Pooler network on specified sequence.

  @param inputSequences One or more sequences of input patterns.
                        Each should be terminated with None.

  @param inputCategories A sequence of category representations
                        for each element in inputSequences
                        Each should be terminated with None.

  @param tmLearn:   (bool) Temporal Memory learning mode
  @param upLearn:   (None, bool) Union Temporal Pooler learning mode.
                    If None, Union Temporal Pooler will not be run.
  @param classifierLearn: (bool) Classifier learning mode

  @param progressInterval: (int) Interval of console progress updates
                           in terms of timesteps.
  """
  currentTime = time.time()
  for i in xrange(len(inputSequences)):
    sensorPattern = inputSequences[i]
    inputCategory = inputCategories[i]
    self.runNetworkOnPattern(sensorPattern,
                             tmLearn=tmLearn,
                             upLearn=upLearn,
                             sequenceLabel=inputCategory)
    if classifierLearn and sensorPattern is not None:
      unionSDR = self.up.getUnionSDR()
      upCellCount = self.up.getColumnDimensions()
      # NOTE(review): isSparse receives the UP column dimensions -- confirm
      # the classifier expects a size/dimensionality value here
      self.classifier.learn(unionSDR, inputCategory, isSparse=upCellCount)
      if verbosity > 1:
        pprint.pprint("{0} is category {1}".format(unionSDR, inputCategory))
    if progressInterval is not None and i > 0 and i % progressInterval == 0:
      elapsed = (time.time() - currentTime) / 60.0
      print ("Ran {0} / {1} elements of sequence in "
             "{2:0.2f} minutes.".format(i, len(inputSequences), elapsed))
      currentTime = time.time()
      # TM metrics are printed as part of each progress update
      print MonitorMixinBase.mmPrettyPrintMetrics(
          self.tm.mmGetDefaultMetrics())
  if verbosity >= 2:
    traces = self.tm.mmGetDefaultTraces(verbosity=verbosity)
    print MonitorMixinBase.mmPrettyPrintTraces(
        traces,
        breakOnResets=self.tm.mmGetTraceResets())
    if upLearn is not None:
      traces = self.up.mmGetDefaultTraces(verbosity=verbosity)
      print MonitorMixinBase.mmPrettyPrintTraces(
          traces,
          breakOnResets=self.up.mmGetTraceResets())
    print
def experiment1(): paramDir = 'params/1024_baseline/5_trainingPasses.yaml' outputDir = 'results/' params = yaml.safe_load(open(paramDir, 'r')) options = {'plotVerbosity': 2, 'consoleVerbosity': 2} plotVerbosity = 2 consoleVerbosity = 1 print "Running SDR overlap experiment...\n" print "Params dir: {0}".format(paramDir) print "Output dir: {0}\n".format(outputDir) # Dimensionality of sequence patterns patternDimensionality = params["patternDimensionality"] # Cardinality (ON / true bits) of sequence patterns patternCardinality = params["patternCardinality"] # TODO If this parameter is to be supported, the sequence generation code # below must change # Number of unique patterns from which sequences are built # patternAlphabetSize = params["patternAlphabetSize"] # Length of sequences shown to network sequenceLength = params["sequenceLength"] # Number of sequences used. Sequences may share common elements. numberOfSequences = params["numberOfSequences"] # Number of sequence passes for training the TM. Zero => no training. trainingPasses = params["trainingPasses"] tmParamOverrides = params["temporalMemoryParams"] upParamOverrides = params["unionPoolerParams"] # Generate a sequence list and an associated labeled list (both containing a # set of sequences separated by None) start = time.time() print "\nGenerating sequences..." 
patternAlphabetSize = sequenceLength * numberOfSequences patternMachine = PatternMachine(patternDimensionality, patternCardinality, patternAlphabetSize) sequenceMachine = SequenceMachine(patternMachine) numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength) generatedSequences = sequenceMachine.generateFromNumbers(numbers) sequenceLabels = [str(numbers[i + i*sequenceLength: i + (i+1)*sequenceLength]) for i in xrange(numberOfSequences)] labeledSequences = [] for label in sequenceLabels: for _ in xrange(sequenceLength): labeledSequences.append(label) labeledSequences.append(None) # Set up the Temporal Memory and Union Pooler network print "\nCreating network..." experiment = UnionTemporalPoolerExperiment(tmParamOverrides, upParamOverrides) # Train only the Temporal Memory on the generated sequences if trainingPasses > 0: print "\nTraining Temporal Memory..." if consoleVerbosity > 0: print "\nPass\tBursting Columns Mean\tStdDev\tMax" for i in xrange(trainingPasses): experiment.runNetworkOnSequences(generatedSequences, labeledSequences, tmLearn=True, upLearn=None, verbosity=consoleVerbosity, progressInterval=_SHOW_PROGRESS_INTERVAL) if consoleVerbosity > 0: stats = experiment.getBurstingColumnsStats() print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2]) # Reset the TM monitor mixin's records accrued during this training pass # experiment.tm.mmClearHistory() print print MonitorMixinBase.mmPrettyPrintMetrics( experiment.tm.mmGetDefaultMetrics()) print experiment.tm.mmClearHistory() experiment.up.mmClearHistory() print "\nRunning test phase..." 
inputSequences = generatedSequences inputCategories = labeledSequences tmLearn = True upLearn = False classifierLearn = False currentTime = time.time() experiment.tm.reset() experiment.up.reset() poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 1)) activeCellsTrace = numpy.zeros((experiment.up._numColumns, 1)) activeSPTrace = numpy.zeros((experiment.up._numColumns, 1)) for _ in xrange(trainingPasses): experiment.tm.reset() for i in xrange(len(inputSequences)): sensorPattern = inputSequences[i] inputCategory = inputCategories[i] if sensorPattern is None: pass else: experiment.tm.compute(sensorPattern, learn=tmLearn, sequenceLabel=inputCategory) if upLearn is not None: activeCells, predActiveCells, burstingCols, = experiment.getUnionTemporalPoolerInput() experiment.up.compute(activeCells, predActiveCells, learn=upLearn, sequenceLabel=inputCategory) currentPoolingActivation = experiment.up._poolingActivation currentPoolingActivation = experiment.up._poolingActivation.reshape((experiment.up._numColumns, 1)) poolingActivationTrace = numpy.concatenate((poolingActivationTrace, currentPoolingActivation), 1) currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1)) currentUnionSDR[experiment.up._unionSDR] = 1 activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1) currentSPSDR = numpy.zeros((experiment.up._numColumns, 1)) currentSPSDR[experiment.up._activeCells] = 1 activeSPTrace = numpy.concatenate((activeSPTrace, currentSPSDR), 1) print "\nPass\tBursting Columns Mean\tStdDev\tMax" stats = experiment.getBurstingColumnsStats() print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2]) print print MonitorMixinBase.mmPrettyPrintMetrics(\ experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics()) print experiment.tm.mmClearHistory() # estimate fraction of shared bits across adjacent time point unionSDRshared = experiment.up._mmComputeUnionSDRdiff() bitLifeList = experiment.up._mmComputeBitLifeStats() bitLife = 
numpy.array(bitLifeList) # Plot SP outputs, UP persistence and UP outputs in testing phase def showSequenceStartLine(ax, trainingPasses, sequenceLength): for i in xrange(trainingPasses): ax.vlines(i*sequenceLength, 0, 100, linestyles='--') plt.figure() ncolShow = 100 f, (ax1, ax2, ax3) = plt.subplots(nrows=1,ncols=3) ax1.imshow(activeSPTrace[1:ncolShow,:], cmap=cm.Greys,interpolation="nearest",aspect='auto') showSequenceStartLine(ax1, trainingPasses, sequenceLength) ax1.set_title('SP SDR') ax1.set_ylabel('Columns') ax2.imshow(poolingActivationTrace[1:100,:], cmap=cm.Greys, interpolation="nearest",aspect='auto') showSequenceStartLine(ax2, trainingPasses, sequenceLength) ax2.set_title('Persistence') ax3.imshow(activeCellsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto') showSequenceStartLine(ax3, trainingPasses, sequenceLength) plt.title('Union SDR') ax2.set_xlabel('Time (steps)') pp = PdfPages('results/UnionPoolingOnLearnedTM_Experiment1.pdf') pp.savefig() pp.close() f, (ax1, ax2, ax3) = plt.subplots(nrows=3,ncols=1) ax1.plot((sum(activeCellsTrace))/experiment.up._numColumns*100) ax1.set_ylabel('Union SDR size (%)') ax1.set_xlabel('Time (steps)') ax1.set_ylim(0,25) ax2.plot(unionSDRshared) ax2.set_ylabel('Shared Bits') ax2.set_xlabel('Time (steps)') ax3.hist(bitLife) ax3.set_xlabel('Life duration for each bit') pp = PdfPages('results/UnionSDRproperty_Experiment1.pdf') pp.savefig() pp.close()
def run(params, paramDir, outputDir, plotVerbosity=0, consoleVerbosity=0):
  """
  Runs the union overlap experiment.

  :param params: A dict of experiment parameters
  :param paramDir: Path of parameter file
  :param outputDir: Output will be written to this path
  :param plotVerbosity: Plotting verbosity
  :param consoleVerbosity: Console output verbosity
  """
  print "Running SDR overlap experiment...\n"
  print "Params dir: {0}".format(paramDir)
  print "Output dir: {0}\n".format(outputDir)

  # Dimensionality of sequence patterns
  patternDimensionality = params["patternDimensionality"]
  # Cardinality (ON / true bits) of sequence patterns
  patternCardinality = params["patternCardinality"]
  # TODO If this parameter is to be supported, the sequence generation code
  # below must change
  # Number of unique patterns from which sequences are built
  # patternAlphabetSize = params["patternAlphabetSize"]
  # Length of sequences shown to network
  sequenceLength = params["sequenceLength"]
  # Number of sequences used. Sequences may share common elements.
  numberOfSequences = params["numberOfSequences"]
  # Number of sequence passes for training the TM. Zero => no training.
  trainingPasses = params["trainingPasses"]
  tmParamOverrides = params["temporalMemoryParams"]
  upParamOverrides = params["unionPoolerParams"]

  # Generate a sequence list and an associated labeled list (both containing a
  # set of sequences separated by None)
  start = time.time()
  print "\nGenerating sequences..."
  patternAlphabetSize = sequenceLength * numberOfSequences
  patternMachine = PatternMachine(patternDimensionality, patternCardinality,
                                  patternAlphabetSize)
  sequenceMachine = SequenceMachine(patternMachine)

  numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength)
  generatedSequences = sequenceMachine.generateFromNumbers(numbers)
  # Each sequence occupies sequenceLength+1 slots in `numbers` (trailing None
  # separator); this slice extracts one sequence's numbers as its label.
  sequenceLabels = [
      str(numbers[i + i * sequenceLength: i + (i + 1) * sequenceLength])
      for i in xrange(numberOfSequences)]
  labeledSequences = []
  for label in sequenceLabels:
    for _ in xrange(sequenceLength):
      labeledSequences.append(label)
    labeledSequences.append(None)

  # Set up the Temporal Memory and Union Pooler network
  print "\nCreating network..."
  experiment = UnionTemporalPoolerExperiment(tmParamOverrides,
                                             upParamOverrides)

  # Train only the Temporal Memory on the generated sequences
  if trainingPasses > 0:
    print "\nTraining Temporal Memory..."
    if consoleVerbosity > 0:
      print "\nPass\tBursting Columns Mean\tStdDev\tMax"
    for i in xrange(trainingPasses):
      experiment.runNetworkOnSequences(
          generatedSequences,
          labeledSequences,
          tmLearn=True,
          upLearn=None,
          verbosity=consoleVerbosity,
          progressInterval=_SHOW_PROGRESS_INTERVAL)
      if consoleVerbosity > 0:
        stats = experiment.getBurstingColumnsStats()
        print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
      # Reset the TM monitor mixin's records accrued during this training pass
      experiment.tm.mmClearHistory()
    print
    print MonitorMixinBase.mmPrettyPrintMetrics(
        experiment.tm.mmGetDefaultMetrics())
    print
    if plotVerbosity >= 2:
      plotNetworkState(experiment, plotVerbosity, trainingPasses,
                       phase="Training")

  print "\nRunning test phase..."
  experiment.runNetworkOnSequences(
      generatedSequences,
      labeledSequences,
      tmLearn=False,
      upLearn=False,
      verbosity=consoleVerbosity,
      progressInterval=_SHOW_PROGRESS_INTERVAL)

  print "\nPass\tBursting Columns Mean\tStdDev\tMax"
  stats = experiment.getBurstingColumnsStats()
  print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2])
  # A trained TM should predict all test-phase columns (no bursting)
  if trainingPasses > 0 and stats[0] > 0:
    print "***WARNING! MEAN BURSTING COLUMNS IN TEST PHASE IS GREATER THAN 0***"
  print
  print MonitorMixinBase.mmPrettyPrintMetrics(
      experiment.tm.mmGetDefaultMetrics() +
      experiment.up.mmGetDefaultMetrics())
  print
  plotNetworkState(experiment, plotVerbosity, trainingPasses, phase="Testing")

  elapsed = int(time.time() - start)
  print "Total time: {0:2} seconds.".format(elapsed)

  # Write Union SDR trace
  metricName = "activeCells"
  outputFileName = "unionSdrTrace_{0}learningPasses.csv".format(trainingPasses)
  writeMetricTrace(experiment, metricName, outputDir, outputFileName)

  if plotVerbosity >= 1:
    raw_input("Press any key to exit...")
def runTestPhase(experiment, tmLearn=False, upLearn=True,
                 outputfileName='results/TemporalPoolingOutputs.pdf'):
  """
  Feed the learned sequences through TM + Union Pooler while recording
  per-timestep traces (pooler persistence, union SDR, SP activity, connected
  synapse counts, overlaps), then plot the traces to a PDF.

  NOTE(review): reads module-level names (generatedSequences,
  labeledSequences, trainingPasses, sequenceLength) -- confirm they are
  defined before this is called.

  @param experiment: experiment with monitored tm / up instances
  @param tmLearn: (bool) Temporal Memory learning during the test phase
  @param upLearn: (bool) Union Pooler learning during the test phase
  @param outputfileName: path of the PDF written with the trace plots
  """
  print "\nRunning test phase..."
  print "tmLearn: ", tmLearn
  print "upLearn: ", upLearn

  inputSequences = generatedSequences
  inputCategories = labeledSequences

  experiment.tm.mmClearHistory()
  experiment.up.mmClearHistory()
  experiment.tm.reset()
  experiment.up.reset()

  # Persistence levels across time
  poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 0))
  # union SDR across time
  activeCellsTrace = numpy.zeros((experiment.up._numColumns, 0))
  # active cells in SP across time
  activeSPTrace = numpy.zeros((experiment.up._numColumns, 0))
  # number of connections for SP cells
  connectionCountTrace = numpy.zeros((experiment.up._numColumns, 0))
  # number of active inputs per SP cells
  activeOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))
  # number of predicted active inputs per SP cells
  predictedActiveOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))

  for _ in xrange(trainingPasses):
    experiment.tm.reset()
    experiment.up.reset()
    for i in xrange(len(inputSequences)):
      sensorPattern = inputSequences[i]
      inputCategory = inputCategories[i]
      if sensorPattern is None:
        # None is the sequence separator; nothing to feed
        pass
      else:
        experiment.tm.compute(sensorPattern,
                              learn=tmLearn,
                              sequenceLabel=inputCategory)

        activeCells, predActiveCells, burstingCols, = \
            experiment.getUnionTemporalPoolerInput()
        # Record overlaps before the pooler updates its state
        overlapsActive = experiment.up._calculateOverlap(activeCells)
        overlapsPredictedActive = experiment.up._calculateOverlap(
            predActiveCells)
        activeOverlapsTrace = numpy.concatenate(
            (activeOverlapsTrace,
             overlapsActive.reshape((experiment.up._numColumns, 1))), 1)
        predictedActiveOverlapsTrace = numpy.concatenate(
            (predictedActiveOverlapsTrace,
             overlapsPredictedActive.reshape(
                 (experiment.up._numColumns, 1))), 1)

        experiment.up.compute(activeCells,
                              predActiveCells,
                              learn=upLearn,
                              sequenceLabel=inputCategory)

        currentPoolingActivation = experiment.up._poolingActivation.reshape(
            (experiment.up._numColumns, 1))
        poolingActivationTrace = numpy.concatenate(
            (poolingActivationTrace, currentPoolingActivation), 1)

        currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1))
        currentUnionSDR[experiment.up._unionSDR] = 1
        activeCellsTrace = numpy.concatenate(
            (activeCellsTrace, currentUnionSDR), 1)

        currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
        currentSPSDR[experiment.up._activeCells] = 1
        activeSPTrace = numpy.concatenate(
            (activeSPTrace, currentSPSDR), 1)

        connectionCountTrace = numpy.concatenate(
            (connectionCountTrace,
             experiment.up._connectedCounts.reshape(
                 (experiment.up._numColumns, 1))), 1)

  print "\nPass\tBursting Columns Mean\tStdDev\tMax"
  stats = experiment.getBurstingColumnsStats()
  print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2])
  print
  print MonitorMixinBase.mmPrettyPrintMetrics(
      experiment.tm.mmGetDefaultMetrics() +
      experiment.up.mmGetDefaultMetrics())
  print
  experiment.tm.mmClearHistory()

  # Per-step increase in connected-synapse counts.
  # NOTE(review): the slices stop at n-2 / n-1, dropping the final column
  # pair -- confirm the last timestep is deliberately excluded.
  newConnectionCountTrace = numpy.zeros(connectionCountTrace.shape)
  n = newConnectionCountTrace.shape[1]
  newConnectionCountTrace[:, 0:n - 2] = \
      connectionCountTrace[:, 1:n - 1] - connectionCountTrace[:, 0: n - 2]

  # estimate fraction of shared bits across adjacent time point
  unionSDRshared = experiment.up._mmComputeUnionSDRdiff()
  bitLifeList = experiment.up._mmComputeBitLifeStats()
  bitLife = numpy.array(bitLifeList)

  # Plot SP outputs, UP persistence and UP outputs in testing phase
  def showSequenceStartLine(ax, trainingPasses, sequenceLength):
    # Dashed vertical line at the start of each pass
    for i in xrange(trainingPasses):
      ax.vlines(i * sequenceLength, 0, ax1.get_ylim()[0], linestyles='--')

  ncolShow = 50
  f, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4)
  ax1.imshow(activeSPTrace[1:ncolShow, :],
             cmap=cm.Greys,
             interpolation="nearest",
             aspect='auto')
  showSequenceStartLine(ax1, trainingPasses, sequenceLength)
  ax1.set_title('SP SDR')
  ax1.set_ylabel('Columns')
  ax2.imshow(poolingActivationTrace[1:ncolShow, :],
             cmap=cm.Greys,
             interpolation="nearest",
             aspect='auto')
  showSequenceStartLine(ax2, trainingPasses, sequenceLength)
  ax2.set_title('Persistence')
  ax3.imshow(activeCellsTrace[1:ncolShow, :],
             cmap=cm.Greys,
             interpolation="nearest",
             aspect='auto')
  showSequenceStartLine(ax3, trainingPasses, sequenceLength)
  ax3.set_title('Union SDR')
  ax4.imshow(newConnectionCountTrace[1:ncolShow, :],
             cmap=cm.Greys,
             interpolation="nearest",
             aspect='auto')
  showSequenceStartLine(ax4, trainingPasses, sequenceLength)
  ax4.set_title('New Connection #')
  ax2.set_xlabel('Time (steps)')
  pp = PdfPages(outputfileName)
  pp.savefig()
  pp.close()
# Check if TM learning went ok print "Testing TemporalMemory on novel sequences" testSequenceLength = 10 sequences = smer.generateSequences(testSequenceLength, agents, verbosity=1) smer.feedLayers(sequences, tmLearn=False, verbosity=2) print smer.tm.mmPrettyPrintMetrics(smer.tm.mmGetDefaultMetrics()) unpredictedActiveColumnsMetric = smer.tm.mmGetMetricFromTrace( smer.tm.mmGetTraceUnpredictedActiveColumns()) predictedActiveColumnsMetric = smer.tm.mmGetMetricFromTrace( smer.tm.mmGetTracePredictedActiveColumns()) if (unpredictedActiveColumnsMetric.sum == 0) and (predictedActiveColumnsMetric.sum == universe.wSensor * (testSequenceLength - 1) * len(agents)): print "TM training successful!!" else: print "TM training unsuccessful" ############################################################ # Temporal pooler training print "Training TemporalPooler on sequences" sequences = smer.generateSequences(10, agents, verbosity=1) smer.feedLayers(sequences, tmLearn=False, tpLearn=True, verbosity=2) print MonitorMixinBase.mmPrettyPrintMetrics(smer.tm.mmGetDefaultMetrics() + smer.tp.mmGetDefaultMetrics())
# Check if TM learning went ok print "Testing TemporalMemory on novel sequences" testSequenceLength=10 sequences = smer.generateSequences(testSequenceLength, agents, verbosity=1) smer.feedLayers(sequences, tmLearn=False, verbosity=2) print smer.tm.mmPrettyPrintMetrics(smer.tm.mmGetDefaultMetrics()) unpredictedActiveColumnsMetric = smer.tm.mmGetMetricFromTrace( smer.tm.mmGetTraceUnpredictedActiveColumns()) predictedActiveColumnsMetric = smer.tm.mmGetMetricFromTrace( smer.tm.mmGetTracePredictedActiveColumns()) if (unpredictedActiveColumnsMetric.sum == 0) and ( predictedActiveColumnsMetric.sum == universe.wSensor*(testSequenceLength-1)*len(agents)): print "TM training successful!!" else: print "TM training unsuccessful" ############################################################ # Temporal pooler training print "Training TemporalPooler on sequences" sequences = smer.generateSequences(10, agents, verbosity=1) smer.feedLayers(sequences, tmLearn=False, tpLearn=True, verbosity=2) print MonitorMixinBase.mmPrettyPrintMetrics(smer.tm.mmGetDefaultMetrics() + smer.tp.mmGetDefaultMetrics())
def run(params, paramDir, outputDir, plotVerbosity=0, consoleVerbosity=0):
  """
  Runs the Union Temporal Pooler capacity experiment.

  :param params: A dict containing the following experiment parameters:

        patternDimensionality - Dimensionality of sequence patterns
        patternCardinality - Cardinality (# ON bits) of sequence patterns
        sequenceLength - Length of sequences shown to network
        sequenceCount - Number of unique sequences used
        trainingPasses - Number of times Temporal Memory is trained on each
        sequence
        temporalMemoryParams - A dict of Temporal Memory parameter overrides
        unionTemporalPoolerParams - A dict of Union Temporal Pooler parameter
        overrides

  :param paramDir: Path of parameter file
  :param outputDir: Output will be written to this path
  :param plotVerbosity: Plotting verbosity
  :param consoleVerbosity: Console output verbosity
  """
  start = time.time()
  print "Running Union Temporal Pooler Capacity Experiment...\n"
  print "Params dir: {0}".format(os.path.join(os.path.dirname(__file__),
                                              paramDir))
  print "Output dir: {0}\n".format(os.path.join(os.path.dirname(__file__),
                                                outputDir))

  patternDimensionality = params["patternDimensionality"]
  patternCardinality = params["patternCardinality"]
  sequenceLength = params["sequenceLength"]
  sequenceCount = params["numberOfSequences"]
  trainingPasses = params["trainingPasses"]
  tmParamOverrides = params["temporalMemoryParams"]
  upParamOverrides = params["unionTemporalPoolerParams"]

  # Generate input data
  inputSequences, seqLabels = generateSequences(patternDimensionality,
                                                patternCardinality,
                                                sequenceLength,
                                                sequenceCount)

  print "\nCreating Network..."
  experiment = UnionTemporalPoolerExperiment(tmParamOverrides,
                                             upParamOverrides)

  # Train the Temporal Memory on the generated sequences
  print "\nTraining Temporal Memory..."
  for i in xrange(trainingPasses):
    print "\nTraining pass {0} ...\n".format(i)
    experiment.runNetworkOnSequences(
        inputSequences,
        seqLabels,
        tmLearn=True,
        upLearn=None,
        verbosity=consoleVerbosity,
        progressInterval=_SHOW_PROGRESS_INTERVAL)
    if consoleVerbosity > 0:
      stats = experiment.getBurstingColumnsStats()
      print "\nPass\tMean\t\tStdDev\t\tMax\t\t(Bursting Columns)"
      print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
      print
      print MonitorMixinBase.mmPrettyPrintMetrics(
          experiment.tm.mmGetDefaultMetrics())
      print
    # Reset TM monitoring so each pass's stats are independent
    experiment.tm.mmClearHistory()

  # Run test phase recording Union SDRs
  unionSdrs = runTestPhase(experiment, inputSequences, seqLabels,
                           sequenceCount, sequenceLength, consoleVerbosity)

  # Output distinctness metric
  print "\nSequences\tDistinctness Ave\tStdDev\tMax"
  ave, stdDev, maxDist = getDistinctness(unionSdrs)
  print "{0}\t{1}\t{2}\t{3}".format(sequenceCount, ave, stdDev, maxDist)

  # Check bursting columns metric during test phase
  print "\nSequences\tBursting Columns Mean\tStdDev\tMax"
  stats = experiment.getBurstingColumnsStats()
  print "{0}\t{1}\t{2}\t{3}".format(sequenceCount, stats[0], stats[1],
                                    stats[2])
  # A trained TM should predict all test-phase columns (no bursting)
  if trainingPasses > 0 and stats[0] > 0:
    print "***Warning! Mean bursing columns > 0 in test phase***"
  print
  print MonitorMixinBase.mmPrettyPrintMetrics(
      experiment.tm.mmGetDefaultMetrics() +
      experiment.up.mmGetDefaultMetrics())
  print

  print "Total time: {0:2} seconds.".format(int(time.time() - start))
def runTestPhase(experiment, tmLearn=False, upLearn=True, outputfileName='results/TemporalPoolingOutputs.pdf'): print "\nRunning test phase..." print "tmLearn: ", tmLearn print "upLearn: ", upLearn inputSequences = generatedSequences inputCategories = labeledSequences experiment.tm.mmClearHistory() experiment.up.mmClearHistory() experiment.tm.reset() experiment.up.reset() # Persistence levels across time poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 0)) # union SDR across time activeCellsTrace = numpy.zeros((experiment.up._numColumns, 0)) # active cells in SP across time activeSPTrace = numpy.zeros((experiment.up._numColumns, 0)) # number of connections for SP cells connectionCountTrace = numpy.zeros((experiment.up._numColumns, 0)) # number of active inputs per SP cells activeOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0)) # number of predicted active inputs per SP cells predictedActiveOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0)) for _ in xrange(trainingPasses): experiment.tm.reset() experiment.up.reset() for i in xrange(len(inputSequences)): sensorPattern = inputSequences[i] inputCategory = inputCategories[i] if sensorPattern is None: pass else: experiment.tm.compute(sensorPattern, learn=tmLearn, sequenceLabel=inputCategory) activeCells, predActiveCells, burstingCols, = experiment.getUnionTemporalPoolerInput() overlapsActive = experiment.up._calculateOverlap(activeCells) overlapsPredictedActive = experiment.up._calculateOverlap(predActiveCells) activeOverlapsTrace = numpy.concatenate((activeOverlapsTrace, overlapsActive.reshape((experiment.up._numColumns,1))), 1) predictedActiveOverlapsTrace = numpy.concatenate((predictedActiveOverlapsTrace, overlapsPredictedActive.reshape((experiment.up._numColumns,1))), 1) experiment.up.compute(activeCells, predActiveCells, learn=upLearn, sequenceLabel=inputCategory) currentPoolingActivation = experiment.up._poolingActivation.reshape((experiment.up._numColumns, 1)) 
poolingActivationTrace = numpy.concatenate((poolingActivationTrace, currentPoolingActivation), 1) currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1)) currentUnionSDR[experiment.up._unionSDR] = 1 activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1) currentSPSDR = numpy.zeros((experiment.up._numColumns, 1)) currentSPSDR[experiment.up._activeCells] = 1 activeSPTrace = numpy.concatenate((activeSPTrace, currentSPSDR), 1) connectionCountTrace = numpy.concatenate((connectionCountTrace, experiment.up._connectedCounts.reshape((experiment.up._numColumns, 1))), 1) print "\nPass\tBursting Columns Mean\tStdDev\tMax" stats = experiment.getBurstingColumnsStats() print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2]) print print MonitorMixinBase.mmPrettyPrintMetrics(\ experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics()) print experiment.tm.mmClearHistory() newConnectionCountTrace = numpy.zeros(connectionCountTrace.shape) n = newConnectionCountTrace.shape[1] newConnectionCountTrace[:,0:n-2] = connectionCountTrace[:,1:n-1] - connectionCountTrace[:,0:n-2] # estimate fraction of shared bits across adjacent time point unionSDRshared = experiment.up._mmComputeUnionSDRdiff() bitLifeList = experiment.up._mmComputeBitLifeStats() bitLife = numpy.array(bitLifeList) # Plot SP outputs, UP persistence and UP outputs in testing phase def showSequenceStartLine(ax, trainingPasses, sequenceLength): for i in xrange(trainingPasses): ax.vlines(i*sequenceLength, 0, ax1.get_ylim()[0], linestyles='--') ncolShow = 50 f, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1,ncols=4) ax1.imshow(activeSPTrace[1:ncolShow,:], cmap=cm.Greys,interpolation="nearest",aspect='auto') showSequenceStartLine(ax1, trainingPasses, sequenceLength) ax1.set_title('SP SDR') ax1.set_ylabel('Columns') ax2.imshow(poolingActivationTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto') showSequenceStartLine(ax2, trainingPasses, sequenceLength) 
ax2.set_title('Persistence') ax3.imshow(activeCellsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto') showSequenceStartLine(ax3, trainingPasses, sequenceLength) ax3.set_title('Union SDR') ax4.imshow(newConnectionCountTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto') showSequenceStartLine(ax4, trainingPasses, sequenceLength) ax4.set_title('New Connection #') ax2.set_xlabel('Time (steps)') pp = PdfPages(outputfileName) pp.savefig() pp.close()
def experiment2(): paramDir = 'params/1024_baseline/5_trainingPasses.yaml' outputDir = 'results/' params = yaml.safe_load(open(paramDir, 'r')) options = {'plotVerbosity': 2, 'consoleVerbosity': 2} plotVerbosity = 2 consoleVerbosity = 1 print "Running SDR overlap experiment...\n" print "Params dir: {0}".format(paramDir) print "Output dir: {0}\n".format(outputDir) # Dimensionality of sequence patterns patternDimensionality = params["patternDimensionality"] # Cardinality (ON / true bits) of sequence patterns patternCardinality = params["patternCardinality"] # TODO If this parameter is to be supported, the sequence generation code # below must change # Number of unique patterns from which sequences are built # patternAlphabetSize = params["patternAlphabetSize"] # Length of sequences shown to network sequenceLength = params["sequenceLength"] # Number of sequences used. Sequences may share common elements. numberOfSequences = params["numberOfSequences"] # Number of sequence passes for training the TM. Zero => no training. trainingPasses = params["trainingPasses"] tmParamOverrides = params["temporalMemoryParams"] upParamOverrides = params["unionPoolerParams"] # Generate a sequence list and an associated labeled list (both containing a # set of sequences separated by None) start = time.time() print "\nGenerating sequences..." 
patternAlphabetSize = sequenceLength * numberOfSequences patternMachine = PatternMachine(patternDimensionality, patternCardinality, patternAlphabetSize) sequenceMachine = SequenceMachine(patternMachine) numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength) generatedSequences = sequenceMachine.generateFromNumbers(numbers) sequenceLabels = [ str(numbers[i + i * sequenceLength:i + (i + 1) * sequenceLength]) for i in xrange(numberOfSequences) ] labeledSequences = [] for label in sequenceLabels: for _ in xrange(sequenceLength): labeledSequences.append(label) labeledSequences.append(None) # Set up the Temporal Memory and Union Pooler network print "\nCreating network..." experiment = UnionTemporalPoolerExperiment(tmParamOverrides, upParamOverrides) # Train only the Temporal Memory on the generated sequences # if trainingPasses > 0: # # print "\nTraining Temporal Memory..." # if consoleVerbosity > 0: # print "\nPass\tBursting Columns Mean\tStdDev\tMax" # # for i in xrange(trainingPasses): # experiment.runNetworkOnSequences(generatedSequences, # labeledSequences, # tmLearn=True, # upLearn=None, # verbosity=consoleVerbosity, # progressInterval=_SHOW_PROGRESS_INTERVAL) # # if consoleVerbosity > 0: # stats = experiment.getBurstingColumnsStats() # print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2]) # # # Reset the TM monitor mixin's records accrued during this training pass # # experiment.tm.mmClearHistory() # # print # print MonitorMixinBase.mmPrettyPrintMetrics( # experiment.tm.mmGetDefaultMetrics()) # print # # if plotVerbosity >= 2: # plotNetworkState(experiment, plotVerbosity, trainingPasses, phase="Training") # # experiment.tm.mmClearHistory() # experiment.up.mmClearHistory() print "\nRunning test phase..." 
inputSequences = generatedSequences inputCategories = labeledSequences tmLearn = True upLearn = False classifierLearn = False currentTime = time.time() experiment.tm.reset() experiment.up.reset() poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 1)) activeCellsTrace = numpy.zeros((experiment.up._numColumns, 1)) activeSPTrace = numpy.zeros((experiment.up._numColumns, 1)) for _ in xrange(trainingPasses): for i in xrange(len(inputSequences)): sensorPattern = inputSequences[i] inputCategory = inputCategories[i] if sensorPattern is None: pass else: experiment.tm.compute(sensorPattern, learn=tmLearn, sequenceLabel=inputCategory) if upLearn is not None: activeCells, predActiveCells, burstingCols, = experiment.getUnionTemporalPoolerInput( ) experiment.up.compute(activeCells, predActiveCells, learn=upLearn, sequenceLabel=inputCategory) currentPoolingActivation = experiment.up._poolingActivation currentPoolingActivation = experiment.up._poolingActivation.reshape( (experiment.up._numColumns, 1)) poolingActivationTrace = numpy.concatenate( (poolingActivationTrace, currentPoolingActivation), 1) currentUnionSDR = numpy.zeros( (experiment.up._numColumns, 1)) currentUnionSDR[experiment.up._unionSDR] = 1 activeCellsTrace = numpy.concatenate( (activeCellsTrace, currentUnionSDR), 1) currentSPSDR = numpy.zeros((experiment.up._numColumns, 1)) currentSPSDR[experiment.up._activeCells] = 1 activeSPTrace = numpy.concatenate( (activeSPTrace, currentSPSDR), 1) print "\nPass\tBursting Columns Mean\tStdDev\tMax" stats = experiment.getBurstingColumnsStats() print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2]) print print MonitorMixinBase.mmPrettyPrintMetrics(\ experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics()) print experiment.tm.mmClearHistory() # estimate fraction of shared bits across adjacent time point unionSDRshared = experiment.up._mmComputeUnionSDRdiff() bitLifeList = experiment.up._mmComputeBitLifeStats() bitLife = 
numpy.array(bitLifeList) # Plot SP outputs, UP persistence and UP outputs in testing phase def showSequenceStartLine(ax, trainingPasses, sequenceLength): for i in xrange(trainingPasses): ax.vlines(i * sequenceLength, 0, 100, linestyles='--') plt.figure() ncolShow = 100 f, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3) ax1.imshow(activeSPTrace[1:ncolShow, :], cmap=cm.Greys, interpolation="nearest", aspect='auto') showSequenceStartLine(ax1, trainingPasses, sequenceLength) ax1.set_title('SP SDR') ax1.set_ylabel('Columns') ax2.imshow(poolingActivationTrace[1:100, :], cmap=cm.Greys, interpolation="nearest", aspect='auto') showSequenceStartLine(ax2, trainingPasses, sequenceLength) ax2.set_title('Persistence') ax3.imshow(activeCellsTrace[1:ncolShow, :], cmap=cm.Greys, interpolation="nearest", aspect='auto') showSequenceStartLine(ax3, trainingPasses, sequenceLength) plt.title('Union SDR') ax2.set_xlabel('Time (steps)') pp = PdfPages('results/UnionPoolingDuringTMlearning_Experiment2.pdf') pp.savefig() pp.close() f, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1) ax1.plot((sum(activeCellsTrace)) / experiment.up._numColumns * 100) ax1.set_ylabel('Union SDR size (%)') ax1.set_xlabel('Time (steps)') ax1.set_ylim(0, 25) ax2.plot(unionSDRshared) ax2.set_ylabel('Shared Bits') ax2.set_xlabel('Time (steps)') ax3.hist(bitLife) ax3.set_xlabel('Life duration for each bit') pp = PdfPages('results/UnionSDRproperty_Experiment2.pdf') pp.savefig() pp.close()