def trainTemporalMemory(experiment, inputSequences, inputCategories,
                        trainingPasses, consoleVerbosity):
  """
  Train only the Temporal Memory of a Union Pooler experiment network.

  Runs the network over the input for `trainingPasses` passes with TM
  learning on and the Union Pooler disabled (upLearn=None), accumulating
  per-pass bursting-column statistics into a tab-separated report.

  @param experiment       Experiment object exposing runNetworkOnSequences(),
                          getBurstingColumnsStats(), and tm/up monitor mixins
  @param inputSequences   Input patterns (sequences terminated by None)
  @param inputCategories  Category labels parallel to inputSequences
  @param trainingPasses   (int) Number of passes over the input
  @param consoleVerbosity (int) 0 = quiet, >0 = stats table, >1 = TM metrics
  """
  burstingColsString = ""
  for i in xrange(trainingPasses):
    experiment.runNetworkOnSequences(inputSequences,
                                     inputCategories,
                                     tmLearn=True,
                                     upLearn=None,
                                     classifierLearn=False,
                                     verbosity=consoleVerbosity,
                                     progressInterval=_SHOW_PROGRESS_INTERVAL)
    if consoleVerbosity > 1:
      print
      print MonitorMixinBase.mmPrettyPrintMetrics(
        experiment.tm.mmGetDefaultMetrics())
      print
    # stats = (mean, stddev, max) of bursting-column counts for this pass
    stats = experiment.getBurstingColumnsStats()
    burstingColsString += "{0}\t{1}\t{2}\t{3}\n".format(i, stats[0], stats[1],
                                                        stats[2])
    # Clear monitor histories so each pass's stats are independent
    experiment.tm.mmClearHistory()
    experiment.up.mmClearHistory()
  if consoleVerbosity > 0:
    print "\nTemporal Memory Bursting Columns stats..."
    print "Pass\tMean\t\tStdDev\t\tMax"
    print burstingColsString
def trainTwoPass(runner, exhaustiveAgents, completeSequenceLength, verbosity):
  """
  Two-phase training: first train the temporal memory alone, then train the
  temporal pooler on top of the (now frozen) temporal memory.

  @param runner                 Experiment runner with generateSequences(),
                                feedLayers(), and tm/tp monitor mixins
  @param exhaustiveAgents       Agents used to generate training sequences
  @param completeSequenceLength (int) Length of one complete sequence
  @param verbosity              (int) Console verbosity passed through
  """
  print "Training temporal memory..."
  sequences = runner.generateSequences(completeSequenceLength *
                                       TWOPASS_TM_TRAINING_REPS,
                                       exhaustiveAgents,
                                       verbosity=verbosity)
  runner.feedLayers(sequences, tmLearn=True, tpLearn=False,
                    verbosity=verbosity,
                    showProgressInterval=SHOW_PROGRESS_INTERVAL)
  print
  print MonitorMixinBase.mmPrettyPrintMetrics(
    runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
  print
  print "Training temporal pooler..."
  # Drop TM-phase monitor records so TP-phase metrics are clean
  runner.tm.mmClearHistory()
  runner.tp.mmClearHistory()
  sequences = runner.generateSequences(completeSequenceLength *
                                       TWOPASS_TP_TRAINING_REPS,
                                       exhaustiveAgents,
                                       verbosity=verbosity)
  runner.feedLayers(sequences, tmLearn=False, tpLearn=True,
                    verbosity=verbosity,
                    showProgressInterval=SHOW_PROGRESS_INTERVAL)
  print
  print MonitorMixinBase.mmPrettyPrintMetrics(
    runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
  print
def run(tm, mutate_times, sequences, alphabet, feedback_seq=None, mutation=0,
        verbose=0):
  """
  Run a trained TM over a test sequence, optionally perturbing selected
  timesteps, and collect unpredicted-active-column counts.

  @param tm           Temporal memory (apical-feedback variant) with monitor
                      mixin
  @param mutate_times Collection of timestep indices to perturb
  @param sequences    Patterns; None entries reset the TM
  @param alphabet     Alphabet used by labelPattern() to name patterns
  @param feedback_seq Optional per-timestep apical feedback patterns
  @param mutation     If truthy, perturbed timesteps are SKIPPED entirely;
                      otherwise the pattern is replaced with random bits
  @param verbose      (int) >0 prints metrics, >1 also prints traces
  @return (ys, allLabels) where ys[i] = #unpredicted active columns at step i
  """
  allLabels = []
  tm.reset()
  for j, sensorPattern in enumerate(sequences):
    if sensorPattern is None:
      tm.reset()
    else:
      if j in mutate_times:
        if mutation:
          continue
        else:
          # Replace with random noise pattern of the same cardinality.
          # NOTE(review): 2047 assumes a 2048-column input space — confirm
          sensorPattern = set([random.randint(0, 2047)
                               for _ in sensorPattern])
      if feedback_seq is not None:
        feedback = feedback_seq[j]
      else:
        feedback = set()
      tm.compute(sensorPattern,
                 activeApicalCells=feedback,
                 learn=True,
                 sequenceLabel=None)
      allLabels.append(labelPattern(sensorPattern, alphabet))
  ys = [len(x) for x in tm.mmGetTraceUnpredictedActiveColumns().data]
  if verbose > 0:
    print " TM metrics on test sequence"
    print MonitorMixinBase.mmPrettyPrintMetrics(tm.mmGetDefaultMetrics())
  if verbose > 1:
    print MonitorMixinBase.mmPrettyPrintTraces(
      tm.mmGetDefaultTraces(verbosity=True),
      breakOnResets=tm.mmGetTraceResets())
  return ys, allLabels
def runTMtrainingPhase(experiment):
  """
  Train only the Temporal Memory on the generated sequences.

  NOTE(review): trainingPasses, consoleVerbosity, generatedSequences,
  labeledSequences and _SHOW_PROGRESS_INTERVAL are read from module scope —
  confirm they are defined before this is called.
  """
  # Train only the Temporal Memory on the generated sequences
  if trainingPasses > 0:
    print "\nTraining Temporal Memory..."
    if consoleVerbosity > 0:
      print "\nPass\tBursting Columns Mean\tStdDev\tMax"
    for i in xrange(trainingPasses):
      experiment.runNetworkOnSequences(generatedSequences,
                                       labeledSequences,
                                       tmLearn=True,
                                       upLearn=None,
                                       verbosity=consoleVerbosity,
                                       progressInterval=_SHOW_PROGRESS_INTERVAL)
      if consoleVerbosity > 0:
        stats = experiment.getBurstingColumnsStats()
        print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
      # Reset the TM monitor mixin's records accrued during this training pass
      # experiment.tm.mmClearHistory()
    print
    print MonitorMixinBase.mmPrettyPrintMetrics(
      experiment.tm.mmGetDefaultMetrics())
    print
def trainTwoPass(runner, exhaustiveAgents, completeSequenceLength, verbosity): print "Training temporal memory..." sequences = runner.generateSequences(completeSequenceLength * TWOPASS_TM_TRAINING_REPS, exhaustiveAgents, verbosity=verbosity) runner.feedLayers(sequences, tmLearn=True, tpLearn=False, verbosity=verbosity, showProgressInterval=SHOW_PROGRESS_INTERVAL) print print MonitorMixinBase.mmPrettyPrintMetrics(runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics()) print print "Training temporal pooler..." runner.tm.mmClearHistory() runner.tp.mmClearHistory() sequences = runner.generateSequences(completeSequenceLength * TWOPASS_TP_TRAINING_REPS, exhaustiveAgents, verbosity=verbosity) runner.feedLayers(sequences, tmLearn=False, tpLearn=True, verbosity=verbosity, showProgressInterval=SHOW_PROGRESS_INTERVAL) print print MonitorMixinBase.mmPrettyPrintMetrics(runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics()) print
def train(tm, sequences, feedback_seq=None, trials=trials, feedback_buffer=10,
          clearhistory=True, verbose=0):
  """
  Train an apical-feedback TM on `sequences` for `trials` passes.

  For the first `feedback_buffer` trials, random apical feedback is fed in;
  afterwards `feedback_seq` (if given) supplies feedback per timestep.

  @param tm              Temporal memory with monitor mixin
  @param sequences       Patterns; None entries reset the TM
  @param feedback_seq    Optional per-timestep apical feedback patterns
  @param trials          (int) Number of passes (default read from the
                         module-level `trials` — NOTE(review): default is
                         captured at def time; confirm this is intended)
  @param feedback_buffer (int) Trials that use random feedback instead
  @param clearhistory    (bool) Clear TM monitor history after each trial
  @param verbose         (int) >0 prints metrics, >1 also prints traces
  """
  for i in range(trials):
    for j, sensorPattern in enumerate(sequences):
      if sensorPattern is None:
        tm.reset()
      else:
        if i<feedback_buffer:
          # NOTE(review): feedback_n is a module-level global — confirm
          feedback = set([random.randint(0, 2047)
                          for _ in range(feedback_n)])
        elif feedback_seq is not None:
          feedback = feedback_seq[j]
        else:
          feedback = set()
        tm.compute(sensorPattern,
                   activeApicalCells=feedback,
                   learn=True,
                   sequenceLabel=None)
    if clearhistory:
      # Print the final trial's metrics/traces before discarding them
      if i == trials-1:
        if verbose > 0:
          print " TM metrics after training"
          print MonitorMixinBase.mmPrettyPrintMetrics(tm.mmGetDefaultMetrics())
        if verbose > 1:
          print " TM traces after training"
          print MonitorMixinBase.mmPrettyPrintTraces(
            tm.mmGetDefaultTraces(verbosity=True),
            breakOnResets=tm.mmGetTraceResets())
      tm.mmClearHistory()
def runTMtrainingPhase(experiment):
  """
  Train only the Temporal Memory on the generated sequences.

  NOTE(review): trainingPasses, consoleVerbosity, generatedSequences,
  labeledSequences and _SHOW_PROGRESS_INTERVAL are read from module scope —
  confirm they are defined before this is called.
  """
  # Train only the Temporal Memory on the generated sequences
  if trainingPasses > 0:
    print "\nTraining Temporal Memory..."
    if consoleVerbosity > 0:
      print "\nPass\tBursting Columns Mean\tStdDev\tMax"
    for i in xrange(trainingPasses):
      experiment.runNetworkOnSequences(
        generatedSequences,
        labeledSequences,
        tmLearn=True,
        upLearn=None,
        verbosity=consoleVerbosity,
        progressInterval=_SHOW_PROGRESS_INTERVAL)
      if consoleVerbosity > 0:
        stats = experiment.getBurstingColumnsStats()
        print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
      # Reset the TM monitor mixin's records accrued during this training pass
      # experiment.tm.mmClearHistory()
    print
    print MonitorMixinBase.mmPrettyPrintMetrics(
      experiment.tm.mmGetDefaultMetrics())
    print
def runTestPhase(runner, randomAgents, numWorlds, numElements,
                 completeSequenceLength, verbosity):
  """
  Run the learning-off test phase and print TP/TM metrics.

  @param runner                 Experiment runner with generateSequences(),
                                feedLayers(), and tm/tp monitor mixins
  @param randomAgents           Agents generating the test sequences
  @param numWorlds              (int) Number of worlds (reporting only)
  @param numElements            (int) Number of elements (reporting only)
  @param completeSequenceLength (int) Total length, divided evenly among
                                NUM_TEST_SEQUENCES test sequences
                                (integer division under Python 2)
  @param verbosity              (int) >=2 additionally prints TP overlap data
  """
  print "Testing (worlds: {0}, elements: {1})...".format(
    numWorlds, numElements)
  # Discard training-phase monitor records before testing
  runner.tm.mmClearHistory()
  runner.tp.mmClearHistory()
  sequences = runner.generateSequences(completeSequenceLength /
                                       NUM_TEST_SEQUENCES,
                                       randomAgents,
                                       verbosity=verbosity,
                                       numSequences=NUM_TEST_SEQUENCES)
  runner.feedLayers(sequences, tmLearn=False, tpLearn=False,
                    verbosity=verbosity,
                    showProgressInterval=SHOW_PROGRESS_INTERVAL)
  print "Done testing.\n"
  if verbosity >= 2:
    print "Overlap:"
    print
    print runner.tp.mmPrettyPrintDataOverlap()
    print
  print MonitorMixinBase.mmPrettyPrintMetrics(
    runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
  print
def trainUP(tm, sequences, up=None, trials=trials, clearhistory=True,
            verbose=0):
  """
  Train a TM with apical feedback supplied by a (non-learning) Union Pooler.

  Each timestep, the current union SDR is fed back to the TM as apical input,
  then the UP is advanced on the TM's output.

  @param tm           Temporal memory with monitor mixin
  @param sequences    Patterns; None entries reset the TM (and UP, if any)
  @param up           Optional union pooler; None disables feedback
  @param trials       (int) Number of passes (default from module-level
                      `trials` — NOTE(review): captured at def time)
  @param clearhistory (bool) Clear TM monitor history after each trial
  @param verbose      (int) >0 prints metrics, >1 also prints traces
  """
  for i in range(trials):
    for j, sensorPattern in enumerate(sequences):
      if sensorPattern is None:
        tm.reset()
        if up is not None:
          up.reset()
      else:
        if up is None:
          feedback = set()
        else:
          # Apical feedback = currently-active cells of the union SDR
          feedback = set(np.nonzero(up.getUnionSDR())[0])
        tm.compute(sensorPattern,
                   activeApicalCells=feedback,
                   learn=True,
                   sequenceLabel=None)
        if up is not None:
          activeCells, predActiveCells, burstingCols, = getUnionTemporalPoolerInput(tm)
          up.compute(activeCells, predActiveCells, learn=False)
    if clearhistory:
      # Print the final trial's metrics/traces before discarding them
      if i == trials-1:
        if verbose > 0:
          print " TM metrics after training"
          print MonitorMixinBase.mmPrettyPrintMetrics(tm.mmGetDefaultMetrics())
        if verbose > 1:
          print " TM traces after training"
          print MonitorMixinBase.mmPrettyPrintTraces(
            tm.mmGetDefaultTraces(verbosity=True),
            breakOnResets=tm.mmGetTraceResets())
      tm.mmClearHistory()
def trainTemporalMemory(experiment, inputSequences, inputCategories,
                        trainingPasses, consoleVerbosity):
  """
  Train only the Temporal Memory of a Union Pooler experiment network.

  Runs the network over the input for `trainingPasses` passes with TM
  learning on and the Union Pooler disabled (upLearn=None), accumulating
  per-pass bursting-column statistics into a tab-separated report.

  @param experiment       Experiment object exposing runNetworkOnSequences(),
                          getBurstingColumnsStats(), and tm/up monitor mixins
  @param inputSequences   Input patterns (sequences terminated by None)
  @param inputCategories  Category labels parallel to inputSequences
  @param trainingPasses   (int) Number of passes over the input
  @param consoleVerbosity (int) 0 = quiet, >0 = stats table, >1 = TM metrics
  """
  burstingColsString = ""
  for i in xrange(trainingPasses):
    experiment.runNetworkOnSequences(inputSequences,
                                     inputCategories,
                                     tmLearn=True,
                                     upLearn=None,
                                     classifierLearn=False,
                                     verbosity=consoleVerbosity,
                                     progressInterval=_SHOW_PROGRESS_INTERVAL)
    if consoleVerbosity > 1:
      print
      print MonitorMixinBase.mmPrettyPrintMetrics(
        experiment.tm.mmGetDefaultMetrics())
      print
    # stats = (mean, stddev, max) of bursting-column counts for this pass
    stats = experiment.getBurstingColumnsStats()
    burstingColsString += "{0}\t{1}\t{2}\t{3}\n".format(i, stats[0], stats[1],
                                                        stats[2])
    # Clear monitor histories so each pass's stats are independent
    experiment.tm.mmClearHistory()
    experiment.up.mmClearHistory()
  if consoleVerbosity > 0:
    print "\nTemporal Memory Bursting Columns stats..."
    print "Pass\tMean\t\tStdDev\t\tMax"
    print burstingColsString
def trainOnline(runner, exhaustiveAgents, completeSequenceLength, reps, verbosity): print "Training temporal memory and temporal pooler..." sequences = runner.generateSequences(completeSequenceLength * reps, exhaustiveAgents, verbosity=verbosity) runner.feedLayers( sequences, tmLearn=True, tpLearn=True, verbosity=verbosity, showProgressInterval=SHOW_PROGRESS_INTERVAL ) print print MonitorMixinBase.mmPrettyPrintMetrics(runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics()) print
def feedLayers(self, sequences, tmLearn=True, tpLearn=None, verbosity=0,
               showProgressInterval=None):
  """
  Feed the given sequences to the HTM algorithms.

  @param sequences: (tuple) (sensorSequence, motorSequence,
                    sensorimotorSequence, sequenceLabels) — parallel lists
  @param tmLearn: (bool) Either False, or True
  @param tpLearn: (None,bool) Either None, False, or True. If None, temporal
                  pooler will be skipped.
  @param showProgressInterval: (int) Prints progress every N iterations,
                               where N is the value of this param
  """
  (sensorSequence,
   motorSequence,
   sensorimotorSequence,
   sequenceLabels) = sequences
  currentTime = time.time()
  for i in xrange(len(sensorSequence)):
    sensorPattern = sensorSequence[i]
    motorPattern = motorSequence[i]
    sensorimotorPattern = sensorimotorSequence[i]
    sequenceLabel = sequenceLabels[i]
    self.feedTransition(sensorPattern, motorPattern, sensorimotorPattern,
                        tmLearn=tmLearn, tpLearn=tpLearn,
                        sequenceLabel=sequenceLabel)
    if (showProgressInterval is not None and
        i > 0 and
        i % showProgressInterval == 0):
      print(
        "Fed {0} / {1} elements of the sequence "
        "in {2:0.2f} seconds.".format(i, len(sensorSequence),
                                      time.time() - currentTime))
      currentTime = time.time()
  if verbosity >= 2:
    # Print default TM traces
    traces = self.tm.mmGetDefaultTraces(verbosity=verbosity)
    print MonitorMixinBase.mmPrettyPrintTraces(
      traces, breakOnResets=self.tm.mmGetTraceResets())
    if tpLearn is not None:
      # Print default TP traces
      traces = self.tp.mmGetDefaultTraces(verbosity=verbosity)
      print MonitorMixinBase.mmPrettyPrintTraces(
        traces, breakOnResets=self.tp.mmGetTraceResets())
    print
def _printInfo(self): if VERBOSITY >= 2: print MonitorMixinBase.mmPrettyPrintTraces( self.tp.mmGetDefaultTraces(verbosity=3) + self.tm.mmGetDefaultTraces(verbosity=3), breakOnResets=self.tm.mmGetTraceResets()) print if VERBOSITY >= 1: print MonitorMixinBase.mmPrettyPrintMetrics( self.tp.mmGetDefaultMetrics() + self.tm.mmGetDefaultMetrics()) print
def feed(self, sequences, tmLearn=True, tpLearn=None, verbosity=2, showProgressInterval=None): # Note: not setup for TP... # https://github.com/numenta/nupic.research/blob/master/sensorimotor/sensorimotor/sensorimotor_experiment_runner.py#L131 """ Feed the given sequences to the HTM algorithms. @param tmLearn: (bool) Either False, or True @param tpLearn: (None,bool) Either None, False, or True. If None, temporal pooler will be skipped. @param showProgressInterval: (int) Prints progress every N iterations, where N is the value of this param """ # (sensorSequence, # motorSequence, # sensorimotorSequence, # sequenceLabels) = sequences (sensorSequences, motorSequences) = sequences currentTime = time.time() for i in xrange(len(sensorSequences)): for j in xrange(len(sensorSequences[i])): sensorPattern = set(sensorSequences[i][j]) motorPattern = set(motorSequences[i][j]) # sensorimotorPattern = sensorimotorSequence[i][j] # import pdb; pdb.set_trace() self.tm.compute( sensorPattern, # here the sequences are e.g. set([224, 480, 195, 277, 235,...]) activeExternalCells=motorPattern, formInternalConnections=True, learn=tmLearn) if (showProgressInterval is not None and i > 0 and i % showProgressInterval == 0): print( "Fed {0} / {1} elements of the sequence " "in {2:0.2f} seconds.".format(i, len(sensorSequence), time.time() - currentTime)) currentTime = time.time() if verbosity >= 2: traces = [] traces += self.tm.mmGetDefaultTraces(verbosity=verbosity) if tpLearn is not None: traces += self.tp.mmGetDefaultTraces(verbosity=verbosity) print MonitorMixinBase.mmPrettyPrintTraces( traces, breakOnResets=self.tm.mmGetTraceResets()) print
def feedLayers(self, sequences, tmLearn=True, tpLearn=None, verbosity=0,
               showProgressInterval=None):
  """
  Feed the given sequences to the HTM algorithms.

  @param sequences: (tuple) (sensorSequence, motorSequence,
                    sensorimotorSequence, sequenceLabels) — parallel lists
  @param tmLearn: (bool) Either False, or True
  @param tpLearn: (None,bool) Either None, False, or True. If None, temporal
                  pooler will be skipped.
  @param showProgressInterval: (int) Prints progress every N iterations,
                               where N is the value of this param
  """
  (sensorSequence,
   motorSequence,
   sensorimotorSequence,
   sequenceLabels) = sequences
  currentTime = time.time()
  for i in xrange(len(sensorSequence)):
    sensorPattern = sensorSequence[i]
    motorPattern = motorSequence[i]
    sensorimotorPattern = sensorimotorSequence[i]
    sequenceLabel = sequenceLabels[i]
    self.feedTransition(sensorPattern, motorPattern, sensorimotorPattern,
                        tmLearn=tmLearn, tpLearn=tpLearn,
                        sequenceLabel=sequenceLabel)
    if (showProgressInterval is not None and
        i > 0 and
        i % showProgressInterval == 0):
      print ("Fed {0} / {1} elements of the sequence "
             "in {2:0.2f} seconds.".format(
               i, len(sensorSequence), time.time() - currentTime))
      currentTime = time.time()
  if verbosity >= 2:
    # Print default TM traces
    traces = self.tm.mmGetDefaultTraces(verbosity=verbosity)
    print MonitorMixinBase.mmPrettyPrintTraces(traces,
                                               breakOnResets=
                                               self.tm.mmGetTraceResets())
    if tpLearn is not None:
      # Print default TP traces
      traces = self.tp.mmGetDefaultTraces(verbosity=verbosity)
      print MonitorMixinBase.mmPrettyPrintTraces(traces,
                                                 breakOnResets=
                                                 self.tp.mmGetTraceResets())
    print
def trainOnline(runner, exhaustiveAgents, completeSequenceLength, reps, verbosity): print "Training temporal memory and temporal pooler..." sequences = runner.generateSequences(completeSequenceLength * reps, exhaustiveAgents, verbosity=verbosity) runner.feedLayers(sequences, tmLearn=True, tpLearn=True, verbosity=verbosity, showProgressInterval=SHOW_PROGRESS_INTERVAL) print print MonitorMixinBase.mmPrettyPrintMetrics(runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics()) print
def runUP(tm, mutate_times, sequences, alphabet, up=None, mutation=0,
          verbose=0):
  """
  Run a trained TM over a test sequence with apical feedback from a
  (non-learning) Union Pooler, optionally perturbing selected timesteps.

  @param tm           Temporal memory with monitor mixin
  @param mutate_times Collection of timestep indices to perturb
  @param sequences    Patterns; None entries reset the TM
  @param alphabet     Alphabet used by labelPattern() to name patterns
  @param up           Optional union pooler; None disables feedback
  @param mutation     If truthy, perturbed timesteps are SKIPPED entirely;
                      otherwise the pattern is replaced with random bits
  @param verbose      (int) >0 prints metrics, >1 also prints traces
  @return (ys, allLabels) where ys[i] = #unpredicted active columns at step i

  NOTE(review): unlike the plain run(), the UP is not reset on None entries
  and the TM is not reset up front — confirm this is intended.
  """
  allLabels = []
  for j, sensorPattern in enumerate(sequences):
    if sensorPattern is None:
      tm.reset()
    else:
      if j in mutate_times:
        if mutation:
          continue
        else:
          # Replace with random noise pattern of the same cardinality
          sensorPattern = set(
            [random.randint(0, 2047) for _ in sensorPattern])
      if up is None:
        feedback = set()
      else:
        # Apical feedback = currently-active cells of the union SDR
        feedback = set(np.nonzero(up.getUnionSDR())[0])
      tm.compute(sensorPattern,
                 activeApicalCells=feedback,
                 learn=True,
                 sequenceLabel=None)
      if up is not None:
        activeCells, predActiveCells, burstingCols, = getUnionTemporalPoolerInput(
          tm)
        up.compute(activeCells, predActiveCells, learn=False)
      allLabels.append(labelPattern(sensorPattern, alphabet))
  ys = [len(x) for x in tm.mmGetTraceUnpredictedActiveColumns().data]
  if verbose > 0:
    print " TM metrics on test sequence"
    print MonitorMixinBase.mmPrettyPrintMetrics(tm.mmGetDefaultMetrics())
  if verbose > 1:
    print MonitorMixinBase.mmPrettyPrintTraces(
      tm.mmGetDefaultTraces(verbosity=True),
      breakOnResets=tm.mmGetTraceResets())
  return ys, allLabels
def runNetworkOnSequence(self, sensorSequences, sequencesLabels, tmLearn=True,
                         upLearn=None, verbosity=0, progressInterval=None):
  """
  Runs Union Pooler network on specified sequence.

  @param sensorSequences  A sequence of sensor sequences. Each sequence is
                          terminated by None.
  @param sequencesLabels  A sequence of string representations of the current
                          sequence. Each sequence is terminated by None.
  @param tmLearn: (bool) Either False, or True
  @param upLearn: (None,bool) Either None, False, or True. If None, union
                  pooler will be skipped.
  @param progressInterval: (int) Prints progress every N iterations, where N
                           is the value of this param
  """
  currentTime = time.time()
  for i in xrange(len(sensorSequences)):
    sensorPattern = sensorSequences[i]
    sequenceLabel = sequencesLabels[i]
    self.runNetworkOnPattern(sensorPattern,
                             tmLearn=tmLearn,
                             upLearn=upLearn,
                             sequenceLabel=sequenceLabel)
    if progressInterval is not None and i > 0 and i % progressInterval == 0:
      print ("Ran {0} / {1} elements of sequence in "
             "{2:0.2f} seconds.".format(i, len(sensorSequences),
                                        time.time() - currentTime))
      currentTime = time.time()
  if verbosity >= 2:
    traces = self.tm.mmGetDefaultTraces(verbosity=verbosity)
    print MonitorMixinBase.mmPrettyPrintTraces(traces,
                                               breakOnResets=
                                               self.tm.mmGetTraceResets())
    if upLearn is not None:
      traces = self.up.mmGetDefaultTraces(verbosity=verbosity)
      print MonitorMixinBase.mmPrettyPrintTraces(traces,
                                                 breakOnResets=
                                                 self.up.mmGetTraceResets())
    print
def run(tm, mutate_times, sequences, alphabet, feedback_seq=None, mutation=0,
        verbose=0):
  """
  Run a trained TM over a test sequence, optionally perturbing selected
  timesteps, and collect unpredicted-active-column counts.

  @param tm           Temporal memory (apical-feedback variant) with monitor
                      mixin
  @param mutate_times Collection of timestep indices to perturb
  @param sequences    Patterns; None entries reset the TM
  @param alphabet     Alphabet used by labelPattern() to name patterns
  @param feedback_seq Optional per-timestep apical feedback patterns
  @param mutation     If truthy, perturbed timesteps are SKIPPED entirely;
                      otherwise the pattern is replaced with random bits
  @param verbose      (int) >0 prints metrics, >1 also prints traces
  @return (ys, allLabels) where ys[i] = #unpredicted active columns at step i
  """
  allLabels = []
  tm.reset()
  for j, sensorPattern in enumerate(sequences):
    if sensorPattern is None:
      tm.reset()
    else:
      if j in mutate_times:
        if mutation:
          continue
        else:
          # Replace with random noise pattern of the same cardinality.
          # NOTE(review): 2047 assumes a 2048-column input space — confirm
          sensorPattern = set(
            [random.randint(0, 2047) for _ in sensorPattern])
      if feedback_seq is not None:
        feedback = feedback_seq[j]
      else:
        feedback = set()
      tm.compute(sensorPattern,
                 activeApicalCells=feedback,
                 learn=True,
                 sequenceLabel=None)
      allLabels.append(labelPattern(sensorPattern, alphabet))
  ys = [len(x) for x in tm.mmGetTraceUnpredictedActiveColumns().data]
  if verbose > 0:
    print " TM metrics on test sequence"
    print MonitorMixinBase.mmPrettyPrintMetrics(tm.mmGetDefaultMetrics())
  if verbose > 1:
    print MonitorMixinBase.mmPrettyPrintTraces(
      tm.mmGetDefaultTraces(verbosity=True),
      breakOnResets=tm.mmGetTraceResets())
  return ys, allLabels
def runTestPhase(runner, randomAgents, numWorlds, numElements, completeSequenceLength, verbosity): print "Testing (worlds: {0}, elements: {1})...".format(numWorlds, numElements) runner.tm.mmClearHistory() runner.tp.mmClearHistory() sequences = runner.generateSequences( completeSequenceLength / NUM_TEST_SEQUENCES, randomAgents, verbosity=verbosity, numSequences=NUM_TEST_SEQUENCES ) runner.feedLayers( sequences, tmLearn=False, tpLearn=False, verbosity=verbosity, showProgressInterval=SHOW_PROGRESS_INTERVAL ) print "Done testing.\n" if verbosity >= 2: print "Overlap:" print print runner.tp.mmPrettyPrintDataOverlap() print print MonitorMixinBase.mmPrettyPrintMetrics(runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics()) print
def trainUP(tm, sequences, up=None, trials=trials, clearhistory=True,
            verbose=0):
  """
  Train a TM with apical feedback supplied by a (non-learning) Union Pooler.

  Each timestep, the current union SDR is fed back to the TM as apical input,
  then the UP is advanced on the TM's output.

  @param tm           Temporal memory with monitor mixin
  @param sequences    Patterns; None entries reset the TM (and UP, if any)
  @param up           Optional union pooler; None disables feedback
  @param trials       (int) Number of passes (default from module-level
                      `trials` — NOTE(review): captured at def time)
  @param clearhistory (bool) Clear TM monitor history after each trial
  @param verbose      (int) >0 prints metrics, >1 also prints traces
  """
  for i in range(trials):
    for j, sensorPattern in enumerate(sequences):
      if sensorPattern is None:
        tm.reset()
        if up is not None:
          up.reset()
      else:
        if up is None:
          feedback = set()
        else:
          # Apical feedback = currently-active cells of the union SDR
          feedback = set(np.nonzero(up.getUnionSDR())[0])
        tm.compute(sensorPattern,
                   activeApicalCells=feedback,
                   learn=True,
                   sequenceLabel=None)
        if up is not None:
          activeCells, predActiveCells, burstingCols, = getUnionTemporalPoolerInput(
            tm)
          up.compute(activeCells, predActiveCells, learn=False)
    if clearhistory:
      # Print the final trial's metrics/traces before discarding them
      if i == trials - 1:
        if verbose > 0:
          print " TM metrics after training"
          print MonitorMixinBase.mmPrettyPrintMetrics(
            tm.mmGetDefaultMetrics())
        if verbose > 1:
          print " TM traces after training"
          print MonitorMixinBase.mmPrettyPrintTraces(
            tm.mmGetDefaultTraces(verbosity=True),
            breakOnResets=tm.mmGetTraceResets())
      tm.mmClearHistory()
def runNetworkOnSequences(self, inputSequences, inputCategories, tmLearn=True,
                          upLearn=None, classifierLearn=False, verbosity=0,
                          progressInterval=None):
  """
  Runs Union Temporal Pooler network on specified sequence.

  @param inputSequences   One or more sequences of input patterns. Each
                          should be terminated with None.
  @param inputCategories  A sequence of category representations for each
                          element in inputSequences. Each should be
                          terminated with None.
  @param tmLearn: (bool) Temporal Memory learning mode
  @param upLearn: (None, bool) Union Temporal Pooler learning mode. If None,
                  Union Temporal Pooler will not be run.
  @param classifierLearn: (bool) Classifier learning mode
  @param progressInterval: (int) Interval of console progress updates in
                           terms of timesteps.
  """
  currentTime = time.time()
  for i in xrange(len(inputSequences)):
    sensorPattern = inputSequences[i]
    inputCategory = inputCategories[i]
    self.runNetworkOnPattern(sensorPattern,
                             tmLearn=tmLearn,
                             upLearn=upLearn,
                             sequenceLabel=inputCategory)
    if classifierLearn and sensorPattern is not None:
      unionSDR = self.up.getUnionSDR()
      upCellCount = self.up.getColumnDimensions()
      self.classifier.learn(unionSDR, inputCategory, isSparse=upCellCount)
      if verbosity > 1:
        pprint.pprint("{0} is category {1}".format(unionSDR, inputCategory))
    if progressInterval is not None and i > 0 and i % progressInterval == 0:
      elapsed = (time.time() - currentTime) / 60.0
      print ("Ran {0} / {1} elements of sequence in "
             "{2:0.2f} minutes.".format(i, len(inputSequences), elapsed))
      currentTime = time.time()
      # NOTE(review): metrics printed at every progress tick — confirm this
      # placement (vs. once after the loop) matches the original intent
      print MonitorMixinBase.mmPrettyPrintMetrics(
        self.tm.mmGetDefaultMetrics())
  if verbosity >= 2:
    traces = self.tm.mmGetDefaultTraces(verbosity=verbosity)
    print MonitorMixinBase.mmPrettyPrintTraces(traces,
                                               breakOnResets=
                                               self.tm.mmGetTraceResets())
    if upLearn is not None:
      traces = self.up.mmGetDefaultTraces(verbosity=verbosity)
      print MonitorMixinBase.mmPrettyPrintTraces(traces,
                                                 breakOnResets=
                                                 self.up.mmGetTraceResets())
    print
def train(tm, sequences, feedback_seq=None, trials=trials, feedback_buffer=10,
          clearhistory=True, verbose=0):
  """
  Train an apical-feedback TM on `sequences` for `trials` passes.

  For the first `feedback_buffer` trials, random apical feedback is fed in;
  afterwards `feedback_seq` (if given) supplies feedback per timestep.

  @param tm              Temporal memory with monitor mixin
  @param sequences       Patterns; None entries reset the TM
  @param feedback_seq    Optional per-timestep apical feedback patterns
  @param trials          (int) Number of passes (default read from the
                         module-level `trials` — NOTE(review): default is
                         captured at def time; confirm this is intended)
  @param feedback_buffer (int) Trials that use random feedback instead
  @param clearhistory    (bool) Clear TM monitor history after each trial
  @param verbose         (int) >0 prints metrics, >1 also prints traces
  """
  for i in range(trials):
    for j, sensorPattern in enumerate(sequences):
      if sensorPattern is None:
        tm.reset()
      else:
        if i < feedback_buffer:
          # NOTE(review): feedback_n is a module-level global — confirm
          feedback = set(
            [random.randint(0, 2047) for _ in range(feedback_n)])
        elif feedback_seq is not None:
          feedback = feedback_seq[j]
        else:
          feedback = set()
        tm.compute(sensorPattern,
                   activeApicalCells=feedback,
                   learn=True,
                   sequenceLabel=None)
    if clearhistory:
      # Print the final trial's metrics/traces before discarding them
      if i == trials - 1:
        if verbose > 0:
          print " TM metrics after training"
          print MonitorMixinBase.mmPrettyPrintMetrics(
            tm.mmGetDefaultMetrics())
        if verbose > 1:
          print " TM traces after training"
          print MonitorMixinBase.mmPrettyPrintTraces(
            tm.mmGetDefaultTraces(verbosity=True),
            breakOnResets=tm.mmGetTraceResets())
      tm.mmClearHistory()
def runUP(tm, mutate_times, sequences, alphabet, up=None, mutation=0,
          verbose=0):
  """
  Run a trained TM over a test sequence with apical feedback from a
  (non-learning) Union Pooler, optionally perturbing selected timesteps.

  @param tm           Temporal memory with monitor mixin
  @param mutate_times Collection of timestep indices to perturb
  @param sequences    Patterns; None entries reset the TM
  @param alphabet     Alphabet used by labelPattern() to name patterns
  @param up           Optional union pooler; None disables feedback
  @param mutation     If truthy, perturbed timesteps are SKIPPED entirely;
                      otherwise the pattern is replaced with random bits
  @param verbose      (int) >0 prints metrics, >1 also prints traces
  @return (ys, allLabels) where ys[i] = #unpredicted active columns at step i
  """
  allLabels = []
  for j, sensorPattern in enumerate(sequences):
    if sensorPattern is None:
      tm.reset()
    else:
      if j in mutate_times:
        if mutation:
          continue
        else:
          # Replace with random noise pattern of the same cardinality
          sensorPattern = set([random.randint(0, 2047)
                               for _ in sensorPattern])
      if up is None:
        feedback = set()
      else:
        # Apical feedback = currently-active cells of the union SDR
        feedback = set(np.nonzero(up.getUnionSDR())[0])
      tm.compute(sensorPattern,
                 activeApicalCells=feedback,
                 learn=True,
                 sequenceLabel=None)
      if up is not None:
        activeCells, predActiveCells, burstingCols, = getUnionTemporalPoolerInput(tm)
        up.compute(activeCells, predActiveCells, learn=False)
      allLabels.append(labelPattern(sensorPattern, alphabet))
  ys = [len(x) for x in tm.mmGetTraceUnpredictedActiveColumns().data]
  if verbose > 0:
    print " TM metrics on test sequence"
    print MonitorMixinBase.mmPrettyPrintMetrics(tm.mmGetDefaultMetrics())
  if verbose > 1:
    print MonitorMixinBase.mmPrettyPrintTraces(
      tm.mmGetDefaultTraces(verbosity=True),
      breakOnResets=tm.mmGetTraceResets())
  return ys, allLabels
def runNetworkOnSequences(self, inputSequences, inputCategories, tmLearn=True,
                          upLearn=None, classifierLearn=False, verbosity=0,
                          progressInterval=None):
  """
  Runs Union Pooler network on specified sequence.

  @param inputSequences   One or more sequences of input patterns. Each
                          should be terminated with None.
  @param inputCategories  A sequence of category representations for each
                          element in inputSequences. Each should be
                          terminated with None.
  @param tmLearn: (bool) Temporal Memory learning mode
  @param upLearn: (None, bool) Union Pooler learning mode. If None, Union
                  Pooler will not be run.
  @param classifierLearn: (bool) Classifier learning mode
  @param progressInterval: (int) Interval of console progress updates in
                           terms of timesteps.
  """
  currentTime = time.time()
  for i in xrange(len(inputSequences)):
    sensorPattern = inputSequences[i]
    inputCategory = inputCategories[i]
    self.runNetworkOnPattern(sensorPattern,
                             tmLearn=tmLearn,
                             upLearn=upLearn,
                             sequenceLabel=inputCategory)
    if classifierLearn and sensorPattern is not None:
      unionSDR = self.up.getUnionSDR()
      upCellCount = self.up.getColumnDimensions()
      self.classifier.learn(unionSDR, inputCategory, isSparse=upCellCount)
      if verbosity > 1:
        pprint.pprint("{0} is category {1}".format(unionSDR, inputCategory))
    if progressInterval is not None and i > 0 and i % progressInterval == 0:
      elapsed = (time.time() - currentTime) / 60.0
      print ("Ran {0} / {1} elements of sequence in "
             "{2:0.2f} minutes.".format(i, len(inputSequences), elapsed))
      currentTime = time.time()
      # NOTE(review): metrics printed at every progress tick — confirm this
      # placement (vs. once after the loop) matches the original intent
      print MonitorMixinBase.mmPrettyPrintMetrics(
        self.tm.mmGetDefaultMetrics())
  if verbosity >= 2:
    traces = self.tm.mmGetDefaultTraces(verbosity=verbosity)
    print MonitorMixinBase.mmPrettyPrintTraces(traces,
                                               breakOnResets=
                                               self.tm.mmGetTraceResets())
    if upLearn is not None:
      traces = self.up.mmGetDefaultTraces(verbosity=verbosity)
      print MonitorMixinBase.mmPrettyPrintTraces(traces,
                                                 breakOnResets=
                                                 self.up.mmGetTraceResets())
    print
def run(params, paramDir, outputDir, plotVerbosity=0, consoleVerbosity=0):
  """
  Runs the noise robustness experiment.

  :param params: A dict containing the following experiment parameters:

        patternDimensionality - Dimensionality of sequence patterns
        patternCardinality - Cardinality (# ON bits) of sequence patterns
        sequenceLength - Length of sequences shown to network
        sequenceCount - Number of unique sequences used
        trainingPasses - Number of times Temporal Memory is trained on each
        sequence
        testPresentations - Number of sequences presented in test phase
        perturbationChance - Chance of sequence perturbations during test
        phase
        temporalMemoryParams - A dict of Temporal Memory parameter overrides
        unionPoolerParams - A dict of Union Pooler parameter overrides

  :param paramDir: Path of parameter file
  :param outputDir: Output will be written to this path
  :param plotVerbosity: Plotting verbosity
  :param consoleVerbosity: Console output verbosity
  """
  print "Running Noise robustness experiment...\n"
  print "Params dir: {0}".format(os.path.join(os.path.dirname(__file__),
                                              paramDir))
  print "Output dir: {0}\n".format(os.path.join(os.path.dirname(__file__),
                                                outputDir))

  patternDimensionality = params["patternDimensionality"]
  patternCardinality = params["patternCardinality"]
  sequenceLength = params["sequenceLength"]
  sequenceCount = params["numberOfSequences"]
  trainingPasses = params["trainingPasses"]
  testPresentations = params["testPresentations"]
  perturbationChance = params["perturbationChance"]
  tmParamOverrides = params["temporalMemoryParams"]
  upParamOverrides = params["unionPoolerParams"]

  # TODO If this parameter is to be supported, the sequence generation
  # code below must change
  # Number of unique patterns from which sequences are built
  # patternAlphabetSize = params["patternAlphabetSize"]

  # Generate a sequence list and an associated labeled list (both containing a
  # set of sequences separated by None)
  start = time.time()
  print "Generating sequences..."
  patternAlphabetSize = sequenceLength * sequenceCount
  patternMachine = PatternMachine(patternDimensionality, patternCardinality,
                                  patternAlphabetSize)
  sequenceMachine = SequenceMachine(patternMachine)

  numbers = sequenceMachine.generateNumbers(sequenceCount, sequenceLength)
  generatedSequences = sequenceMachine.generateFromNumbers(numbers)
  # Each generated sequence occupies sequenceLength entries plus a trailing
  # None separator, hence the (sequenceLength + 1)-stride slicing below.
  sequenceLabels = [str(numbers[i + i*sequenceLength: i + (i+1)*sequenceLength])
                    for i in xrange(sequenceCount)]
  labeledSequences = []
  for label in sequenceLabels:
    for _ in xrange(sequenceLength):
      labeledSequences.append(label)
    labeledSequences.append(None)

  # Set up the Temporal Memory and Union Pooler network
  print "\nCreating network..."
  experiment = UnionPoolerExperiment(tmParamOverrides, upParamOverrides)

  # Train only the Temporal Memory on the generated sequences
  if trainingPasses > 0:
    print "\nTraining Temporal Memory..."
    if consoleVerbosity > 0:
      print "\nPass\tMean\t\tStdDev\t\tMax\t\t(Bursting Columns)"
    for i in xrange(trainingPasses):
      experiment.runNetworkOnSequence(generatedSequences,
                                      labeledSequences,
                                      tmLearn=True,
                                      upLearn=None,
                                      verbosity=consoleVerbosity,
                                      progressInterval=_SHOW_PROGRESS_INTERVAL)
      if consoleVerbosity > 0:
        stats = experiment.getBurstingColumnsStats()
        print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
      # Reset the TM monitor mixin's records accrued during this training pass
      # experiment.tm.mmClearHistory()
    print
    print MonitorMixinBase.mmPrettyPrintMetrics(
      experiment.tm.mmGetDefaultMetrics())
    print
    if plotVerbosity >= 2:
      plotNetworkState(experiment, plotVerbosity, trainingPasses,
                       phase="Training")

  print "\nRunning test phase..."
  # Input sequence pattern by pattern. Sequence-to-sequence progression is
  # randomly selected. At each step there is a chance of perturbation.
  # Specifically the following perturbations may occur:
  # Establish a baseline without noise
  # Establish a baseline adding the following perturbations one-by-one
  # 1) substitution of some other pattern for the normal expected pattern
  # 2) skipping expected pattern and presenting next pattern in sequence
  # 3) addition of some other pattern putting off expected pattern one time
  #    step
  # Finally measure performance on more complex perturbation
  # TODO 4) Jump to another sequence randomly (Random jump to start or random
  # position?)
  runTestPhase(experiment, generatedSequences, sequenceCount, sequenceLength,
               testPresentations, perturbationChance)
  print
  print MonitorMixinBase.mmPrettyPrintMetrics(
    experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
  print
  # plotNetworkState(experiment, plotVerbosity, trainingPasses, phase="Testing")

  elapsed = int(time.time() - start)
  print "Total time: {0:2} seconds.".format(elapsed)

  ## Write Union SDR trace
  # metricName = "activeCells"
  # outputFileName = "unionSdrTrace_{0}learningPasses.csv".format(trainingPasses)
  # writeMetricTrace(experiment, metricName, outputDir, outputFileName)

  if plotVerbosity >= 1:
    raw_input("\nPress any key to exit...")
def run(params, paramDir, outputDir, plotVerbosity=0, consoleVerbosity=0):
  """
  Runs the union overlap experiment.

  :param params: A dict of experiment parameters
  :param paramDir: Path of parameter file
  :param outputDir: Output will be written to this path
  :param plotVerbosity: Plotting verbosity
  :param consoleVerbosity: Console output verbosity
  """
  print "Running SDR overlap experiment...\n"
  print "Params dir: {0}".format(paramDir)
  print "Output dir: {0}\n".format(outputDir)

  # Dimensionality of sequence patterns
  patternDimensionality = params["patternDimensionality"]
  # Cardinality (ON / true bits) of sequence patterns
  patternCardinality = params["patternCardinality"]
  # TODO If this parameter is to be supported, the sequence generation code
  # below must change
  # Number of unique patterns from which sequences are built
  # patternAlphabetSize = params["patternAlphabetSize"]
  # Length of sequences shown to network
  sequenceLength = params["sequenceLength"]
  # Number of sequences used. Sequences may share common elements.
  numberOfSequences = params["numberOfSequences"]
  # Number of sequence passes for training the TM. Zero => no training.
  trainingPasses = params["trainingPasses"]
  tmParamOverrides = params["temporalMemoryParams"]
  upParamOverrides = params["unionPoolerParams"]

  # Generate a sequence list and an associated labeled list (both containing a
  # set of sequences separated by None)
  start = time.time()
  print "\nGenerating sequences..."
  patternAlphabetSize = sequenceLength * numberOfSequences
  patternMachine = PatternMachine(patternDimensionality, patternCardinality,
                                  patternAlphabetSize)
  sequenceMachine = SequenceMachine(patternMachine)

  numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength)
  generatedSequences = sequenceMachine.generateFromNumbers(numbers)
  # Each generated sequence occupies sequenceLength entries plus a trailing
  # None separator, hence the (sequenceLength + 1)-stride slicing below.
  sequenceLabels = [str(numbers[i + i*sequenceLength: i + (i+1)*sequenceLength])
                    for i in xrange(numberOfSequences)]
  labeledSequences = []
  for label in sequenceLabels:
    for _ in xrange(sequenceLength):
      labeledSequences.append(label)
    labeledSequences.append(None)

  # Set up the Temporal Memory and Union Pooler network
  print "\nCreating network..."
  experiment = UnionTemporalPoolerExperiment(tmParamOverrides,
                                             upParamOverrides)

  # Train only the Temporal Memory on the generated sequences
  if trainingPasses > 0:
    print "\nTraining Temporal Memory..."
    if consoleVerbosity > 0:
      print "\nPass\tBursting Columns Mean\tStdDev\tMax"
    for i in xrange(trainingPasses):
      experiment.runNetworkOnSequences(generatedSequences,
                                       labeledSequences,
                                       tmLearn=True,
                                       upLearn=None,
                                       verbosity=consoleVerbosity,
                                       progressInterval=_SHOW_PROGRESS_INTERVAL)
      if consoleVerbosity > 0:
        stats = experiment.getBurstingColumnsStats()
        print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
      # Reset the TM monitor mixin's records accrued during this training pass
      experiment.tm.mmClearHistory()
    print
    print MonitorMixinBase.mmPrettyPrintMetrics(
      experiment.tm.mmGetDefaultMetrics())
    print
    if plotVerbosity >= 2:
      plotNetworkState(experiment, plotVerbosity, trainingPasses,
                       phase="Training")

  print "\nRunning test phase..."
  experiment.runNetworkOnSequences(generatedSequences,
                                   labeledSequences,
                                   tmLearn=False,
                                   upLearn=False,
                                   verbosity=consoleVerbosity,
                                   progressInterval=_SHOW_PROGRESS_INTERVAL)
  print "\nPass\tBursting Columns Mean\tStdDev\tMax"
  stats = experiment.getBurstingColumnsStats()
  print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2])
  # Bursting during test means the TM failed to learn the sequences
  if trainingPasses > 0 and stats[0] > 0:
    print "***WARNING! MEAN BURSTING COLUMNS IN TEST PHASE IS GREATER THAN 0***"

  print
  print MonitorMixinBase.mmPrettyPrintMetrics(
    experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
  print
  plotNetworkState(experiment, plotVerbosity, trainingPasses, phase="Testing")

  elapsed = int(time.time() - start)
  print "Total time: {0:2} seconds.".format(elapsed)

  # Write Union SDR trace
  metricName = "activeCells"
  outputFileName = "unionSdrTrace_{0}learningPasses.csv".format(trainingPasses)
  writeMetricTrace(experiment, metricName, outputDir, outputFileName)

  if plotVerbosity >= 1:
    raw_input("Press any key to exit...")
def main(): print "Initializing robot..." robot = Robot() print "Initializing model..." model = Model() print "Initializing plot..." plot = Plot(model) print "Initializing classifier..." classifier = Classifier() with open(OUTFILE_PATH, "wb") as csvFile: csvWriter = csv.writer(csvFile) for i in count(1): behaviorType = None while behaviorType is None: behaviorType = raw_input("Enter behavior type: " "Exhaustive (e), Random (r), " "Sweep (s), User (u): ") behaviorType = behaviorType if behaviorType in ["e", "r", "s", "u"] else None targets = None while targets is None: try: targets = input("Enter targets (Python code returning a list): ") targets = targets if type(targets) is list and len(targets) else None except: pass def callback(sensorValue, current, target): motorValue = target - current row = [sensorValue, motorValue, i] csvWriter.writerow(row) csvFile.flush() model.feed(sensorValue, motorValue, sequenceLabel=i) tpActiveCells = model.experimentRunner.tp.mmGetTraceActiveCells().data[-1] classification = classifier.feed(tpActiveCells) print "Current: {0}\tSensor: {1}\tNext: {2}\tClassification: {3}".format( current, sensorValue, target, classification) if classification is not None: robot.playTune(classification) plot.update(model) if behaviorType == "s": sweep(targets, robot, callback) elif behaviorType == "e": exhaustive(targets, robot, callback) elif behaviorType == "r": randomlyExplore(targets, robot, callback) print MonitorMixinBase.mmPrettyPrintTraces( model.experimentRunner.tm.mmGetDefaultTraces(verbosity=2) + model.experimentRunner.tp.mmGetDefaultTraces(verbosity=2), breakOnResets=model.experimentRunner.tm.mmGetTraceResets()) print MonitorMixinBase.mmPrettyPrintMetrics( model.experimentRunner.tm.mmGetDefaultMetrics() + model.experimentRunner.tp.mmGetDefaultMetrics()) robot.reset() doReset = None while doReset is None: doReset = raw_input("Reset (y/n)? 
") doReset = doReset if doReset in ["y", "n"] else None if doReset == "y": model.experimentRunner.tm.reset() model.experimentRunner.tp.reset() model.experimentRunner.tm.mmClearHistory() model.experimentRunner.tp.mmClearHistory()
tmLearn=False, tpLearn=False, verbosity=VERBOSITY, showProgressInterval=SHOW_PROGRESS_INTERVAL) print "Done testing.\n" if VERBOSITY >= 2: print "TP Stability:" print print runner.tp.mmPrettyPrintDataStabilityConfusion() print "TP Distinctness:" print print runner.tp.mmPrettyPrintDataDistinctnessConfusion() print print MonitorMixinBase.mmPrettyPrintMetrics( runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics()) print header = ["# worlds", "# elements"] if not headerWritten else None row = [numWorlds, numElements] for metric in (runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics()): row += [ metric.min, metric.max, metric.sum, metric.mean, metric.standardDeviation ] if header: header += [
def runTestPhase(experiment, tmLearn=False, upLearn=True,
                 outputfileName='results/TemporalPoolingOutputs.pdf'):
  """Run the test phase of a Union Temporal Pooler experiment and plot traces.

  Clears TM/UP monitor history, replays the (module-global) generated
  sequences through the TM and Union Pooler while recording per-timestep
  traces, prints bursting-column stats and metrics, and saves four trace
  plots (SP SDR, persistence, union SDR, new connections) to a PDF.

  :param experiment: experiment object exposing `tm`, `up`, and helpers
  :param tmLearn: whether the Temporal Memory learns during the test phase
  :param upLearn: whether the Union Pooler learns during the test phase
  :param outputfileName: path of the PDF written with the trace plots

  NOTE(review): relies on module-level globals `generatedSequences`,
  `labeledSequences`, `trainingPasses`, and `sequenceLength` — confirm they
  are defined before this is called.
  """
  print "\nRunning test phase..."
  print "tmLearn: ", tmLearn
  print "upLearn: ", upLearn

  inputSequences = generatedSequences
  inputCategories = labeledSequences

  # Start from a clean slate: wipe monitor histories and layer state.
  experiment.tm.mmClearHistory()
  experiment.up.mmClearHistory()
  experiment.tm.reset()
  experiment.up.reset()

  # All traces are (numColumns x T) matrices grown one column per timestep.
  # Persistence levels across time
  poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 0))
  # union SDR across time
  activeCellsTrace = numpy.zeros((experiment.up._numColumns, 0))
  # active cells in SP across time
  activeSPTrace = numpy.zeros((experiment.up._numColumns, 0))
  # number of connections for SP cells
  connectionCountTrace = numpy.zeros((experiment.up._numColumns, 0))
  # number of active inputs per SP cells
  activeOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))
  # number of predicted active inputs per SP cells
  predictedActiveOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))

  for _ in xrange(trainingPasses):
    experiment.tm.reset()
    experiment.up.reset()
    for i in xrange(len(inputSequences)):
      sensorPattern = inputSequences[i]
      inputCategory = inputCategories[i]
      # `None` marks a sequence boundary; note it is skipped here WITHOUT
      # resetting the TM (resets only happen once per pass above).
      if sensorPattern is None:
        pass
      else:
        experiment.tm.compute(sensorPattern,
                              learn=tmLearn,
                              sequenceLabel=inputCategory)

        # Collect the TM output that feeds the Union Pooler this timestep.
        activeCells, predActiveCells, burstingCols, = experiment.getUnionTemporalPoolerInput(
        )
        overlapsActive = experiment.up._calculateOverlap(activeCells)
        overlapsPredictedActive = experiment.up._calculateOverlap(
          predActiveCells)
        activeOverlapsTrace = numpy.concatenate(
          (activeOverlapsTrace, overlapsActive.reshape(
            (experiment.up._numColumns, 1))), 1)
        predictedActiveOverlapsTrace = numpy.concatenate(
          (predictedActiveOverlapsTrace, overlapsPredictedActive.reshape(
            (experiment.up._numColumns, 1))), 1)

        experiment.up.compute(activeCells,
                              predActiveCells,
                              learn=upLearn,
                              sequenceLabel=inputCategory)

        # Snapshot UP internals for this timestep and append to the traces.
        currentPoolingActivation = experiment.up._poolingActivation.reshape(
          (experiment.up._numColumns, 1))
        poolingActivationTrace = numpy.concatenate(
          (poolingActivationTrace, currentPoolingActivation), 1)

        currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1))
        currentUnionSDR[experiment.up._unionSDR] = 1
        activeCellsTrace = numpy.concatenate(
          (activeCellsTrace, currentUnionSDR), 1)

        currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
        currentSPSDR[experiment.up._activeCells] = 1
        activeSPTrace = numpy.concatenate(
          (activeSPTrace, currentSPSDR), 1)

        connectionCountTrace = numpy.concatenate(
          (connectionCountTrace, experiment.up._connectedCounts.reshape(
            (experiment.up._numColumns, 1))), 1)

  print "\nPass\tBursting Columns Mean\tStdDev\tMax"
  stats = experiment.getBurstingColumnsStats()
  print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2])
  print
  print MonitorMixinBase.mmPrettyPrintMetrics(\
    experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
  print
  experiment.tm.mmClearHistory()

  # First difference of connection counts per column (growth per step).
  # NOTE(review): slices use n-2 / n-1, which drops the last column of the
  # diff — looks like an off-by-one; confirm intended before changing.
  newConnectionCountTrace = numpy.zeros(connectionCountTrace.shape)
  n = newConnectionCountTrace.shape[1]
  newConnectionCountTrace[:, 0:n - 2] = connectionCountTrace[:, 1:n - 1] - connectionCountTrace[:, 0:
                                                                                                n - 2]

  # estimate fraction of shared bits across adjacent time point
  unionSDRshared = experiment.up._mmComputeUnionSDRdiff()

  bitLifeList = experiment.up._mmComputeBitLifeStats()
  bitLife = numpy.array(bitLifeList)

  # Plot SP outputs, UP persistence and UP outputs in testing phase
  def showSequenceStartLine(ax, trainingPasses, sequenceLength):
    # Draw a dashed vertical line at the start of each pass. Note: reads
    # `ax1` from the enclosing scope (defined below, bound at call time).
    for i in xrange(trainingPasses):
      ax.vlines(i * sequenceLength, 0, ax1.get_ylim()[0], linestyles='--')

  ncolShow = 50
  f, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4)
  ax1.imshow(activeSPTrace[1:ncolShow, :],
             cmap=cm.Greys,
             interpolation="nearest",
             aspect='auto')
  showSequenceStartLine(ax1, trainingPasses, sequenceLength)
  ax1.set_title('SP SDR')
  ax1.set_ylabel('Columns')
  ax2.imshow(poolingActivationTrace[1:ncolShow, :],
             cmap=cm.Greys,
             interpolation="nearest",
             aspect='auto')
  showSequenceStartLine(ax2, trainingPasses, sequenceLength)
  ax2.set_title('Persistence')
  ax3.imshow(activeCellsTrace[1:ncolShow, :],
             cmap=cm.Greys,
             interpolation="nearest",
             aspect='auto')
  showSequenceStartLine(ax3, trainingPasses, sequenceLength)
  ax3.set_title('Union SDR')
  ax4.imshow(newConnectionCountTrace[1:ncolShow, :],
             cmap=cm.Greys,
             interpolation="nearest",
             aspect='auto')
  showSequenceStartLine(ax4, trainingPasses, sequenceLength)
  ax4.set_title('New Connection #')
  ax2.set_xlabel('Time (steps)')

  pp = PdfPages(outputfileName)
  pp.savefig()
  pp.close()
def run(params, paramDir, outputDir, plotVerbosity=0, consoleVerbosity=0): """ Runs the Union Pooler capacity experiment. :param params: A dict containing the following experiment parameters: patternDimensionality - Dimensionality of sequence patterns patternCardinality - Cardinality (# ON bits) of sequence patterns sequenceLength - Length of sequences shown to network sequenceCount - Number of unique sequences used trainingPasses - Number of times Temporal Memory is trained on each sequence temporalMemoryParams - A dict of Temporal Memory parameter overrides unionPoolerParams - A dict of Union Pooler parameter overrides :param paramDir: Path of parameter file :param outputDir: Output will be written to this path :param plotVerbosity: Plotting verbosity :param consoleVerbosity: Console output verbosity """ start = time.time() print "Running Union Pooler Capacity Experiment...\n" print "Params dir: {0}".format(os.path.join(os.path.dirname(__file__), paramDir)) print "Output dir: {0}\n".format(os.path.join(os.path.dirname(__file__), outputDir)) patternDimensionality = params["patternDimensionality"] patternCardinality = params["patternCardinality"] sequenceLength = params["sequenceLength"] sequenceCount = params["numberOfSequences"] trainingPasses = params["trainingPasses"] tmParamOverrides = params["temporalMemoryParams"] upParamOverrides = params["unionPoolerParams"] # Generate input data inputSequences, seqLabels = generateSequences(patternDimensionality, patternCardinality, sequenceLength, sequenceCount) print "\nCreating Network..." experiment = UnionPoolerExperiment(tmParamOverrides, upParamOverrides) # Train the Temporal Memory on the generated sequences print "\nTraining Temporal Memory..." 
for i in xrange(trainingPasses): print "\nTraining pass {0} ...\n".format(i) experiment.runNetworkOnSequences(inputSequences, seqLabels, tmLearn=True, upLearn=None, verbosity=consoleVerbosity, progressInterval=_SHOW_PROGRESS_INTERVAL) if consoleVerbosity > 0: stats = experiment.getBurstingColumnsStats() print "\nPass\tMean\t\tStdDev\t\tMax\t\t(Bursting Columns)" print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2]) print print MonitorMixinBase.mmPrettyPrintMetrics( experiment.tm.mmGetDefaultMetrics()) print experiment.tm.mmClearHistory() # Run test phase recording Union SDRs unionSdrs = runTestPhase(experiment, inputSequences, seqLabels, sequenceCount, sequenceLength, consoleVerbosity) # Output distinctness metric print "\nSequences\tDistinctness Ave\tStdDev\tMax" ave, stdDev, maxDist = getDistinctness(unionSdrs) print "{0}\t{1}\t{2}\t{3}".format(sequenceCount, ave, stdDev, maxDist) # Check bursting columns metric during test phase print "\nSequences\tBursting Columns Mean\tStdDev\tMax" stats = experiment.getBurstingColumnsStats() print "{0}\t{1}\t{2}\t{3}".format(sequenceCount, stats[0], stats[1], stats[2]) if trainingPasses > 0 and stats[0] > 0: print "***Warning! Mean bursing columns > 0 in test phase***" print print MonitorMixinBase.mmPrettyPrintMetrics( experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics()) print print "Total time: {0:2} seconds.".format(int(time.time() - start))
def runTestPhase(experiment, tmLearn=False, upLearn=True,
                 outputfileName='results/TemporalPoolingOutputs.pdf'):
  """Run the test phase of a Union Pooler experiment and plot traces.

  Replays the (module-global) generated sequences through the TM (with
  `formInternalConnections=True`) and the Union Pooler while recording
  per-timestep traces, prints bursting-column stats and metrics, then saves
  four trace plots (SP SDR, persistence, union SDR, new connections) to PDF.

  :param experiment: experiment object exposing `tm`, `up`, and helpers
  :param tmLearn: whether the Temporal Memory learns during the test phase
  :param upLearn: whether the Union Pooler learns during the test phase
  :param outputfileName: path of the PDF written with the trace plots

  NOTE(review): relies on module-level globals `generatedSequences`,
  `labeledSequences`, `trainingPasses`, and `sequenceLength` — confirm they
  are defined before this is called.
  """
  print "\nRunning test phase..."
  print "tmLearn: ", tmLearn
  print "upLearn: ", upLearn

  inputSequences = generatedSequences
  inputCategories = labeledSequences

  # Start from a clean slate: wipe monitor histories and layer state.
  experiment.tm.mmClearHistory()
  experiment.up.mmClearHistory()
  experiment.tm.reset()
  experiment.up.reset()

  # All traces are (numColumns x T) matrices grown one column per timestep.
  # Persistence levels across time
  poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 0))
  # union SDR across time
  activeCellsTrace = numpy.zeros((experiment.up._numColumns, 0))
  # active cells in SP across time
  activeSPTrace = numpy.zeros((experiment.up._numColumns, 0))
  # number of connections for SP cells
  connectionCountTrace = numpy.zeros((experiment.up._numColumns, 0))
  # number of active inputs per SP cells
  activeOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))
  # number of predicted active inputs per SP cells
  predictedActiveOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))

  for _ in xrange(trainingPasses):
    experiment.tm.reset()
    experiment.up.reset()
    for i in xrange(len(inputSequences)):
      sensorPattern = inputSequences[i]
      inputCategory = inputCategories[i]
      # `None` marks a sequence boundary; it is skipped here WITHOUT
      # resetting the TM (resets only happen once per pass above).
      if sensorPattern is None:
        pass
      else:
        experiment.tm.compute(sensorPattern,
                              formInternalConnections=True,
                              learn=tmLearn,
                              sequenceLabel=inputCategory)

        # Collect the TM output that feeds the Union Pooler this timestep.
        activeCells, predActiveCells, burstingCols, = experiment.getUnionPoolerInput()
        overlapsActive = experiment.up._calculateOverlap(activeCells)
        overlapsPredictedActive = experiment.up._calculateOverlap(predActiveCells)
        activeOverlapsTrace = numpy.concatenate((activeOverlapsTrace, overlapsActive.reshape((experiment.up._numColumns,1))), 1)
        predictedActiveOverlapsTrace = numpy.concatenate((predictedActiveOverlapsTrace, overlapsPredictedActive.reshape((experiment.up._numColumns,1))), 1)

        experiment.up.compute(activeCells,
                              predActiveCells,
                              learn=upLearn,
                              sequenceLabel=inputCategory)

        # Snapshot UP internals for this timestep and append to the traces.
        currentPoolingActivation = experiment.up._poolingActivation.reshape((experiment.up._numColumns, 1))
        poolingActivationTrace = numpy.concatenate((poolingActivationTrace, currentPoolingActivation), 1)

        currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1))
        currentUnionSDR[experiment.up._unionSDR] = 1
        activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1)

        currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
        currentSPSDR[experiment.up._activeCells] = 1
        activeSPTrace = numpy.concatenate((activeSPTrace, currentSPSDR), 1)

        connectionCountTrace = numpy.concatenate((connectionCountTrace, experiment.up._connectedCounts.reshape((experiment.up._numColumns, 1))), 1)

  print "\nPass\tBursting Columns Mean\tStdDev\tMax"
  stats = experiment.getBurstingColumnsStats()
  print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2])
  print
  print MonitorMixinBase.mmPrettyPrintMetrics(\
      experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
  print
  experiment.tm.mmClearHistory()

  # First difference of connection counts per column (growth per step).
  # NOTE(review): slices use n-2 / n-1, which drops the last column of the
  # diff — looks like an off-by-one; confirm intended before changing.
  newConnectionCountTrace = numpy.zeros(connectionCountTrace.shape)
  n = newConnectionCountTrace.shape[1]
  newConnectionCountTrace[:,0:n-2] = connectionCountTrace[:,1:n-1] - connectionCountTrace[:,0:n-2]

  # estimate fraction of shared bits across adjacent time point
  unionSDRshared = experiment.up._mmComputeUnionSDRdiff()

  bitLifeList = experiment.up._mmComputeBitLifeStats()
  bitLife = numpy.array(bitLifeList)

  # Plot SP outputs, UP persistence and UP outputs in testing phase
  def showSequenceStartLine(ax, trainingPasses, sequenceLength):
    # Draw a dashed vertical line at the start of each pass. Note: reads
    # `ax1` from the enclosing scope (defined below, bound at call time).
    for i in xrange(trainingPasses):
      ax.vlines(i*sequenceLength, 0, ax1.get_ylim()[0], linestyles='--')

  ncolShow = 50
  f, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1,ncols=4)
  ax1.imshow(activeSPTrace[1:ncolShow,:], cmap=cm.Greys,interpolation="nearest",aspect='auto')
  showSequenceStartLine(ax1, trainingPasses, sequenceLength)
  ax1.set_title('SP SDR')
  ax1.set_ylabel('Columns')
  ax2.imshow(poolingActivationTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
  showSequenceStartLine(ax2, trainingPasses, sequenceLength)
  ax2.set_title('Persistence')
  ax3.imshow(activeCellsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
  showSequenceStartLine(ax3, trainingPasses, sequenceLength)
  ax3.set_title('Union SDR')
  ax4.imshow(newConnectionCountTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
  showSequenceStartLine(ax4, trainingPasses, sequenceLength)
  ax4.set_title('New Connection #')
  ax2.set_xlabel('Time (steps)')

  pp = PdfPages(outputfileName)
  pp.savefig()
  pp.close()
def experiment1():
  """Union-pooling-on-learned-TM experiment (SDR overlap, experiment 1).

  Loads parameters from a YAML file, generates labeled sequences, trains the
  Temporal Memory on them, then replays the sequences (TM learning ON, UP
  learning OFF) while recording Union Pooler traces, and writes two PDFs of
  plots to results/. Prints progress and metrics throughout.
  """
  paramDir = 'params/1024_baseline/5_trainingPasses.yaml'
  outputDir = 'results/'
  params = yaml.safe_load(open(paramDir, 'r'))
  options = {'plotVerbosity': 2, 'consoleVerbosity': 2}
  plotVerbosity = 2
  consoleVerbosity = 1

  print "Running SDR overlap experiment...\n"
  print "Params dir: {0}".format(paramDir)
  print "Output dir: {0}\n".format(outputDir)

  # Dimensionality of sequence patterns
  patternDimensionality = params["patternDimensionality"]
  # Cardinality (ON / true bits) of sequence patterns
  patternCardinality = params["patternCardinality"]
  # TODO If this parameter is to be supported, the sequence generation code
  # below must change
  # Number of unique patterns from which sequences are built
  # patternAlphabetSize = params["patternAlphabetSize"]
  # Length of sequences shown to network
  sequenceLength = params["sequenceLength"]
  # Number of sequences used. Sequences may share common elements.
  numberOfSequences = params["numberOfSequences"]
  # Number of sequence passes for training the TM. Zero => no training.
  trainingPasses = params["trainingPasses"]
  tmParamOverrides = params["temporalMemoryParams"]
  upParamOverrides = params["unionPoolerParams"]

  # Generate a sequence list and an associated labeled list (both containing a
  # set of sequences separated by None)
  start = time.time()
  print "\nGenerating sequences..."
  patternAlphabetSize = sequenceLength * numberOfSequences
  patternMachine = PatternMachine(patternDimensionality, patternCardinality,
                                  patternAlphabetSize)
  sequenceMachine = SequenceMachine(patternMachine)

  numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength)
  generatedSequences = sequenceMachine.generateFromNumbers(numbers)
  # One string label per sequence, derived from the number list.
  # NOTE(review): slice `i + i*sequenceLength : i + (i+1)*sequenceLength`
  # accounts for the per-sequence None separators in `numbers`; verify.
  sequenceLabels = [
      str(numbers[i + i * sequenceLength:i + (i + 1) * sequenceLength])
      for i in xrange(numberOfSequences)
  ]
  # Expand labels to one per pattern, with None marking sequence boundaries.
  labeledSequences = []
  for label in sequenceLabels:
    for _ in xrange(sequenceLength):
      labeledSequences.append(label)
    labeledSequences.append(None)

  # Set up the Temporal Memory and Union Pooler network
  print "\nCreating network..."
  experiment = UnionTemporalPoolerExperiment(tmParamOverrides, upParamOverrides)

  # Train only the Temporal Memory on the generated sequences
  if trainingPasses > 0:
    print "\nTraining Temporal Memory..."
    if consoleVerbosity > 0:
      print "\nPass\tBursting Columns Mean\tStdDev\tMax"

    for i in xrange(trainingPasses):
      experiment.runNetworkOnSequences(
        generatedSequences,
        labeledSequences,
        tmLearn=True,
        upLearn=None,
        verbosity=consoleVerbosity,
        progressInterval=_SHOW_PROGRESS_INTERVAL)

      if consoleVerbosity > 0:
        stats = experiment.getBurstingColumnsStats()
        print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])

      # Reset the TM monitor mixin's records accrued during this training pass
      # experiment.tm.mmClearHistory()

      print
      print MonitorMixinBase.mmPrettyPrintMetrics(
        experiment.tm.mmGetDefaultMetrics())
      print

    experiment.tm.mmClearHistory()
    experiment.up.mmClearHistory()

  print "\nRunning test phase..."
  inputSequences = generatedSequences
  inputCategories = labeledSequences
  # Test-phase configuration: TM keeps learning, UP is frozen.
  tmLearn = True
  upLearn = False
  classifierLearn = False
  currentTime = time.time()

  experiment.tm.reset()
  experiment.up.reset()

  # Traces are (numColumns x T) matrices grown one column per timestep; note
  # they start with one all-zero column (shape (_, 1)), not empty.
  poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 1))
  activeCellsTrace = numpy.zeros((experiment.up._numColumns, 1))
  activeSPTrace = numpy.zeros((experiment.up._numColumns, 1))

  for _ in xrange(trainingPasses):
    experiment.tm.reset()
    for i in xrange(len(inputSequences)):
      sensorPattern = inputSequences[i]
      inputCategory = inputCategories[i]
      # None marks a sequence boundary; skipped without a per-sequence reset.
      if sensorPattern is None:
        pass
      else:
        experiment.tm.compute(sensorPattern,
                              learn=tmLearn,
                              sequenceLabel=inputCategory)

        if upLearn is not None:
          activeCells, predActiveCells, burstingCols, = experiment.getUnionTemporalPoolerInput(
          )
          experiment.up.compute(activeCells,
                                predActiveCells,
                                learn=upLearn,
                                sequenceLabel=inputCategory)

          # Snapshot UP internals for this timestep and append to traces.
          currentPoolingActivation = experiment.up._poolingActivation
          currentPoolingActivation = experiment.up._poolingActivation.reshape(
            (experiment.up._numColumns, 1))
          poolingActivationTrace = numpy.concatenate(
            (poolingActivationTrace, currentPoolingActivation), 1)

          currentUnionSDR = numpy.zeros(
            (experiment.up._numColumns, 1))
          currentUnionSDR[experiment.up._unionSDR] = 1
          activeCellsTrace = numpy.concatenate(
            (activeCellsTrace, currentUnionSDR), 1)

          currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
          currentSPSDR[experiment.up._activeCells] = 1
          activeSPTrace = numpy.concatenate(
            (activeSPTrace, currentSPSDR), 1)

  print "\nPass\tBursting Columns Mean\tStdDev\tMax"
  stats = experiment.getBurstingColumnsStats()
  print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2])
  print
  print MonitorMixinBase.mmPrettyPrintMetrics(\
    experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
  print
  experiment.tm.mmClearHistory()

  # estimate fraction of shared bits across adjacent time point
  unionSDRshared = experiment.up._mmComputeUnionSDRdiff()

  bitLifeList = experiment.up._mmComputeBitLifeStats()
  bitLife = numpy.array(bitLifeList)

  # Plot SP outputs, UP persistence and UP outputs in testing phase
  def showSequenceStartLine(ax, trainingPasses, sequenceLength):
    # Dashed vertical line at the start of each pass.
    for i in xrange(trainingPasses):
      ax.vlines(i * sequenceLength, 0, 100, linestyles='--')

  plt.figure()
  ncolShow = 100
  f, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3)
  ax1.imshow(activeSPTrace[1:ncolShow, :],
             cmap=cm.Greys,
             interpolation="nearest",
             aspect='auto')
  showSequenceStartLine(ax1, trainingPasses, sequenceLength)
  ax1.set_title('SP SDR')
  ax1.set_ylabel('Columns')
  ax2.imshow(poolingActivationTrace[1:100, :],
             cmap=cm.Greys,
             interpolation="nearest",
             aspect='auto')
  showSequenceStartLine(ax2, trainingPasses, sequenceLength)
  ax2.set_title('Persistence')
  ax3.imshow(activeCellsTrace[1:ncolShow, :],
             cmap=cm.Greys,
             interpolation="nearest",
             aspect='auto')
  showSequenceStartLine(ax3, trainingPasses, sequenceLength)
  plt.title('Union SDR')
  ax2.set_xlabel('Time (steps)')

  pp = PdfPages('results/UnionPoolingOnLearnedTM_Experiment1.pdf')
  pp.savefig()
  pp.close()

  # Summary figure: union SDR size over time, shared bits, bit lifetimes.
  f, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1)
  ax1.plot((sum(activeCellsTrace)) / experiment.up._numColumns * 100)
  ax1.set_ylabel('Union SDR size (%)')
  ax1.set_xlabel('Time (steps)')
  ax1.set_ylim(0, 25)
  ax2.plot(unionSDRshared)
  ax2.set_ylabel('Shared Bits')
  ax2.set_xlabel('Time (steps)')
  ax3.hist(bitLife)
  ax3.set_xlabel('Life duration for each bit')
  pp = PdfPages('results/UnionSDRproperty_Experiment1.pdf')
  pp.savefig()
  pp.close()
def run(params, paramDir, outputDir, plotVerbosity=0, consoleVerbosity=0): """ Runs the Union Pooler capacity experiment. :param params: A dict containing the following experiment parameters: patternDimensionality - Dimensionality of sequence patterns patternCardinality - Cardinality (# ON bits) of sequence patterns sequenceLength - Length of sequences shown to network sequenceCount - Number of unique sequences used trainingPasses - Number of times Temporal Memory is trained on each sequence temporalMemoryParams - A dict of Temporal Memory parameter overrides unionPoolerParams - A dict of Union Pooler parameter overrides :param paramDir: Path of parameter file :param outputDir: Output will be written to this path :param plotVerbosity: Plotting verbosity :param consoleVerbosity: Console output verbosity """ start = time.time() print "Running Union Pooler Capacity Experiment...\n" print "Params dir: {0}".format( os.path.join(os.path.dirname(__file__), paramDir)) print "Output dir: {0}\n".format( os.path.join(os.path.dirname(__file__), outputDir)) patternDimensionality = params["patternDimensionality"] patternCardinality = params["patternCardinality"] sequenceLength = params["sequenceLength"] sequenceCount = params["numberOfSequences"] trainingPasses = params["trainingPasses"] tmParamOverrides = params["temporalMemoryParams"] upParamOverrides = params["unionPoolerParams"] # Generate input data inputSequences, seqLabels = generateSequences(patternDimensionality, patternCardinality, sequenceLength, sequenceCount) print "\nCreating Network..." experiment = UnionPoolerExperiment(tmParamOverrides, upParamOverrides) # Train the Temporal Memory on the generated sequences print "\nTraining Temporal Memory..." 
for i in xrange(trainingPasses): print "\nTraining pass {0} ...\n".format(i) experiment.runNetworkOnSequences( inputSequences, seqLabels, tmLearn=True, upLearn=None, verbosity=consoleVerbosity, progressInterval=_SHOW_PROGRESS_INTERVAL) if consoleVerbosity > 0: stats = experiment.getBurstingColumnsStats() print "\nPass\tMean\t\tStdDev\t\tMax\t\t(Bursting Columns)" print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2]) print print MonitorMixinBase.mmPrettyPrintMetrics( experiment.tm.mmGetDefaultMetrics()) print experiment.tm.mmClearHistory() # Run test phase recording Union SDRs unionSdrs = runTestPhase(experiment, inputSequences, seqLabels, sequenceCount, sequenceLength, consoleVerbosity) # Output distinctness metric print "\nSequences\tDistinctness Ave\tStdDev\tMax" ave, stdDev, maxDist = getDistinctness(unionSdrs) print "{0}\t{1}\t{2}\t{3}".format(sequenceCount, ave, stdDev, maxDist) # Check bursting columns metric during test phase print "\nSequences\tBursting Columns Mean\tStdDev\tMax" stats = experiment.getBurstingColumnsStats() print "{0}\t{1}\t{2}\t{3}".format(sequenceCount, stats[0], stats[1], stats[2]) if trainingPasses > 0 and stats[0] > 0: print "***Warning! Mean bursing columns > 0 in test phase***" print print MonitorMixinBase.mmPrettyPrintMetrics( experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics()) print print "Total time: {0:2} seconds.".format(int(time.time() - start))
def experiment2():
  """Union-pooling-during-TM-learning experiment (SDR overlap, experiment 2).

  Loads parameters from a YAML file, generates labeled sequences, and — with
  the TM training section deliberately commented out — replays the sequences
  while the TM learns online (UP learning OFF), recording Union Pooler
  traces, and writes two PDFs of plots to results/.
  """
  paramDir = 'params/1024_baseline/5_trainingPasses.yaml'
  outputDir = 'results/'
  params = yaml.safe_load(open(paramDir, 'r'))
  options = {'plotVerbosity': 2, 'consoleVerbosity': 2}
  plotVerbosity = 2
  consoleVerbosity = 1

  print "Running SDR overlap experiment...\n"
  print "Params dir: {0}".format(paramDir)
  print "Output dir: {0}\n".format(outputDir)

  # Dimensionality of sequence patterns
  patternDimensionality = params["patternDimensionality"]
  # Cardinality (ON / true bits) of sequence patterns
  patternCardinality = params["patternCardinality"]
  # TODO If this parameter is to be supported, the sequence generation code
  # below must change
  # Number of unique patterns from which sequences are built
  # patternAlphabetSize = params["patternAlphabetSize"]
  # Length of sequences shown to network
  sequenceLength = params["sequenceLength"]
  # Number of sequences used. Sequences may share common elements.
  numberOfSequences = params["numberOfSequences"]
  # Number of sequence passes for training the TM. Zero => no training.
  trainingPasses = params["trainingPasses"]
  tmParamOverrides = params["temporalMemoryParams"]
  upParamOverrides = params["unionPoolerParams"]

  # Generate a sequence list and an associated labeled list (both containing a
  # set of sequences separated by None)
  start = time.time()
  print "\nGenerating sequences..."
  patternAlphabetSize = sequenceLength * numberOfSequences
  patternMachine = PatternMachine(patternDimensionality, patternCardinality,
                                  patternAlphabetSize)
  sequenceMachine = SequenceMachine(patternMachine)

  numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength)
  generatedSequences = sequenceMachine.generateFromNumbers(numbers)
  # One string label per sequence, derived from the number list.
  sequenceLabels = [str(numbers[i + i*sequenceLength: i + (i+1)*sequenceLength])
                    for i in xrange(numberOfSequences)]
  # Expand labels to one per pattern, with None marking sequence boundaries.
  labeledSequences = []
  for label in sequenceLabels:
    for _ in xrange(sequenceLength):
      labeledSequences.append(label)
    labeledSequences.append(None)

  # Set up the Temporal Memory and Union Pooler network
  print "\nCreating network..."
  experiment = UnionPoolerExperiment(tmParamOverrides, upParamOverrides)

  # Train only the Temporal Memory on the generated sequences
  # (intentionally disabled for this experiment: the point is to observe
  # union pooling while the TM is still learning, see test phase below)
  # if trainingPasses > 0:
  #
  #   print "\nTraining Temporal Memory..."
  #   if consoleVerbosity > 0:
  #     print "\nPass\tBursting Columns Mean\tStdDev\tMax"
  #
  #   for i in xrange(trainingPasses):
  #     experiment.runNetworkOnSequences(generatedSequences,
  #                                      labeledSequences,
  #                                      tmLearn=True,
  #                                      upLearn=None,
  #                                      verbosity=consoleVerbosity,
  #                                      progressInterval=_SHOW_PROGRESS_INTERVAL)
  #
  #     if consoleVerbosity > 0:
  #       stats = experiment.getBurstingColumnsStats()
  #       print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
  #
  #     # Reset the TM monitor mixin's records accrued during this training pass
  #     # experiment.tm.mmClearHistory()
  #
  #   print
  #   print MonitorMixinBase.mmPrettyPrintMetrics(
  #     experiment.tm.mmGetDefaultMetrics())
  #   print
  #
  #   if plotVerbosity >= 2:
  #     plotNetworkState(experiment, plotVerbosity, trainingPasses, phase="Training")
  #
  #   experiment.tm.mmClearHistory()
  #   experiment.up.mmClearHistory()

  print "\nRunning test phase..."
  inputSequences = generatedSequences
  inputCategories = labeledSequences
  # Test-phase configuration: TM learns online, UP is frozen.
  tmLearn = True
  upLearn = False
  classifierLearn = False
  currentTime = time.time()

  experiment.tm.reset()
  experiment.up.reset()

  # Traces are (numColumns x T) matrices grown one column per timestep; note
  # they start with one all-zero column (shape (_, 1)), not empty.
  poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 1))
  activeCellsTrace = numpy.zeros((experiment.up._numColumns, 1))
  activeSPTrace = numpy.zeros((experiment.up._numColumns, 1))

  # NOTE(review): unlike experiment1, there is no experiment.tm.reset()
  # between passes here — presumably intentional for continuous learning;
  # confirm.
  for _ in xrange(trainingPasses):
    for i in xrange(len(inputSequences)):
      sensorPattern = inputSequences[i]
      inputCategory = inputCategories[i]
      # None marks a sequence boundary; skipped without a per-sequence reset.
      if sensorPattern is None:
        pass
      else:
        experiment.tm.compute(sensorPattern,
                              formInternalConnections=True,
                              learn=tmLearn,
                              sequenceLabel=inputCategory)

        if upLearn is not None:
          activeCells, predActiveCells, burstingCols, = experiment.getUnionPoolerInput()
          experiment.up.compute(activeCells,
                                predActiveCells,
                                learn=upLearn,
                                sequenceLabel=inputCategory)

          # Snapshot UP internals for this timestep and append to traces.
          currentPoolingActivation = experiment.up._poolingActivation
          currentPoolingActivation = experiment.up._poolingActivation.reshape((experiment.up._numColumns, 1))
          poolingActivationTrace = numpy.concatenate((poolingActivationTrace, currentPoolingActivation), 1)

          currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1))
          currentUnionSDR[experiment.up._unionSDR] = 1
          activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1)

          currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
          currentSPSDR[experiment.up._activeCells] = 1
          activeSPTrace = numpy.concatenate((activeSPTrace, currentSPSDR), 1)

  print "\nPass\tBursting Columns Mean\tStdDev\tMax"
  stats = experiment.getBurstingColumnsStats()
  print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2])
  print
  print MonitorMixinBase.mmPrettyPrintMetrics(\
      experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
  print
  experiment.tm.mmClearHistory()

  # estimate fraction of shared bits across adjacent time point
  unionSDRshared = experiment.up._mmComputeUnionSDRdiff()

  bitLifeList = experiment.up._mmComputeBitLifeStats()
  bitLife = numpy.array(bitLifeList)

  # Plot SP outputs, UP persistence and UP outputs in testing phase
  def showSequenceStartLine(ax, trainingPasses, sequenceLength):
    # Dashed vertical line at the start of each pass.
    for i in xrange(trainingPasses):
      ax.vlines(i*sequenceLength, 0, 100, linestyles='--')

  plt.figure()
  ncolShow = 100
  f, (ax1, ax2, ax3) = plt.subplots(nrows=1,ncols=3)
  ax1.imshow(activeSPTrace[1:ncolShow,:], cmap=cm.Greys,interpolation="nearest",aspect='auto')
  showSequenceStartLine(ax1, trainingPasses, sequenceLength)
  ax1.set_title('SP SDR')
  ax1.set_ylabel('Columns')
  ax2.imshow(poolingActivationTrace[1:100,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
  showSequenceStartLine(ax2, trainingPasses, sequenceLength)
  ax2.set_title('Persistence')
  ax3.imshow(activeCellsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
  showSequenceStartLine(ax3, trainingPasses, sequenceLength)
  plt.title('Union SDR')
  ax2.set_xlabel('Time (steps)')

  pp = PdfPages('results/UnionPoolingDuringTMlearning_Experiment2.pdf')
  pp.savefig()
  pp.close()

  # Summary figure: union SDR size over time, shared bits, bit lifetimes.
  f, (ax1, ax2, ax3) = plt.subplots(nrows=3,ncols=1)
  ax1.plot((sum(activeCellsTrace))/experiment.up._numColumns*100)
  ax1.set_ylabel('Union SDR size (%)')
  ax1.set_xlabel('Time (steps)')
  ax1.set_ylim(0,25)
  ax2.plot(unionSDRshared)
  ax2.set_ylabel('Shared Bits')
  ax2.set_xlabel('Time (steps)')
  ax3.hist(bitLife)
  ax3.set_xlabel('Life duration for each bit')
  pp = PdfPages('results/UnionSDRproperty_Experiment2.pdf')
  pp.savefig()
  pp.close()
def run(numWorlds, numElements, outputDir, params=DEFAULTS):
  """
  Run one sensorimotor experiment: train the temporal memory and then the
  temporal pooler on exhaustive agents, test with random agents, and append
  a one-row metric summary to "<outputDir>/<numWorlds>x<numElements>.csv".

  @param numWorlds   (int)  Number of distinct 1D worlds to generate
  @param numElements (int)  Number of elements per world
  @param outputDir   (str)  Directory for the CSV output (created if missing)
  @param params      (dict) Must contain keys "n", "w", "tmParams", "tpParams"
  """
  # Extract params
  n = params["n"]
  w = params["w"]
  tmParams = params["tmParams"]
  tpParams = params["tpParams"]

  # Initialize output
  if not os.path.exists(outputDir):
    os.makedirs(outputDir)
  csvFilePath = os.path.join(outputDir,
                             "{0}x{1}.csv".format(numWorlds, numElements))

  # Initialize experiment
  start = time.time()
  universe = OneDUniverse(nSensor=n, wSensor=w, nMotor=n, wMotor=w)

  # Run the experiment
  with open(csvFilePath, 'wb') as csvFile:
    csvWriter = csv.writer(csvFile)

    print ("Experiment parameters: "
           "(# worlds = {0}, # elements = {1}, n = {2}, w = {3})".format(
             numWorlds, numElements, n, w))
    print "Temporal memory parameters: {0}".format(tmParams)
    print "Temporal pooler parameters: {0}".format(tpParams)
    print
    print "Setting up experiment..."
    runner = SensorimotorExperimentRunner(tmOverrides=tmParams,
                                          tpOverrides=tpParams)
    print "Done setting up experiment."
    print

    # Build one exhaustive agent (training) and one random agent (testing)
    # per world; each world covers its own disjoint range of elements.
    exhaustiveAgents = []
    randomAgents = []
    completeSequenceLength = numElements**2
    for world in xrange(numWorlds):
      elements = range(world * numElements, world * numElements + numElements)
      exhaustiveAgents.append(
        ExhaustiveOneDAgent(OneDWorld(universe, elements), 0))

      possibleMotorValues = range(-numElements, numElements + 1)
      # Motor command 0 (stay in place) is not allowed.
      possibleMotorValues.remove(0)
      randomAgents.append(
        RandomOneDAgent(OneDWorld(universe, elements), numElements / 2,
                        possibleMotorValues=possibleMotorValues))

    print "Training (worlds: {0}, elements: {1})...".format(
      numWorlds, numElements)
    print

    # Phase 1: temporal memory learns, temporal pooler disabled.
    print "Training temporal memory..."
    sequences = runner.generateSequences(completeSequenceLength * 2,
                                         exhaustiveAgents,
                                         verbosity=VERBOSITY)
    runner.feedLayers(sequences, tmLearn=True, tpLearn=False,
                      verbosity=VERBOSITY,
                      showProgressInterval=SHOW_PROGRESS_INTERVAL)

    print
    print MonitorMixinBase.mmPrettyPrintMetrics(
      runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
    print

    # Phase 2: temporal pooler learns on top of the frozen temporal memory.
    print "Training temporal pooler..."
    sequences = runner.generateSequences(completeSequenceLength * 1,
                                         exhaustiveAgents,
                                         verbosity=VERBOSITY)
    runner.feedLayers(sequences, tmLearn=False, tpLearn=True,
                      verbosity=VERBOSITY,
                      showProgressInterval=SHOW_PROGRESS_INTERVAL)

    print
    print "Done training."
    print
    print MonitorMixinBase.mmPrettyPrintMetrics(
      runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
    print

    if PLOT >= 1:
      runner.tp.mmGetPlotConnectionsPerColumn(
        title="worlds: {0}, elements: {1}".format(
          numWorlds, numElements))

    # Test phase: no learning in either layer; four shorter random walks.
    print "Testing (worlds: {0}, elements: {1})...".format(
      numWorlds, numElements)
    sequences = runner.generateSequences(completeSequenceLength / 4,
                                         randomAgents,
                                         verbosity=VERBOSITY,
                                         numSequences=4)
    runner.feedLayers(sequences, tmLearn=False, tpLearn=False,
                      verbosity=VERBOSITY,
                      showProgressInterval=SHOW_PROGRESS_INTERVAL)
    print "Done testing.\n"

    if VERBOSITY >= 2:
      print "Overlap:"
      print
      print runner.tp.mmPrettyPrintDataOverlap()
      print

    print MonitorMixinBase.mmPrettyPrintMetrics(
      runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
    print

    elapsed = int(time.time() - start)
    print "Total time: {0:2} seconds.".format(elapsed)

    # Emit a single CSV row: run parameters, duration, then
    # min/max/sum/mean/stddev for every default metric of both layers.
    header = ["# worlds", "# elements", "duration"]
    row = [numWorlds, numElements, elapsed]

    for metric in (runner.tp.mmGetDefaultMetrics() +
                   runner.tm.mmGetDefaultMetrics()):
      header += ["{0} ({1})".format(metric.prettyPrintTitle(), x)
                 for x in ["min", "max", "sum", "mean", "stddev"]]
      row += [metric.min, metric.max, metric.sum, metric.mean,
              metric.standardDeviation]

    csvWriter.writerow(header)
    csvWriter.writerow(row)
    csvFile.flush()

    if PLOT >= 1:
      raw_input("Press any key to exit...")
def feedLayers(self, sequences, tmLearn, tpLearn=None, verbosity=0, showProgressInterval=None): """ Feed the given sequences to the HTM algorithms. @param tmLearn: (bool) Either False, or True @param tpLearn: (None,bool) Either None, False, or True. If None, temporal pooler will be skipped. @param showProgressInterval: (int) Prints progress every N iterations, where N is the value of this param """ (sensorSequence, motorSequence, sensorimotorSequence, sequenceLabels) = sequences self.tm.mmClearHistory() self.tp.mmClearHistory() currentTime = time.time() for i in xrange(len(sensorSequence)): sensorPattern = sensorSequence[i] sensorimotorPattern = sensorimotorSequence[i] sequenceLabel = sequenceLabels[i] if sensorPattern is None: self.tm.reset() self.tp.reset() else: # Feed the TM self.tm.compute(sensorPattern, activeExternalCells=sensorimotorPattern, formInternalConnections=False, learn=tmLearn, sequenceLabel=sequenceLabel) # If requested, feed the TP if tpLearn is not None: tpInputVector, burstingColumns, correctlyPredictedCells = ( self.formatInputForTP()) activeArray = numpy.zeros(self.tp.getNumColumns()) self.tp.compute(tpInputVector, tpLearn, activeArray, burstingColumns, correctlyPredictedCells, sequenceLabel=sequenceLabel) if (showProgressInterval is not None and i > 0 and i % showProgressInterval == 0): print ("Fed {0} / {1} elements of the sequence " "in {2:0.2f} seconds.".format( i, len(sensorSequence), time.time() - currentTime)) currentTime = time.time() if verbosity >= 2: traces = [] traces += self.tm.mmGetDefaultTraces(verbosity=verbosity) if tpLearn is not None: traces += self.tp.mmGetDefaultTraces(verbosity=verbosity) print MonitorMixinBase.mmPrettyPrintTraces( traces, breakOnResets=self.tm.mmGetTraceResets()) print
# NOTE(review): this chunk is the tail of an agent-setup section plus a
# train/test driver; it reads `randomAgents`, `universe`, `elements`,
# `runner`, `numWorlds`, `numElements`, `completeSequenceLength`,
# `exhaustiveAgents` and `possibleMotorValues`, all defined earlier in the
# file (outside this view).
randomAgents.append(
  RandomOneDAgent(OneDWorld(universe, elements), numElements / 2,
                  possibleMotorValues=possibleMotorValues))

print "Training (worlds: {0}, elements: {1})...".format(numWorlds,
                                                        numElements)

# Single-pass training: TM and TP both learn at the same time here.
sequences = runner.generateSequences(completeSequenceLength * 2,
                                     exhaustiveAgents,
                                     verbosity=VERBOSITY)
runner.feedLayers(sequences, tmLearn=True, tpLearn=True,
                  verbosity=VERBOSITY,
                  showProgressInterval=SHOW_PROGRESS_INTERVAL)
print "Done training.\n"
print MonitorMixinBase.mmPrettyPrintMetrics(
  runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
print

if PLOT >= 1:
  runner.tp.mmGetPlotConnectionsPerColumn(
    title="worlds: {0}, elements: {1}".format(numWorlds, numElements))

# Test phase: random agents, learning disabled in both layers.
print "Testing (worlds: {0}, elements: {1})...".format(numWorlds,
                                                       numElements)
sequences = runner.generateSequences(completeSequenceLength,
                                     randomAgents,
                                     verbosity=VERBOSITY)
runner.feedLayers(sequences, tmLearn=False, tpLearn=False,
                  verbosity=VERBOSITY,
                  showProgressInterval=SHOW_PROGRESS_INTERVAL)
def feedLayers(self, sequences, tmLearn, tpLearn=None, verbosity=0,
               showProgressInterval=None):
  """
  Feed the given sequences to the HTM algorithms.

  @param tmLearn:   (bool) Either False, or True
  @param tpLearn:   (None,bool) Either None, False, or True. If None,
                    temporal pooler will be skipped.
  @param showProgressInterval: (int) Prints progress every N iterations,
                               where N is the value of this param
  """
  # sequences is a 4-tuple; motorSequence is unpacked but not used here.
  (sensorSequence,
   motorSequence,
   sensorimotorSequence,
   sequenceLabels) = sequences

  self.tm.mmClearHistory()
  self.tp.mmClearHistory()

  # Timestamp used only for the periodic progress report below.
  currentTime = time.time()

  for i in xrange(len(sensorSequence)):
    sensorPattern = sensorSequence[i]
    sensorimotorPattern = sensorimotorSequence[i]
    sequenceLabel = sequenceLabels[i]
    if sensorPattern is None:
      # A None sensor pattern marks a sequence boundary: reset both layers.
      self.tm.reset()
      self.tp.reset()
    else:
      # Feed the TM
      self.tm.compute(sensorPattern,
                      activeExternalCells=sensorimotorPattern,
                      formInternalConnections=False,
                      learn=tmLearn,
                      sequenceLabel=sequenceLabel)

      # If requested, feed the TP
      if tpLearn is not None:
        tpInputVector, burstingColumns, correctlyPredictedCells = (
          self.formatInputForTP())
        activeArray = numpy.zeros(self.tp.getNumColumns())

        self.tp.compute(tpInputVector,
                        tpLearn,
                        activeArray,
                        burstingColumns,
                        correctlyPredictedCells,
                        sequenceLabel=sequenceLabel)

    # Periodic progress report (skipped on the very first element).
    if (showProgressInterval is not None and
        i > 0 and
        i % showProgressInterval == 0):
      print ("Fed {0} / {1} elements of the sequence "
             "in {2:0.2f} seconds.".format(
               i, len(sensorSequence), time.time() - currentTime))
      currentTime = time.time()

  if verbosity >= 2:
    # Pretty-print the monitor traces for both layers, split at resets.
    traces = []
    traces += self.tm.mmGetDefaultTraces(verbosity=verbosity)
    if tpLearn is not None:
      traces += self.tp.mmGetDefaultTraces(verbosity=verbosity)

    print MonitorMixinBase.mmPrettyPrintTraces(
      traces, breakOnResets=self.tm.mmGetTraceResets())
    print