class Brain(object):
    """Couples a SpatialPooler with a sensorimotor TemporalMemory.

    Sensor input is spatially pooled before being fed to the TM; motor
    input is offset so its indices never collide with sensor columns.
    """

    def __init__(self):
        # TM_PARAMS / SP_PARAMS are module-level parameter dicts.
        self.tm = MonitoredSensorimotorTemporalMemory(**TM_PARAMS)
        self.sp = SpatialPooler(**SP_PARAMS)

    def consume_motion(self, sensor_input, motor_input, human_readable_sensor_value):
        """Feed one (sensor, motor) step through SP then TM.

        Returns a dict with the indices of the active SP columns.
        """
        # Rather than connecting the sensor input directly to columns, spatial pool over the input.
        # One example where this becomes necessary: when you combine different granularities of vision.
        # When a shape moves out of low-granularity vision to high-granularity vision, it needs to expect
        # a vague mix of white and black pixels, without being surprised by any particular pixel.
        sp_output = numpy.zeros((COLUMN_COUNT,), dtype="int")
        self.sp.compute(inputVector=sensor_input, learn=True, activeArray=sp_output)
        active_sensor_columns = set(numpy.where(sp_output > 0)[0])
        # Shift motor indices by COLUMN_COUNT so they occupy a disjoint index
        # range from the sensor columns.
        motor_pattern_no_collisions = set(map(lambda x: x + COLUMN_COUNT, motor_input))
        sensorimotor_pattern = active_sensor_columns.union(motor_pattern_no_collisions)
        self.tm.compute(active_sensor_columns,
                        activeExternalCells=sensorimotor_pattern,
                        formInternalConnections=False,
                        learn=True,
                        sequenceLabel=str(human_readable_sensor_value))
        print self.tm.mmPrettyPrintMetrics(self.tm.mmGetDefaultMetrics())
        return {"sp_output": list(get_indices_of_1(sp_output))}

    def get_predictions_for_action(self, message):
        # Placeholder; raises unconditionally.
        raise Exception("Not implemented")
def __init__(self, config):
    """Build the spatial pooler / temporal memory pair described by *config*.

    config keys used: inputDimensions, columnDimensions, uintType,
    numIterations, potentialRadius (fraction of the input space),
    amountActiveCols (fraction of the column space), inhibition,
    cellsPerColumn.
    """
    # Flattened sizes of the input and column spaces.
    numInputs = np.prod(np.array(config['inputDimensions']))
    numColumns = np.prod(np.array(config['columnDimensions']))

    # Keep the column count, output dtype and iteration budget around.
    self.colsize = numColumns
    self.datatype = config['uintType']
    self.numIterations = config['numIterations']

    # potentialRadius and amountActiveCols arrive as fractions and are
    # scaled to absolute element counts here.
    self.sp = SpatialPooler(
        inputDimensions=config['inputDimensions'],
        columnDimensions=config['columnDimensions'],
        potentialRadius=int(config['potentialRadius'] * numInputs),
        numActiveColumnsPerInhArea=math.ceil(
            config['amountActiveCols'] * numColumns),
        globalInhibition=config['inhibition']
    )

    # Output buffer the pooler writes its active columns into.
    self.activeColumns = np.zeros(numColumns, config['uintType'])

    self.tm = TemporalMemory(
        columnDimensions=config['columnDimensions'],
        cellsPerColumn=config['cellsPerColumn']
    )
def testUpdateDutyCycleHelper(self):
    """
    Tests that duty cycles are updated properly according to the mathematical
    formula. also check the effects of supplying a maxPeriod to the function.
    """
    # (Removed a dead `dc = numpy.zeros(5)` that was immediately overwritten.)

    # Case 1: all-zero new values decay each duty cycle by dc/period.
    dc = numpy.array([1000.0, 1000.0, 1000.0, 1000.0, 1000.0])
    period = 1000
    newvals = numpy.zeros(5)
    newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
    trueNewDc = [999, 999, 999, 999, 999]
    self.assertListEqual(list(newDc), trueNewDc)

    # Case 2: new values equal to the current duty cycles leave them unchanged.
    dc = numpy.array([1000.0, 1000.0, 1000.0, 1000.0, 1000.0])
    period = 1000
    newvals = numpy.zeros(5)
    newvals.fill(1000)
    newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
    trueNewDc = list(dc)
    self.assertListEqual(list(newDc), trueNewDc)

    # Case 3: larger new values pull the duty cycles up proportionally.
    dc = numpy.array([1000, 1000, 1000, 1000, 1000])
    newvals = numpy.array([2000, 4000, 5000, 6000, 7000])
    period = 1000
    newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
    trueNewDc = [1001, 1003, 1004, 1005, 1006]
    self.assertListEqual(list(newDc), trueNewDc)

    # Case 4: a small period makes the decay aggressive (halving at period=2).
    dc = numpy.array([1000, 800, 600, 400, 2000])
    newvals = numpy.zeros(5)
    period = 2
    newDc = SpatialPooler._updateDutyCyclesHelper(dc, newvals, period)
    trueNewDc = [500, 400, 300, 200, 1000]
    self.assertListEqual(list(newDc), trueNewDc)
class Example():
    """A class to hold our code. Going object oriented"""

    def __init__(self, inputShape, columnDimensions):
        """
        Parameters:
        ----------
        _inputShape : The size of the input. The product of the first and
                      second elements of this parameter determines the size
                      of the input vectors
        _columnDimensions: The size of the 2 dimensional array of columns
        """
        self.inputShape = inputShape
        self.columnDimensions = columnDimensions
        # Flattened sizes of the input and column spaces.
        self.inputSize = np.array(inputShape).prod()
        self.columnNumber = np.array(columnDimensions).prod()
        # Working buffers: current input vector and the SP's active columns.
        self.inputArray = np.zeros(self.inputSize)
        self.activeArray = np.zeros(self.columnNumber)
        # potentialRadius == inputSize lets every column see the whole input;
        # ~2% of columns win under global inhibition.
        self.sp = SP(self.inputShape,
                     self.columnDimensions,
                     potentialRadius=self.inputSize,
                     numActiveColumnsPerInhArea=int(0.02 * self.columnNumber),
                     globalInhibition=True,
                     synPermActiveInc=0.01)

    def create_input(self):
        """create a random input vector"""
        #clear the inputArray to zero before creating a new input vector
        self.inputArray[0:] = 0
        for i in range(self.inputSize):
            #randrange returns 0 or 1
            self.inputArray[i] = randrange(2)

    def run(self):
        """Run the spatial pooler with the input vector"""
        #activeArray[column]=1 if column is active after spatial pooling
        self.sp.compute(self.inputArray, True, self.activeArray)
        print self.activeArray.nonzero()

    def add_noise(self, noise_level):
        """Flip the value of 10% of input bits (add noise)

        PARAMETERS
        ----------
        noise_level : The percentage of total input bits that should be flipped
        """
        for i in range(int(noise_level * self.inputSize)):
            #0.1*self.inputSize represents 10% of the total input bits
            #random.random() returns a float between 0 and 1
            randomPosition = int(random() * self.inputSize)
            #Flipping the bit at the randomly picked position
            if self.inputArray[randomPosition] == 1:
                self.inputArray[randomPosition] = 0
            else:
                self.inputArray[randomPosition] = 1
def initModules(self, categories, inputIdx):
    """Build word/action SP+TM modules and the general TM.

    External parameters from self.modulesParams are used when they name
    exactly the expected modules; otherwise the defaults are used.

    @param categories: not referenced in this body -- TODO confirm needed
    @param inputIdx: not referenced in this body -- TODO confirm needed
    """
    modulesNames = {'wordSP', 'wordTM', 'actionSP', 'actionTM', 'generalTM'}

    if (self.modulesParams is not None) and\
            (set(self.modulesParams) == modulesNames):
        # NOTE(review): dict.update() makes the *default* values win over
        # same-key external values here -- confirm this precedence is intended.
        self.modulesParams['wordSP'].update(self.defaultWordSPParams)
        self.modulesParams['wordTM'].update(self.defaultWordTMParams)
        self.modulesParams['actionSP'].update(self.defaultActionSPParams)
        self.modulesParams['actionTM'].update(self.defaultActionTMParams)

        self.wordSP = SpatialPooler(**self.modulesParams['wordSP'])
        self.wordTM = TemporalMemory(**self.modulesParams['wordTM'])
        self.actionSP = SpatialPooler(**self.modulesParams['actionSP'])
        self.actionTM = TemporalMemory(**self.modulesParams['actionTM'])

        # The general TM's columns must be wide enough for either region's cells.
        defaultGeneralTMParams = {
            'columnDimensions': (2, max(self.wordTM.numberOfCells(),
                                        self.actionTM.numberOfCells())),
            'seed': self.tmSeed
        }

        self.modulesParams['generalTM'].update(defaultGeneralTMParams)

        self.generalTM = TemporalMemory(**self.modulesParams['generalTM'])
        print("Using external Parameters!")
    else:
        self.wordSP = SpatialPooler(**self.defaultWordSPParams)
        self.wordTM = TemporalMemory(**self.defaultWordTMParams)
        self.actionSP = SpatialPooler(**self.defaultActionSPParams)
        self.actionTM = TemporalMemory(**self.defaultActionTMParams)
        print("External parameters invalid or not found, using"\
            " the default ones")

        defaultGeneralTMParams = {
            'columnDimensions': (2, max(self.wordTM.numberOfCells(),
                                        self.actionTM.numberOfCells())),
            'seed': self.tmSeed
        }

        self.generalTM = TemporalMemory(**defaultGeneralTMParams)

    self.classifier = CLAClassifierCond(
        steps=[1, 2, 3],
        alpha=0.1,
        actValueAlpha=0.3,
        verbosity=0
    )

    # Overlap detector over action columns: counts columns matching '== 1'.
    self.startPointOverlap = CommonOverlap('==', 1,
                                           self.actionTM.columnDimensions,
                                           threshold=0.5)
def initModules(self, categories, inputIdx):
    """Build word/action SP+TM modules and the general TM.

    Near-duplicate of the other word/action initModules; kept in sync
    by hand -- see its twin before changing either.

    @param categories: not referenced in this body -- TODO confirm needed
    @param inputIdx: not referenced in this body -- TODO confirm needed
    """
    modulesNames = {
        'wordSP', 'wordTM', 'actionSP', 'actionTM', 'generalTM'
    }

    if (self.modulesParams is not None) and\
            (set(self.modulesParams) == modulesNames):
        # NOTE(review): dict.update() makes the *default* values win over
        # same-key external values here -- confirm this precedence is intended.
        self.modulesParams['wordSP'].update(self.defaultWordSPParams)
        self.modulesParams['wordTM'].update(self.defaultWordTMParams)
        self.modulesParams['actionSP'].update(self.defaultActionSPParams)
        self.modulesParams['actionTM'].update(self.defaultActionTMParams)

        self.wordSP = SpatialPooler(**self.modulesParams['wordSP'])
        self.wordTM = TemporalMemory(**self.modulesParams['wordTM'])
        self.actionSP = SpatialPooler(**self.modulesParams['actionSP'])
        self.actionTM = TemporalMemory(**self.modulesParams['actionTM'])

        # Columns wide enough for either region's cell count.
        defaultGeneralTMParams = {
            'columnDimensions': (2, max(self.wordTM.numberOfCells(),
                                        self.actionTM.numberOfCells())),
            'seed': self.tmSeed
        }

        self.modulesParams['generalTM'].update(defaultGeneralTMParams)

        self.generalTM = TemporalMemory(**self.modulesParams['generalTM'])
        print("Using external Parameters!")
    else:
        self.wordSP = SpatialPooler(**self.defaultWordSPParams)
        self.wordTM = TemporalMemory(**self.defaultWordTMParams)
        self.actionSP = SpatialPooler(**self.defaultActionSPParams)
        self.actionTM = TemporalMemory(**self.defaultActionTMParams)
        print("External parameters invalid or not found, using"\
            " the default ones")

        defaultGeneralTMParams = {
            'columnDimensions': (2, max(self.wordTM.numberOfCells(),
                                        self.actionTM.numberOfCells())),
            'seed': self.tmSeed
        }

        self.generalTM = TemporalMemory(**defaultGeneralTMParams)

    self.classifier = CLAClassifierCond(steps=[1, 2, 3],
                                        alpha=0.1,
                                        actValueAlpha=0.3,
                                        verbosity=0)

    # Overlap detector over action columns: counts columns matching '== 1'.
    self.startPointOverlap = CommonOverlap('==', 1,
                                           self.actionTM.columnDimensions,
                                           threshold=0.5)
def initModules(self, categories, inputIdx):
    """Create the general SP/TM pair and the step classifier.

    External parameters from self.modulesParams are used when they name
    exactly the expected modules; computed defaults are merged in either way.
    """
    modulesNames = {'generalSP', 'generalTM'}

    # Sizes derived from the vocabulary, the action set and the training data.
    nWords = len(categories[inputIdx['wordInput']])
    nActions = len(categories[inputIdx['actionInput']])

    inputDimensions = max(self.wordEncoder.getWidth(),
                          self.actionEncoder.getWidth())
    columnDimensions = (max((nWords + nActions),
                            len(self.trainingData)) * 2, )

    defaultGeneralSPParams = {'inputDimensions': inputDimensions,
                              'columnDimensions': columnDimensions,
                              'seed': self.spSeed}
    defaultGeneralTMParams = {'columnDimensions': columnDimensions,
                              'seed': self.tmSeed}

    haveExternalParams = (self.modulesParams is not None
                          and set(self.modulesParams) == modulesNames)

    if haveExternalParams:
        # Merge the computed defaults into the externally supplied dicts.
        self.modulesParams['generalSP'].update(defaultGeneralSPParams)
        self.modulesParams['generalTM'].update(defaultGeneralTMParams)
        self.generalSP = SpatialPooler(**self.modulesParams['generalSP'])
        self.generalTM = TemporalMemory(**self.modulesParams['generalTM'])
        print("Using external Parameters!")
    else:
        self.generalSP = SpatialPooler(**defaultGeneralSPParams)
        self.generalTM = TemporalMemory(**defaultGeneralTMParams)
        print("External parameters invalid or not found, using"
              " the default ones")

    self.classifier = CLAClassifierCond(steps=[1, 2], alpha=0.1,
                                        actValueAlpha=0.3, verbosity=0)
def read(cls, proto):
    """Read state from proto object.

    proto: PyRegionProto capnproto object
    """
    regionImpl = proto.regionImpl.as_struct(SPRegionProto)
    instance = cls(regionImpl.columnCount, regionImpl.inputWidth)

    # Copy the scalar mode flags straight off the serialized region.
    for flag in ('spatialImp', 'learningMode', 'inferenceMode',
                 'anomalyMode', 'topDownMode'):
        setattr(instance, flag, getattr(regionImpl, flag))

    spatialImp = regionImpl.spatialImp
    if spatialImp == 'py':
        # The Python implementation deserializes via its own classmethod.
        instance._sfdr = PYSpatialPooler.read(regionImpl.spatialPooler)
    elif spatialImp == 'cpp':
        # The C++ implementation reads into an already-built object.
        sfdr = CPPSpatialPooler()
        sfdr.read(regionImpl.spatialPooler)
        instance._sfdr = sfdr
    else:
        raise RuntimeError("Invalid spatialImp '{0}'. "
                           "Legal values are: 'py', 'cpp'".format(spatialImp))

    return instance
def read(cls, proto):
    """Read state from proto object.

    proto: PyRegionProto capnproto object

    Returns a new instance with mode flags and the spatial pooler
    restored from the serialized state.
    """
    regionImpl = proto.regionImpl.as_struct(SPRegionProto)
    instance = cls(regionImpl.columnCount, regionImpl.inputWidth)
    # Restore scalar mode flags.
    instance.spatialImp = regionImpl.spatialImp
    instance.learningMode = regionImpl.learningMode
    instance.inferenceMode = regionImpl.inferenceMode
    instance.anomalyMode = regionImpl.anomalyMode
    instance.topDownMode = regionImpl.topDownMode

    spatialImp = regionImpl.spatialImp
    if spatialImp == 'py':
        # Python implementation deserializes via its own classmethod.
        instance._sfdr = PYSpatialPooler.read(regionImpl.spatialPooler)
    elif spatialImp == 'cpp':
        # C++ implementation reads into an already-constructed object.
        instance._sfdr = CPPSpatialPooler()
        instance._sfdr.read(regionImpl.spatialPooler)
    else:
        raise RuntimeError(
            "Invalid spatialImp '{0}'. "
            "Legal values are: 'py', 'cpp'".format(spatialImp))

    return instance
def main():
    """Run the SP viewer over image patches and save the final frame.

    Builds a single-winner spatial pooler over 32x32 patches, drives it
    through the SPViewer UI, then writes a screenshot of the last frame.
    """
    # Instantiate our spatial pooler
    sp = SpatialPooler(
        inputDimensions=32**2, # Size of image patch
        columnDimensions=16, # Number of potential features
        potentialRadius=10000, # Ensures 100% potential pool
        potentialPct=1, # Neurons can connect to 100% of input
        globalInhibition=True,
        numActiveColumnsPerInhArea=1, # Only one feature active at a time
        # All input activity can contribute to feature output
        stimulusThreshold=0,
        synPermInactiveDec=0.1,
        synPermActiveInc=0.1,
        synPermConnected=0.1, # Connected threshold
        maxBoost=3,
        seed=1956, # The seed that Grok uses
        spVerbosity=1)

    # Drive the pooler over overlapping patches of the image for 40 epochs.
    viewer = SPViewer(sp,
                      screenWidth=512,
                      screenHeight=600,
                      imagePath='data/Image2.jpg',
                      patchSide=32,
                      patchOverlapPercent=0,
                      epochCount=40,
                      replayDelay=.1)
    viewer.run()

    # Persist the final rendered window for inspection.
    finalWindow = viewer.screen
    pygame.image.save(finalWindow, "screenshot.jpg")
def initModules(self, categories, inputIdx):
    """Create the general SP/TM pair and the step classifier.

    Sizes are derived from the word/action category counts and the
    training-data length; external self.modulesParams are used when
    they name exactly {'generalSP', 'generalTM'}.
    """
    modulesNames = {'generalSP', 'generalTM'}

    nWords = len(categories[inputIdx['wordInput']])
    nActions = len(categories[inputIdx['actionInput']])

    # Input width must fit whichever encoder is wider.
    inputDimensions = max(
        self.wordEncoder.getWidth(),
        self.actionEncoder.getWidth()
    )

    columnDimensions = (4 * max((nWords + nActions),
                                len(self.trainingData)), )

    defaultGeneralSPParams = {
        'inputDimensions': inputDimensions,
        'columnDimensions': columnDimensions,
        'seed': self.spSeed
    }
    defaultGeneralTMParams = {
        'columnDimensions': columnDimensions,
        'seed': self.tmSeed
    }

    if (self.modulesParams is not None) and\
            (set(self.modulesParams) == modulesNames):
        # NOTE(review): dict.update() makes the *default* values win over
        # same-key external values here -- confirm this precedence is intended.
        self.modulesParams['generalSP'].update(defaultGeneralSPParams)
        self.modulesParams['generalTM'].update(defaultGeneralTMParams)

        self.generalSP = SpatialPooler(**self.modulesParams['generalSP'])
        self.generalTM = TemporalMemory(**self.modulesParams['generalTM'])
    else:
        self.generalSP = SpatialPooler(**defaultGeneralSPParams)
        self.generalTM = TemporalMemory(**defaultGeneralTMParams)

    self.classifier = CLAClassifierCond(
        steps=[1, 2, 3],
        alpha=0.1,
        actValueAlpha=0.3,
        verbosity=0
    )
def testCompatibilityCppPyDirectCall2D(self):
    """Check SP implementations have same behavior with 2D input."""
    inputShape = [121, 1]
    columnShape = [30, 30]
    pySp = PySpatialPooler(inputDimensions=inputShape,
                           columnDimensions=columnShape)
    cppSp = CPPSpatialPooler(inputDimensions=inputShape,
                             columnDimensions=columnShape)

    # Turn on the first 21 input bits, leave the rest off.
    data = numpy.zeros([121, 1], dtype=uintType)
    data[:21, 0] = 1

    nCols = 900
    pyActive = numpy.zeros(nCols, dtype=uintType)
    cppActive = numpy.zeros(nCols, dtype=uintType)

    pySp.compute(data, True, pyActive)  # learn
    cppSp.compute(data, True, cppActive)

    d1 = pyActive.nonzero()[0].tolist()
    d2 = cppActive.nonzero()[0].tolist()
    self.assertListEqual(d1, d2,
                         "SP outputs are not equal: \n%s \n%s"
                         % (str(d1), str(d2)))
class Example():
    """Minimal wrapper pairing a spatial pooler with its I/O buffers."""

    def __init__(self, inputShape, columnDimensions):
        self.inputShape = inputShape
        self.columnDimensions = columnDimensions

        # Flattened sizes of the input and column spaces.
        self.inputSize = np.prod(np.array(inputShape))
        self.columnNumber = np.prod(np.array(columnDimensions))

        # Working buffers: current input vector and active-column output.
        self.inputArray = np.zeros(self.inputSize)
        self.activeArray = np.zeros(self.columnNumber)

        # Full potential pool; ~2% of columns win under global inhibition.
        self.sp = SP(self.inputShape,
                     self.columnDimensions,
                     potentialRadius=self.inputSize,
                     numActiveColumnsPerInhArea=int(0.02 * self.columnNumber),
                     globalInhibition=True,
                     synPermActiveInc=0.01)

    def run(self):
        """Feed the current input through the pooler with learning on."""
        self.sp.compute(self.inputArray, True, self.activeArray)
def testCompatibilityCppPyDirectCall1D(self):
    """Check SP implementations have same behavior with 1D input."""
    pySp = PySpatialPooler(inputDimensions=[121], columnDimensions=[300])
    cppSp = CPPSpatialPooler(inputDimensions=[121], columnDimensions=[300])

    # Turn on the first 21 input bits, leave the rest off.
    data = numpy.zeros([121], dtype=uintType)
    for i in xrange(21):
        data[i] = 1

    nCols = 300
    d1 = numpy.zeros(nCols, dtype=uintType)
    d2 = numpy.zeros(nCols, dtype=uintType)

    # Run one learning step through each implementation on identical input.
    pySp.compute(data, True, d1) # learn
    cppSp.compute(data, True, d2)

    # Compare the active-column index lists.
    d1 = d1.nonzero()[0].tolist()
    d2 = d2.nonzero()[0].tolist()
    self.assertListEqual(
        d1, d2, "SP outputs are not equal: \n%s \n%s" % (str(d1), str(d2)))
def runSaveTest():
    """Run a small SP behind the history facade and save each step to Redis.

    Returns the facade's id, which can be used to replay the saved history.
    """
    inputSize = 600
    outputSize = 2048
    sp = SP(
        inputDimensions=(inputSize,),
        columnDimensions=(outputSize,),
        potentialRadius=16,
        potentialPct=0.85,
        globalInhibition=True,
        localAreaDensity=-1.0,
        numActiveColumnsPerInhArea=40.0,
        stimulusThreshold=1,
        synPermInactiveDec=0.008,
        synPermActiveInc=0.05,
        synPermConnected=0.10,
        minPctOverlapDutyCycle=0.001,
        minPctActiveDutyCycle=0.001,
        dutyCyclePeriod=1000,
        maxBoost=2.0,
        seed=-1,
        spVerbosity=0,
        wrapAround=True
    )
    # Totally nukes any SP History data that exists in Redis.
    spHistory.nuke()
    # Create a facade around the SP that saves history as it runs.
    sp = spHistory.create(sp)
    # If the SP Facade is "active" that means it has a life spatial pooler. If it
    # is not active, it cannot compute, only playback the history.
    assert sp.isActive()
    for _ in range(0, 10):
        # Random ~10%-sparse binary encoding.
        encoding = np.zeros(shape=(inputSize,))
        for j, _ in enumerate(encoding):
            if random() < 0.1:
                encoding[j] = 1
        # For each compute cycle, save the SP state to Redis for playback later.
        sp.compute(encoding, learn=True, save=True)
    # This SP's history can be retrieved with an id.
    return sp.getId()
def run(): sp = SpatialPooler( inputDimensions=[10, 15], columnDimensions=[5, 10], potentialRadius=2, potentialPct=0.5, synPermInactiveDec=0.1, synPermActiveInc=0.1, synPermConnected=0.1, localAreaDensity=0.1, numActiveColumnsPerInhArea=-1, globalInhibition=True ) inputArray = numpy.zeros(sp.getNumInputs()) activeArray = numpy.zeros(sp.getNumColumns()) Patcher().patchSP(sp) for i in range(100): generateInput(inputArray) sp.compute(inputArray, True, activeArray) print "Ran iteration:\t{0}".format(i)
def testUpdatePermanencesForColumn(self):
    """Check clipping to [0, 1], trimming below threshold, and the
    resulting connected-synapse matrix and per-column counts."""
    sp = SpatialPooler(inputDimensions=[5],
                       columnDimensions=[5],
                       synPermConnected=0.1)
    sp._synPermTrimThreshold = 0.05

    # Raw permanences, including out-of-range and sub-threshold values.
    permanences = numpy.array([
        [-0.10, 0.500, 0.400, 0.010, 0.020],
        [0.300, 0.010, 0.020, 0.120, 0.090],
        [0.070, 0.050, 1.030, 0.190, 0.060],
        [0.180, 0.090, 0.110, 0.010, 0.030],
        [0.200, 0.101, 0.050, -0.09, 1.100]])

    # NOTE(review): truePermanences is constructed but never asserted against
    # below -- confirm whether a permanence comparison loop is missing.
    truePermanences = SparseMatrix(
        [[0.000, 0.500, 0.400, 0.000, 0.000],
         # Clip   -     -     Trim  Trim
         [0.300, 0.000, 0.000, 0.120, 0.090],
         # -     Trim  Trim   -     -
         [0.070, 0.050, 1.000, 0.190, 0.060],
         # -     -     Clip   -     -
         [0.180, 0.090, 0.110, 0.000, 0.000],
         # -     -     -      Trim  Trim
         [0.200, 0.101, 0.050, 0.000, 1.000]])
         # -     -     -      Clip  Clip

    trueConnectedSynapses = [
        [0, 1, 1, 0, 0],
        [1, 0, 0, 1, 0],
        [0, 0, 1, 1, 0],
        [1, 0, 1, 0, 0],
        [1, 1, 0, 0, 1]]

    trueConnectedCounts = [2,2,2,2,3]

    for i in xrange(sp._numColumns):
        sp._updatePermanencesForColumn(permanences[i],i)
        self.assertListEqual(
            trueConnectedSynapses[i],
            list(sp._connectedSynapses.getRow(i))
        )

    # Counts are only fully correct once every column has been updated.
    self.assertListEqual(trueConnectedCounts, list(sp._connectedCounts))
def runSaveTest():
    """Run a small SP behind the history facade and save each step to Redis.

    Returns the facade's id, which can be used to replay the saved history.
    """
    inputSize = 600
    outputSize = 2048
    sp = SP(inputDimensions=(inputSize, ),
            columnDimensions=(outputSize, ),
            potentialRadius=16,
            potentialPct=0.85,
            globalInhibition=True,
            localAreaDensity=-1.0,
            numActiveColumnsPerInhArea=40.0,
            stimulusThreshold=1,
            synPermInactiveDec=0.008,
            synPermActiveInc=0.05,
            synPermConnected=0.10,
            minPctOverlapDutyCycle=0.001,
            minPctActiveDutyCycle=0.001,
            dutyCyclePeriod=1000,
            maxBoost=2.0,
            seed=-1,
            spVerbosity=0,
            wrapAround=True)
    # Totally nukes any SP History data that exists in Redis.
    spHistory.nuke()
    # Create a facade around the SP that saves history as it runs.
    sp = spHistory.create(sp)
    # If the SP Facade is "active" that means it has a life spatial pooler. If it
    # is not active, it cannot compute, only playback the history.
    assert sp.isActive()
    for _ in range(0, 10):
        # Random ~10%-sparse binary encoding.
        encoding = np.zeros(shape=(inputSize, ))
        for j, _ in enumerate(encoding):
            if random() < 0.1:
                encoding[j] = 1
        # For each compute cycle, save the SP state to Redis for playback later.
        sp.compute(encoding, learn=True, save=True)
    # This SP's history can be retrieved with an id.
    return sp.getId()
def _create_network(self, mean=128): """ :param mean: int, the mean of the frame pix value, will be used in BASE_ENCODE. """ # some rulers of creating network # the product of the shape's two dimensions is equal to inputDimensions # columnDimensions equal to numberOfCols self.enc = MatrixEncoder(shape=self.shape, mean=mean) self.sp = SpatialPooler( inputDimensions=self.shape[0] * self.shape[1], columnDimensions=self.column_dimensions, potentialRadius=self.potential_radius, numActiveColumnsPerInhArea=self.numActive_columns_perInhArea, globalInhibition=self.global_inhibition, synPermActiveInc=self.syn_perm_active_inc, potentialPct=self.potential_pct, synPermInactiveDec=self.synPermInactiveDec, synPermConnected=self.synPermConnected, seed=self.sp_seed, localAreaDensity=self.localAreaDensity, stimulusThreshold=self.stimulusThreshold, maxBoost=self.maxBoost) self.tp = TP(numberOfCols=self.column_dimensions, cellsPerColumn=self.cells_per_column, initialPerm=self.initial_perm, connectedPerm=self.connected_perm, minThreshold=self.min_threshold, newSynapseCount=self.new_synapse_count, permanenceInc=self.permanence_inc, permanenceDec=self.permanence_dec, activationThreshold=self.activation_threshold, globalDecay=self.global_decay, burnIn=self.burn_in, pamLength=self.pam_length, maxSynapsesPerSegment=self.maxSynapsesPerSegment, maxSegmentsPerCell=self.maxSegmentsPerCell, seed=self.tp_seed, maxAge=self.maxAge)
def testBumpUpWeakColumns(self):
    """Columns whose overlap duty cycle is below the minimum get every
    potential synapse incremented (then trimmed); healthy columns are
    untouched."""
    sp = SpatialPooler(inputDimensions=[8], columnDimensions=[5])
    sp._synPermBelowStimulusInc = 0.01
    sp._synPermTrimThreshold = 0.05
    # Columns 0, 1 and 3 are below the 0.01 minimum and will be bumped.
    sp._overlapDutyCycles = numpy.array([0, 0.009, 0.1, 0.001, 0.002])
    sp._minOverlapDutyCycles = numpy.array(5*[0.01])

    sp._potentialPools = SparseBinaryMatrix(
        [[1, 1, 1, 1, 0, 0, 0, 0],
         [1, 0, 0, 0, 1, 1, 0, 1],
         [0, 0, 1, 0, 1, 1, 1, 0],
         [1, 1, 1, 0, 0, 0, 1, 0],
         [1, 1, 1, 1, 1, 1, 1, 1]])

    sp._permanences = SparseMatrix(
        [[0.200, 0.120, 0.090, 0.040, 0.000, 0.000, 0.000, 0.000],
         [0.150, 0.000, 0.000, 0.000, 0.180, 0.120, 0.000, 0.450],
         [0.000, 0.000, 0.014, 0.000, 0.032, 0.044, 0.110, 0.000],
         [0.041, 0.000, 0.000, 0.000, 0.000, 0.000, 0.178, 0.000],
         [0.100, 0.738, 0.045, 0.002, 0.050, 0.008, 0.208, 0.034]])

    truePermanences = [
        [0.210, 0.130, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000],
        #    Inc    Inc    Inc    Trim   -      -      -      -
        [0.160, 0.000, 0.000, 0.000, 0.190, 0.130, 0.000, 0.460],
        #    Inc    -      -      -      Inc    Inc    -      Inc
        [0.000, 0.000, 0.014, 0.000, 0.032, 0.044, 0.110, 0.000], #unchanged
        #    -      -      -      -      -      -      -      -
        [0.051, 0.000, 0.000, 0.000, 0.000, 0.000, 0.188, 0.000],
        #    Inc    Trim   Trim   -      -      -      Inc    -
        [0.110, 0.748, 0.055, 0.000, 0.060, 0.000, 0.218, 0.000]]

    sp._bumpUpWeakColumns()

    # Compare element-wise with tolerance for float arithmetic.
    for i in xrange(sp._numColumns):
        perm = list(sp._permanences.getRow(i))
        for j in xrange(sp._numInputs):
            self.assertAlmostEqual(truePermanences[i][j], perm[j])
def testRaisePermanenceThreshold(self):
    """Columns with fewer connected synapses than stimulusThreshold have
    all their permanences raised until enough synapses connect."""
    sp = SpatialPooler(inputDimensions=[5],
                       columnDimensions=[5],
                       synPermConnected=0.1,
                       stimulusThreshold=3)
    sp._synPermBelowStimulusInc = 0.01

    sp._permanences = SparseMatrix(
        [[0.0, 0.11, 0.095, 0.092, 0.01],
         [0.12, 0.15, 0.02, 0.12, 0.09],
         [0.51, 0.081, 0.025, 0.089, 0.31],
         [0.18, 0.0601, 0.11, 0.011, 0.03],
         [0.011, 0.011, 0.011, 0.011, 0.011]])

    sp._connectedSynapses = SparseBinaryMatrix(
        [[0, 1, 0, 0, 0],
         [1, 1, 0, 1, 0],
         [1, 0, 0, 0, 1],
         [1, 0, 1, 0, 0],
         [0, 0, 0, 0, 0]])

    sp._connectedCounts = numpy.array([1, 3, 2, 2, 0])

    truePermanences = [
        [0.0, 0.12, 0.105, 0.102, 0.0],  # incremented once
        [0.12, 0.15, 0.02, 0.12, 0.09],  # no change
        [0.53, 0.101, 0.0, 0.109, 0.33],  # increment twice
        [0.22, 0.1001, 0.15, 0.051, 0.07],  # increment four times
        [0.101, 0.101, 0.101, 0.101, 0.101]]  #increment 9 times

    trueConnectedSynapses = [
        [0, 1, 1, 1, 0],
        [1, 1, 0, 1, 0],
        [1, 1, 0, 1, 1],
        [1, 1, 1, 0, 0],
        [1, 1, 1, 1, 1]]

    trueConnectedCounts = [3, 3, 4, 3, 5]

    sp._raisePermanenceToThreshold()

    # Verify permanences, connected matrix, and per-column counts.
    for i in xrange(sp._numColumns):
        perm = list(sp._permanences.getRow(i))
        for j in xrange(sp._numInputs):
            self.assertAlmostEqual(truePermanences[i][j],perm[j])
        self.assertListEqual(
            trueConnectedSynapses[i],
            list(sp._connectedSynapses.getRow(i))
        )
        self.assertEqual(trueConnectedCounts[i], sp._connectedCounts[i])
def __init__(self, inputDimensions, columnDimensions):
    """Set up the input/column buffers and the spatial pooler.

    Parameters:
    ----------
    _inputDimensions: The size of the input. (m,n) will give a size m x n
    _columnDimensions: The size of the 2 dimensional array of columns
    """
    self.inputDimensions = inputDimensions
    self.columnDimensions = columnDimensions

    # Flattened sizes of the input and column spaces.
    self.inputSize = np.prod(np.array(inputDimensions))
    self.columnNumber = np.prod(np.array(columnDimensions))

    # Working buffers: current input vector and active-column output.
    self.inputArray = np.zeros(self.inputSize)
    self.activeArray = np.zeros(self.columnNumber)

    # Full potential pool; ~2% of columns win under global inhibition.
    self.sp = SP(self.inputDimensions,
                 self.columnDimensions,
                 potentialRadius=self.inputSize,
                 numActiveColumnsPerInhArea=int(0.02 * self.columnNumber),
                 globalInhibition=True,
                 synPermActiveInc=0.01)
def __init__(self, inputShape, columnDimensions):
    """
    Parameters:
    ----------
    _inputShape : The size of the input. The product of the first and
                  second elements of this parameter determines the size
                  of the input vectors
    _columnDimensions: The size of the 2 dimensional array of columns
    """
    self.inputShape = inputShape
    self.columnDimensions = columnDimensions
    # Flattened sizes of the input and column spaces.
    self.inputSize = np.array(inputShape).prod()
    self.columnNumber = np.array(columnDimensions).prod()
    # Working buffers: current input vector and active-column output.
    self.inputArray = np.zeros(self.inputSize)
    self.activeArray = np.zeros(self.columnNumber)
    # Full potential pool; ~2% of columns win under global inhibition.
    self.sp = SP(self.inputShape,
                 self.columnDimensions,
                 potentialRadius=self.inputSize,
                 numActiveColumnsPerInhArea=int(0.02 * self.columnNumber),
                 globalInhibition=True,
                 synPermActiveInc=0.01)
def __init__(self, inputShape, columnDimensions):
    """
    Parameters:
    ----------
    _inputShape : The size of the input. (m,n) will give a size m x n
    _columnDimensions : The size of the 2 dimensional array of columns
    """
    self.inputShape = inputShape
    self.columnDimensions = columnDimensions
    # Flattened sizes of the input and column spaces.
    self.inputSize = np.array(inputShape).prod()
    self.columnNumber = np.array(columnDimensions).prod()
    # Working buffers: current input vector and active-column output.
    self.inputArray = np.zeros(self.inputSize)
    self.activeArray = np.zeros(self.columnNumber)
    # Full potential pool; ~2% of columns win under global inhibition.
    self.sp = SP(self.inputShape,
                 self.columnDimensions,
                 potentialRadius = self.inputSize,
                 numActiveColumnsPerInhArea = int(0.02*self.columnNumber),
                 globalInhibition = True,
                 synPermActiveInc = 0.01
                 )
def __init__(self, inputShape, columnDimensions):
    """
    Parameters:
    ----------
    _inputShape : The size of the input. The product of the first and
                  second elements of this parameter determines the size
                  of the input vectors
    _columnDimensions: The size of the 2 dimensional array of columns
    """
    self.inputShape = inputShape
    self.columnDimensions = columnDimensions
    # Flattened sizes of the input and column spaces.
    self.inputSize = np.array(inputShape).prod()
    self.columnNumber = np.array(columnDimensions).prod()
    # Working buffers: current input vector and active-column output.
    self.inputArray = np.zeros(self.inputSize)
    self.activeArray = np.zeros(self.columnNumber)
    # Full potential pool; ~2% of columns win under global inhibition.
    self.sp = SP(self.inputShape,
                 self.columnDimensions,
                 potentialRadius = self.inputSize,
                 numActiveColumnsPerInhArea = int(0.02*self.columnNumber),
                 globalInhibition = True,
                 synPermActiveInc = 0.01
                 )
def run():
    """Build a small 2D spatial pooler, patch it, and drive it with random input."""
    sp = SpatialPooler(inputDimensions=[10, 15],
                       columnDimensions=[5, 10],
                       potentialRadius=2,
                       potentialPct=0.5,
                       synPermInactiveDec=0.1,
                       synPermActiveInc=0.1,
                       synPermConnected=0.1,
                       localAreaDensity=0.1,
                       numActiveColumnsPerInhArea=-1,
                       globalInhibition=True)
    # Buffers sized to the pooler's input and column spaces.
    inputArray = numpy.zeros(sp.getNumInputs())
    activeArray = numpy.zeros(sp.getNumColumns())
    # Attach the visualization/monitoring patcher before computing.
    Patcher().patchSP(sp)
    for i in range(100):
        generateInput(inputArray)
        sp.compute(inputArray, True, activeArray)
        print "Ran iteration:\t{0}".format(i)
class FeedbackModel(LearningModel):
    """
    Structure:
        WordEncoder -> WordSP -> WordTM
        ActionEncoder -> ActionSP -> ActionTM
        WordTM, ActionTM -> GeneralSP -> GeneralTM
    """

    def __init__(self, wordEncoder, actionEncoder, trainingSet,
                 modulesParams=None):
        """
        @param wordEncoder
        @param actionEncoder
        @param trainingSet: A module containing the trainingData, all of
            its categories and the inputIdx dict that maps each index
            in categories to an input name.
        """
        super(FeedbackModel, self).__init__(wordEncoder, actionEncoder,
                                            trainingSet, modulesParams)

        self.initModules(trainingSet.categories, trainingSet.inputIdx)

        # Directed wiring of the pipeline: each key feeds its value.
        self.structure = {
            'wordInput': 'wordEnc',
            'wordEnc': 'wordSP',
            'wordSP': 'wordTM',
            'wordTM': 'generalSP',
            ###
            'actionInput': 'actionEnc',
            'actionEnc': 'actionSP',
            'actionSP': 'actionTM',
            'actionTM': 'generalSP',
            ###
            'generalSP': 'generalTM',
            'generalTM': None
        }
        self.modules = {
            'generalTM': self.generalTM,
            #'generalSP': self.generalSP,
            'wordTM': self.wordTM,
            'wordSP': self.wordSP,
            'wordEnc': self.wordEncoder,
            'actionTM': self.actionTM,
            'actionSP': self.actionSP,
            'actionEnc': self.actionEncoder
        }

        #self.layer = Layer(self.structure, self.modules, self.classifier)

    def initModules(self, categories, inputIdx):
        """Build word/action SP+TM modules, the general TM, the classifier
        and the start-point overlap tracker.

        @param categories: not referenced in this body -- TODO confirm needed
        @param inputIdx: not referenced in this body -- TODO confirm needed
        """
        modulesNames = {'wordSP', 'wordTM', 'actionSP', 'actionTM',
                        'generalTM'}

        if (self.modulesParams is not None) and\
                (set(self.modulesParams) == modulesNames):
            # NOTE(review): dict.update() makes the *default* values win over
            # same-key external values -- confirm this precedence is intended.
            self.modulesParams['wordSP'].update(self.defaultWordSPParams)
            self.modulesParams['wordTM'].update(self.defaultWordTMParams)
            self.modulesParams['actionSP'].update(self.defaultActionSPParams)
            self.modulesParams['actionTM'].update(self.defaultActionTMParams)

            self.wordSP = SpatialPooler(**self.modulesParams['wordSP'])
            self.wordTM = TemporalMemory(**self.modulesParams['wordTM'])
            self.actionSP = SpatialPooler(**self.modulesParams['actionSP'])
            self.actionTM = TemporalMemory(**self.modulesParams['actionTM'])

            # General TM columns must fit either region's cell count.
            defaultGeneralTMParams = {
                'columnDimensions': (2, max(self.wordTM.numberOfCells(),
                                            self.actionTM.numberOfCells())),
                'seed': self.tmSeed
            }

            self.modulesParams['generalTM'].update(defaultGeneralTMParams)

            self.generalTM = TemporalMemory(**self.modulesParams['generalTM'])
            print("Using external Parameters!")
        else:
            self.wordSP = SpatialPooler(**self.defaultWordSPParams)
            self.wordTM = TemporalMemory(**self.defaultWordTMParams)
            self.actionSP = SpatialPooler(**self.defaultActionSPParams)
            self.actionTM = TemporalMemory(**self.defaultActionTMParams)
            print("External parameters invalid or not found, using"\
                " the default ones")

            defaultGeneralTMParams = {
                'columnDimensions': (2, max(self.wordTM.numberOfCells(),
                                            self.actionTM.numberOfCells())),
                'seed': self.tmSeed
            }

            self.generalTM = TemporalMemory(**defaultGeneralTMParams)

        self.classifier = CLAClassifierCond(steps=[1, 2, 3],
                                            alpha=0.1,
                                            actValueAlpha=0.3,
                                            verbosity=0)

        # Overlap detector over action columns: counts columns matching '== 1'.
        self.startPointOverlap = CommonOverlap('==', 1,
                                               self.actionTM.columnDimensions,
                                               threshold=0.5)

    def processInput(self, sentence, actionSeq, wordSDR=None,
                     actionSDR=None, verbosity=0, learn=True):
        """Run one (sentence, actionSeq) pair through the full pipeline.

        Returns the classifier's best predictions for the non-'actualValues'
        steps of the last word's classification.
        """
        # Lazily allocate the SDR buffers the SPs write into.
        if wordSDR is None:
            wordSDR = numpy.zeros(self.wordSP.getColumnDimensions(),
                                  dtype=numpy.uint8)
        if actionSDR is None:
            actionSDR = numpy.zeros(self.actionSP.getColumnDimensions(),
                                    dtype=numpy.uint8)

        # Word cells occupy indices [0, nCellsFromSentence); action cells are
        # shifted above that range when fed to the general TM.
        nCellsFromSentence = self.generalTM.columnDimensions[1]
        sentenceActiveCells = set()
        actionSeqActiveCells = set()
        recordNum = 0

        # Feed the words from the sentence to the region 1
        for word in sentence:
            encodedWord = self.wordEncoder.encode(word)
            self.wordSP.compute(encodedWord, learn, wordSDR)
            self.wordTM.compute(set(numpy.where(wordSDR > 0)[0]), learn)
            # True when the word TM has any predictive cells after this word.
            region1Predicting = (self.wordTM.predictiveCells != set())
            sentenceActiveCells.update(self.wordTM.getActiveCells())

            #print("{} - {}".format(word, ))
            retVal = self.classifier.compute(
                recordNum=recordNum,
                patternNZ=self.wordTM.getActiveCells(),
                classification={
                    'bucketIdx': self.wordEncoder.getBucketIndices(word)[0],
                    'actValue': word
                },
                learn=learn,
                infer=True,
                conditionFunc=lambda x: x.endswith("-event"))

            recordNum += 1

        # Pick the most probable value for each predicted step.
        bestPredictions = []

        for step in retVal:
            if step == 'actualValues':
                continue
            higherProbIndex = numpy.argmax(retVal[step])
            bestPredictions.append(
                retVal['actualValues'][higherProbIndex])

        if region1Predicting:
            # Feed the sentence to the region 2
            self.generalTM.compute(sentenceActiveCells, learn)

            generalPrediction = set(self.generalTM.mapCellsToColumns(
                self.generalTM.predictiveCells).keys())

            # Normalize predictions so cells stay in the actionTM
            # range.
            generalPrediction = set([i - nCellsFromSentence
                                     for i in generalPrediction
                                     if i >= nCellsFromSentence])
            # (Removed here: a large block of commented-out experimental code
            # that used startPointOverlap to seed the actionTM when actionSeq
            # was empty.)

        for action in actionSeq:
            encodedAction = self.actionEncoder.encode(action)
            # Use the predicted cells from region 2 to bias the
            # activity of cells in region 1.
            if region1Predicting:
                self.actionTM.predictiveCells.update(generalPrediction)
            self.actionSP.compute(encodedAction, learn, actionSDR)
            self.actionTM.compute(set(numpy.where(actionSDR > 0)[0]), learn)

            # Shift action cells above the sentence-cell range before
            # handing them to the general TM / classifier.
            actionActiveCells = [i + nCellsFromSentence
                                 for i in self.actionTM.getActiveCells()]
            actionSeqActiveCells.update(actionActiveCells)
            self.classifier.compute(
                recordNum=recordNum,
                patternNZ=actionActiveCells,
                classification={
                    'bucketIdx': self.wordEncoder.getWidth() +
                        self.actionEncoder.getBucketIndices(action)[0],
                    'actValue': action
                },
                learn=learn,
                infer=True,
                conditionFunc=lambda x: x.endswith("-event"))

            recordNum += 1

        if region1Predicting:
            self.generalTM.compute(actionSeqActiveCells, True)

        if verbosity > 0:
            print('Best Predictions: ' + str(bestPredictions))

        if verbosity > 3:
            print(" | CLAClassifier best predictions for step1: ")
            top = sorted(retVal[1].tolist(), reverse=True)[:3]

            for prob in top:
                probIndex = retVal[1].tolist().index(prob)
                print(str(retVal['actualValues'][probIndex]) +
                      " - " + str(prob))

            print(" | CLAClassifier best predictions for step2: ")
            top = sorted(retVal[2].tolist(), reverse=True)[:3]

            for prob in top:
                probIndex = retVal[2].tolist().index(prob)
                print(str(retVal['actualValues'][probIndex]) +
                      " - " + str(prob))

            print("")
            print("---------------------------------------------------")
            print("")

        return bestPredictions

    def train(self, numIterations, trainingData=None, maxTime=-1,
              verbosity=0):
        """
        @param numIterations
        @param trainingData
        @param maxTime: (default: -1) Training stops if maxTime (in
            minutes) is exceeded. Note that this may interrupt an
            ongoing train ireration. -1 is no time restrictions.
        @param verbosity: (default: 0) How much verbose about the
            process. 0 doesn't print anything.
        """
        startTime = time.time()
        maxTimeReached = False
        recordNum = 0

        if trainingData is None:
            trainingData = self.trainingData

        # Reused SDR buffers across all iterations.
        wordSDR = numpy.zeros(self.wordSP.getColumnDimensions(),
                              dtype=numpy.uint8)
        actionSDR = numpy.zeros(self.actionSP.getColumnDimensions(),
                                dtype=numpy.uint8)
        #generalSDR = numpy.zeros(self.generalSP.getColumnDimensions(),
        #                         dtype=numpy.uint8)
        generalInput = numpy.zeros(self.generalTM.numberOfColumns(),
                                   dtype=numpy.uint8)

        for iteration in xrange(numIterations):
            print("Iteration " + str(iteration))

            for sentence, actionSeq in trainingData:
                self.processInput(sentence, actionSeq, wordSDR, actionSDR)
                self.reset()
                recordNum += 1

                if maxTime > 0:
                    elapsedMinutes = (time.time() - startTime) * (1.0 / 60.0)

                    if elapsedMinutes > maxTime:
                        maxTimeReached = True
                        print("maxTime reached, training stoped at iteration "\
                            "{}!".format(self.iterationsTrained))
                        break

            if maxTimeReached:
                break

            self.iterationsTrained += 1

    def inputSentence(self, sentence, verbosity=1, learn=False):
        # Inference-only entry point: process a sentence with no actions.
        return self.processInput(sentence, [], verbosity=verbosity,
                                 learn=learn)
class Example(): """A class to hold our code. Going object oriented""" def __init__(self, inputShape, columnDimensions): """ Parameters: ---------- _inputShape : The size of the input. (m,n) will give a size m x n _columnDimensions : The size of the 2 dimensional array of columns """ self.inputShape = inputShape self.columnDimensions = columnDimensions self.inputSize = np.array(inputShape).prod() self.columnNumber = np.array(columnDimensions).prod() self.inputArray = np.zeros(self.inputSize) self.activeArray = np.zeros(self.columnNumber) self.sp = SP(self.inputShape, self.columnDimensions, potentialRadius = self.inputSize, numActiveColumnsPerInhArea = int(0.02*self.columnNumber), globalInhibition = True, synPermActiveInc = 0.01 ) def createInput(self): """create a random input vector""" print "-" * 70 + "Creating a random input vector" + "-" * 70 #clear the inputArray to zero before creating a new input vector self.inputArray[0:] = 0 for i in range(self.inputSize): #randrange returns 0 or 1 self.inputArray[i] = randrange(2) def run(self): """Run the spatial pooler with the input vector""" print "-" * 80 + "Computing the SDR" + "-" * 80 #activeArray[column]=1 if column is active after spatial pooling self.sp.compute(self.inputArray, True, self.activeArray) print self.activeArray.nonzero() def addNoise(self, noiseLevel): """Flip the value of 10% of input bits (add noise) PARAMETERS ---------- noiseLevel : The percentage of total input bits that should be flipped """ for i in range(int(noiseLevel * self.inputSize)): #0.1*self.inputSize represents 10% of the total input bits #random.random() returns a float between 0 and 1 randomPosition = int(random() * self.inputSize) #Flipping the bit at the randomly picked position if self.inputArray[randomPosition] == 1: self.inputArray[randomPosition] = 0 else: self.inputArray[randomPosition] = 1
def initialize(self):
  """
  Initialize this node: build the input map from its children, create the
  column/cell grid, and instantiate the spatial and temporal poolers.
  """
  Node.initialize(self)
  for child in self.children:
    child.initialize()

  # Create the input map
  # An input map is a set of input elements (cells or sensor bits) that can
  # be grouped or combined.
  # For example, if we have 2 children (#1 and #2) with dimensions 6 and 12
  # respectively, a grouped input map would be something like:
  #   111111222222222222
  # while a combined one would be something like:
  #   122122122122122122
  self._inputMap = []
  sumDimension = 0
  self.inputMapType = InputMapType.grouped
  if self.inputMapType == InputMapType.grouped:
    for child in self.children:
      dimension = child.width * child.height
      sumDimension += dimension

      # Arrange input from child into input map of this region
      if child.type == NodeType.region:
        # Only the first cell of each column feeds the parent region.
        for column in child.columns:
          inputElem = column.cells[0]
          self._inputMap.append(inputElem)
      else:
        for bit in child.bits:
          inputElem = bit
          self._inputMap.append(inputElem)
  elif self.inputMapType == InputMapType.combined:
    # Get the overall dimension and the minimum dimension among all children
    minDimension = self.children[0].width * self.children[0].height
    for child in self.children:
      dimension = child.width * child.height
      sumDimension += dimension
      if dimension < minDimension:
        minDimension = dimension

    # Use the minimum dimension as a multiplication common factor to
    # determine the frequency of each child element in a sequence
    frequencies = []
    nextIdx = []
    for child in self.children:
      dimension = child.width * child.height
      if dimension % minDimension == 0:
        frequency = dimension / minDimension
        frequencies.append(frequency)
        nextIdx.append(0)
      else:
        QtGui.QMessageBox.warning(None, "Warning", "Children dimensions should have a common multiple factor!")
        return

    # Distribute alternatively child elements into input map according to
    # their frequencies
    for elemIdx in range(sumDimension):
      for childIdx in range(len(self.children)):
        child = self.children[childIdx]

        # Start distribution taking in account the last inserted element
        i0 = nextIdx[childIdx]
        iN = i0 + frequencies[childIdx]
        # NOTE(review): `iN + 1` skips one element per pass since the next
        # range starts at nextIdx; verify upstream whether `= iN` was meant.
        nextIdx[childIdx] = iN + 1
        for i in range(i0, iN):
          if child.type == NodeType.region:
            inputElem = child.columns[i].cells[0]
            self._inputMap.append(inputElem)
          else:
            inputElem = child.bits[i]
            self._inputMap.append(inputElem)

  # Initialize elements: a width x height grid of columns, each holding
  # numCellsPerColumn cells.
  self.columns = []
  for x in range(self.width):
    for y in range(self.height):
      column = Column()
      column.x = x
      column.y = y
      for z in range(self.numCellsPerColumn):
        cell = Cell()
        cell.z = z
        column.cells.append(cell)
      self.columns.append(column)

  # Create Spatial Pooler instance with appropriate parameters
  self.spatialPooler = SpatialPooler(
    inputDimensions = (sumDimension, 1),
    columnDimensions = (self.width, self.height),
    potentialRadius = self.potentialRadius,
    potentialPct = self.potentialPct,
    globalInhibition = self.globalInhibition,
    localAreaDensity = self.localAreaDensity,
    numActiveColumnsPerInhArea = self.numActiveColumnsPerInhArea,
    stimulusThreshold = self.stimulusThreshold,
    synPermInactiveDec = self.proximalSynPermDecrement,
    synPermActiveInc = self.proximalSynPermIncrement,
    synPermConnected = self.proximalSynConnectedPerm,
    minPctOverlapDutyCycle = self.minPctOverlapDutyCycle,
    minPctActiveDutyCycle = self.minPctActiveDutyCycle,
    dutyCyclePeriod = self.dutyCyclePeriod,
    maxBoost = self.maxBoost,
    seed = -1,  # -1 means a random seed each run
    spVerbosity = False)

  # Create Temporal Pooler instance with appropriate parameters
  self.temporalPooler = TemporalPooler(
    columnDimensions = (self.width, self.height),
    cellsPerColumn = self.numCellsPerColumn,
    learningRadius = self.learningRadius,
    initialPermanence = self.distalSynInitialPerm,
    connectedPermanence = self.distalSynConnectedPerm,
    minThreshold = self.minThreshold,
    maxNewSynapseCount = self.maxNumNewSynapses,
    permanenceIncrement = self.distalSynPermIncrement,
    permanenceDecrement = self.distalSynPermDecrement,
    activationThreshold = self.activationThreshold,
    seed = 42)  # fixed seed, unlike the SP above
from nupic.encoders import ScalarEncoder from nupic.research.spatial_pooler import SpatialPooler enc = ScalarEncoder(n=10000, w=21, minval = 0, maxval=10000) from std_msgs.msg import String, Float64 t =[] for i in range(10000): t.append(enc.encode(i)) print("Encoding is done") sp = SpatialPooler(inputDimensions=(10000,), columnDimensions=(20,), potentialRadius=15, numActiveColumnsPerInhArea=1, globalInhibition=True, synPermActiveInc=0.03, potentialPct=1.0) output = numpy.zeros((20,),dtype="int") for _ in range(10): for i in xrange(10000): sp.compute(t[i], learn=True, activeArray=output) print("Spatial pooler strengthened") from nupic.research.TP import TP tp = TP(numberOfCols=10000, cellsPerColumn=20, initialPerm=0.5, connectedPerm=0.5, minThreshold=10, newSynapseCount=10,
def testInhibition(self):
  """
  Test if the firing number of coincidences after inhibition
  equals spatial pooler numActiveColumnsPerInhArea.

  Drives the SP's private overlap/inhibition/learning steps by hand for
  testIter random inputs and asserts the winner count each time.
  """
  # Miscellaneous variables:
  # n, w: n, w of encoders
  # inputLen: Length of binary input
  # synPermConnected: Spatial pooler synPermConnected
  # synPermActiveInc: Spatial pooler synPermActiveInc
  # connectPct: Initial connect percentage of permanences
  # columnDimensions: Number of spatial pooler coincidences
  # numActiveColumnsPerInhArea: Spatial pooler numActiveColumnsPerInhArea
  # stimulusThreshold: Spatial pooler stimulusThreshold
  # spSeed: Spatial pooler for initial permanences
  # stimulusThresholdInh: Parameter for inhibition, default value 0.00001
  # kDutyCycleFactor: kDutyCycleFactor for dutyCycleTieBreaker in
  #                   Inhibition
  # spVerbosity: Verbosity to print other sp initial parameters
  # testIter: Testing iterations
  n = 100
  w = 15
  inputLen = 300
  columnDimensions = 2048
  numActiveColumnsPerInhArea = 40
  stimulusThreshold = 0
  spSeed = 1956
  stimulusThresholdInh = 0.00001
  kDutyCycleFactor = 0.01
  spVerbosity = 0
  testIter = 100

  spTest = SpatialPooler(
      columnDimensions=(columnDimensions, 1),
      inputDimensions=(1, inputLen),
      potentialRadius=inputLen / 2,
      numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
      spVerbosity=spVerbosity,
      stimulusThreshold=stimulusThreshold,
      seed=spSeed
  )
  # NOTE(review): this pokes SP private members (_masterPotentialM etc.);
  # it is tightly coupled to one SP implementation version.
  initialPermanence = spTest._initialPermanence()
  spTest._masterPotentialM, spTest._masterPermanenceM = (
      spTest._makeMasterCoincidences(spTest.numCloneMasters,
                                     spTest._coincRFShape,
                                     spTest.potentialPct,
                                     initialPermanence,
                                     spTest.random))

  spTest._updateInhibitionObj()
  boostFactors = numpy.ones(columnDimensions)

  for i in range(testIter):
    spTest._iterNum = i
    # random binary input with ~w/n of the bits set
    input_ = numpy.zeros((1, inputLen))
    nonzero = numpy.random.random(inputLen)
    input_[0][numpy.where(nonzero < float(w)/float(n))] = 1

    # overlap step
    spTest._computeOverlapsFP(input_,
                              stimulusThreshold=spTest.stimulusThreshold)
    spTest._overlaps *= boostFactors
    onCellIndices = numpy.where(spTest._overlaps > 0)
    spTest._onCells.fill(0)
    spTest._onCells[onCellIndices] = 1
    denseOn = spTest._onCells

    # update _dutyCycleBeforeInh (running average, window grows to 1000)
    spTest.dutyCyclePeriod = min(i + 1, 1000)
    spTest._dutyCycleBeforeInh = (
        (spTest.dutyCyclePeriod - 1) *
        spTest._dutyCycleBeforeInh + denseOn) / spTest.dutyCyclePeriod
    dutyCycleTieBreaker = spTest._dutyCycleAfterInh.copy()
    dutyCycleTieBreaker *= kDutyCycleFactor

    # inhibition step
    numOn = spTest._inhibitionObj.compute(
        spTest._overlaps + dutyCycleTieBreaker, spTest._onCellIndices,
        stimulusThresholdInh,  # stimulusThresholdInh
        max(spTest._overlaps)/1000,  # addToWinners
    )
    # update _dutyCycleAfterInh
    spTest._onCells.fill(0)
    onCellIndices = spTest._onCellIndices[0:numOn]
    spTest._onCells[onCellIndices] = 1
    denseOn = spTest._onCells
    spTest._dutyCycleAfterInh = (((spTest.dutyCyclePeriod-1) *
        spTest._dutyCycleAfterInh + denseOn) / spTest.dutyCyclePeriod)

    # learning step
    spTest._adaptSynapses(onCellIndices, [], input_)

    # update boostFactor
    spTest._updateBoostFactors()
    boostFactors = spTest._firingBoostFactors

    # update dutyCycle and boost every 50 iterations
    if ((spTest._iterNum+1) % 50) == 0:
      spTest._updateInhibitionObj()
      spTest._updateMinDutyCycles(
          spTest._dutyCycleBeforeInh,
          spTest.minPctDutyCycleBeforeInh,
          spTest._minDutyCycleBeforeInh)
      spTest._updateMinDutyCycles(
          spTest._dutyCycleAfterInh,
          spTest.minPctDutyCycleAfterInh,
          spTest._minDutyCycleAfterInh)

    # test numOn and spTest.numActiveColumnsPerInhArea
    self.assertEqual(numOn, spTest.numActiveColumnsPerInhArea,
                     "Error at input %s, actual numOn are: %i, "
                     "numActivePerInhAre is: %s" % (
                         i, numOn, numActiveColumnsPerInhArea))
inputVectors = generateRandomSDR(numInputVector, inputSize, numActiveBits) elif inputVectorType == 'dense': inputSize = 1000 inputVectors = generateDenseVectors(numInputVector, inputSize) elif inputVectorType == 'correlate-input': inputVectors = generateCorrelatedInputs() numInputVector, inputSize = inputVectors.shape else: raise ValueError columnNumber = 2048 sp = SpatialPooler((inputSize, 1), (columnNumber, 1), potentialRadius=int(0.5 * inputSize), numActiveColumnsPerInhArea=int(0.02 * columnNumber), globalInhibition=True, seed=1936, maxBoost=1, dutyCyclePeriod=1000, synPermActiveInc=0.001, synPermInactiveDec=0.001) inspectSpatialPoolerStats(sp, inputVectors, inputVectorType+"beforeTraining") # classification Accuracy before training noiseLevelList = np.linspace(0, 1.0, 21) accuracyBeforeTraining = classificationAccuracyVsNoise( sp, inputVectors, noiseLevelList) accuracyWithoutSP = classificationAccuracyVsNoise( None, inputVectors, noiseLevelList)
def initialize(self):
  """
  Initialize this node: validate feeders, build the input map, create the
  column/cell grid, and instantiate the spatial and temporal poolers.

  Returns True on success; returns None (falsy) if the region has no
  feeder nodes.
  """
  # Check if this region has nodes that feed it
  numFeeders = len(Global.project.network.getFeederNodes(self))
  if numFeeders == 0:
    QtGui.QMessageBox.warning(None, "Warning", "Region '" + self.name + "' does not have any child!")
    return

  # Initialize this node and the nodes that feed it
  Node.initialize(self)

  # Create the input map
  # An input map is a set of input elements (cells or sensor bits) that
  # are grouped. For example, if we have 2 nodes that feed this region
  # (#1 and #2) with dimensions 6 and 12 respectively, an input map would
  # be something like:
  #   111111222222222222
  self._inputMap = []
  elemIdx = 0
  for feeder in Global.project.network.getFeederNodes(self):

    # Arrange input from feeder into input map of this region
    if feeder.type == NodeType.region:
      # Only the first cell of each feeder column is exposed upward.
      for column in feeder.columns:
        inputElem = column.cells[0]
        self._inputMap.append(inputElem)
    else:
      for bit in feeder.bits:
        inputElem = bit
        self._inputMap.append(inputElem)
    elemIdx += 1

  # Initialize elements: width x height columns, each with
  # numCellsPerColumn cells carrying a global flat index.
  self.columns = []
  colIdx = 0
  for x in range(self.width):
    for y in range(self.height):
      column = Column()
      column.x = x
      column.y = y
      for z in range(self.numCellsPerColumn):
        cell = Cell()
        cell.index = (colIdx * self.numCellsPerColumn) + z
        cell.z = z
        column.cells.append(cell)
      self.columns.append(column)
      colIdx += 1

  # Create Spatial Pooler instance with appropriate parameters
  self.spatialPooler = SpatialPooler(
    inputDimensions = (self.getInputSize(), 1),
    columnDimensions = (self.width, self.height),
    potentialRadius = self.potentialRadius,
    potentialPct = self.potentialPct,
    globalInhibition = self.globalInhibition,
    localAreaDensity = self.localAreaDensity,
    numActiveColumnsPerInhArea = self.numActiveColumnsPerInhArea,
    stimulusThreshold = self.stimulusThreshold,
    synPermInactiveDec = self.proximalSynPermDecrement,
    synPermActiveInc = self.proximalSynPermIncrement,
    synPermConnected = self.proximalSynConnectedPerm,
    minPctOverlapDutyCycle = self.minPctOverlapDutyCycle,
    minPctActiveDutyCycle = self.minPctActiveDutyCycle,
    dutyCyclePeriod = self.dutyCyclePeriod,
    maxBoost = self.maxBoost,
    seed = self.spSeed,
    spVerbosity = False)

  # Create Temporal Pooler instance with appropriate parameters
  self.temporalPooler = TemporalPooler(
    columnDimensions = (self.width, self.height),
    cellsPerColumn = self.numCellsPerColumn,
    initialPermanence = self.distalSynInitialPerm,
    connectedPermanence = self.distalSynConnectedPerm,
    minThreshold = self.minThreshold,
    maxNewSynapseCount = self.maxNumNewSynapses,
    permanenceIncrement = self.distalSynPermIncrement,
    permanenceDecrement = self.distalSynPermDecrement,
    activationThreshold = self.activationThreshold,
    seed = self.tpSeed)

  return True
def testSP(): """ Run a SP test """ elemSize = 400 numSet = 42 addNear = True numRecords = 2 wantPlot = True poolPct = 0.5 itr = 1 doLearn = True while numRecords < 3: # Setup a SP sp = SpatialPooler( columnDimensions=(2048, 1), inputDimensions=(1, elemSize), potentialRadius=elemSize/2, numActiveColumnsPerInhArea=40, spVerbosity=0, stimulusThreshold=0, seed=1, potentialPct=poolPct, globalInhibition=True ) # Generate inputs using rand() inputs = generateRandomInput(numRecords, elemSize, numSet) if addNear: # Append similar entries (distance of 1) appendInputWithNSimilarValues(inputs, 42) inputSize = len(inputs) print 'Num random records = %d, inputs to process %d' % (numRecords, inputSize) # Run a number of iterations, with learning on or off, # retrieve results from the last iteration only outputs = np.zeros((inputSize,2048)) numIter = 1 if doLearn: numIter = itr for iter in xrange(numIter): for i in xrange(inputSize): time.sleep(0.001) if iter == numIter - 1: # TODO: See https://github.com/numenta/nupic/issues/2072 sp.compute(inputs[i], learn=doLearn, activeArray=outputs[i]) #print outputs[i].sum(), outputs[i] else: # TODO: See https://github.com/numenta/nupic/issues/2072 output = np.zeros(2048) sp.compute(inputs[i], learn=doLearn, activeArray=output) # Build a plot from the generated input and output and display it distribMatrix = generatePlot(outputs, inputs) # If we don't want a plot, just continue if wantPlot: plt.imshow(distribMatrix, origin='lower', interpolation = "nearest") plt.ylabel('SP (2048/40) distance in %') plt.xlabel('Input (400/42) distance in %') title = 'SP distribution' if doLearn: title += ', leaning ON' else: title += ', learning OFF' title += ', inputs = %d' % len(inputs) title += ', iterations = %d' % numIter title += ', poolPct =%f' % poolPct plt.suptitle(title, fontsize=12) plt.show() #plt.savefig(os.path.join('~/Desktop/ExperimentResults/videos5', '%s' % numRecords)) #plt.clf() numRecords += 1 return
def testSPFile(): """ Run test on the data file - the file has records previously encoded. """ spSize = 2048 spSet = 40 poolPct = 0.5 pattern = [50, 1000] doLearn = True PLOT_PRECISION = 100.0 distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1)) inputs = [] #file = open('~/Desktop/ExperimentResults/sampleArtificial.csv', 'rb') #elemSize = 400 #numSet = 42 #file = open('~/Desktop/ExperimentResults/sampleDataBasilOneField.csv', 'rb') #elemSize = 499 #numSet = 7 outdir = '~/Desktop/ExperimentResults/Basil100x21' inputFile = outdir+'.csv' file = open(inputFile, 'rb') elemSize = 100 numSet = 21 reader = csv.reader(file) for row in reader: input = np.array(map(float, row), dtype=realDType) if len(input.nonzero()[0]) != numSet: continue inputs.append(input.copy()) file.close() # Setup a SP sp = SpatialPooler( columnDimensions=(spSize, 1), inputDimensions=(1, elemSize), potentialRadius=elemSize/2, numActiveColumnsPerInhArea=spSet, spVerbosity=0, stimulusThreshold=0, synPermConnected=0.10, seed=1, potentialPct=poolPct, globalInhibition=True ) cleanPlot = False doLearn = False print 'Finished reading file, inputs/outputs to process =', len(inputs) size = len(inputs) for iter in xrange(100): print 'Iteration', iter # Learn if iter != 0: for learnRecs in xrange(pattern[0]): # TODO: See https://github.com/numenta/nupic/issues/2072 ind = np.random.random_integers(0, size-1, 1)[0] sp.compute(inputs[ind], learn=True, activeArray=outputs[ind]) # Test for _ in xrange(pattern[1]): rand1 = np.random.random_integers(0, size-1, 1)[0] rand2 = np.random.random_integers(0, size-1, 1)[0] sp.compute(inputs[rand1], learn=False, activeArray=output1) sp.compute(inputs[rand2], learn=False, activeArray=output2) outDist = (abs(output1-output2) > 0.1) intOutDist = int(outDist.sum()/2+0.1) inDist = (abs(inputs[rand1]-inputs[rand2]) > 0.1) intInDist = int(inDist.sum()/2+0.1) if intInDist != numSet or intOutDist != spSet: print rand1, rand2, '-', intInDist, intOutDist x = 
int(PLOT_PRECISION*intOutDist/spSet) y = int(PLOT_PRECISION*intInDist/numSet) if distribMatrix[x, y] < 0.1: distribMatrix[x, y] = 3 else: if distribMatrix[x, y] < 10: distribMatrix[x, y] += 1 if True: plt.imshow(distribMatrix, origin='lower', interpolation = "nearest") plt.ylabel('SP (%d/%d) distance in pct' % (spSize, spSet)) plt.xlabel('Input (%d/%d) distance in pct' % (elemSize, numSet)) title = 'SP distribution' title += ', iter = %d' % iter title += ', Pct =%f' % poolPct plt.suptitle(title, fontsize=12) #plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosArtData', '%s' % iter)) plt.savefig(os.path.join(outdir, '%s' % iter)) plt.clf() distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
def frequency(self,
              n=15,
              w=7,
              columnDimensions = 2048,
              numActiveColumnsPerInhArea = 40,
              stimulusThreshold = 0,
              spSeed = 1,
              spVerbosity = 0,
              numColors = 2,
              seed=42,
              minVal=0,
              maxVal=10,
              encoder = 'category',
              forced=True):
  """ Helper function that tests whether the SP predicts the most frequent record.

  Trains the SP on numColors random patterns (scalar- or category-encoded)
  and asserts on the overlap between fresh winners and re-used winners.
  """
  print "\nRunning SP overlap test..."
  print encoder, 'encoder,', 'Random seed:', seed, 'and', numColors, 'colors'

  #Setting up SP and creating training patterns
  # Instantiate Spatial Pooler
  spImpl = SpatialPooler(
      columnDimensions=(columnDimensions, 1),
      inputDimensions=(1, n),
      potentialRadius=n/2,
      numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
      spVerbosity=spVerbosity,
      stimulusThreshold=stimulusThreshold,
      potentialPct=0.5,
      seed=spSeed,
  )
  rnd.seed(seed)
  numpy.random.seed(seed)

  colors = []
  coincs = []
  reUsedCoincs = []
  spOutput = []
  patterns = set([])

  # Setting up the encodings
  if encoder=='scalar':
    enc = scalar.ScalarEncoder(name='car', w=w, n=n, minval=minVal,
                               maxval=maxVal, periodic=False, forced=True)
    # forced: it's strongly recommended to use w>=21, in the example we
    # force skip the check for readability
    for y in xrange(numColors):
      temp = enc.encode(rnd.random()*maxVal)
      colors.append(numpy.array(temp, dtype=realDType))
  else:
    for y in xrange(numColors):
      sdr = numpy.zeros(n, dtype=realDType)
      # Randomly setting w out of n bits to 1
      sdr[rnd.sample(xrange(n), w)] = 1
      colors.append(sdr)

  # Training the sp
  print 'Starting to train the sp on', numColors, 'patterns'
  startTime = time.time()
  for i in xrange(numColors):
    # TODO: See https://github.com/numenta/nupic/issues/2072
    spInput = colors[i]
    onCells = numpy.zeros(columnDimensions)
    spImpl.compute(spInput, learn=True, activeArray=onCells)
    spOutput.append(onCells.tolist())
    activeCoincIndices = set(onCells.nonzero()[0])

    # Checking if any of the active cells have been previously active
    reUsed = activeCoincIndices.intersection(patterns)

    if len(reUsed) == 0:
      # The set of all coincidences that have won at least once
      coincs.append((i, activeCoincIndices, colors[i]))
    else:
      reUsedCoincs.append((i, activeCoincIndices, colors[i]))

    # Adding the active cells to the set of coincs that have been active at
    # least once
    patterns.update(activeCoincIndices)

    if (i + 1) % 100 == 0:
      print 'Record number:', i + 1

  print "Elapsed time: %.2f seconds" % (time.time() - startTime)
  print len(reUsedCoincs), "re-used coinc(s),"

  # Check if results match expectations
  summ = []
  for z in coincs:
    summ.append(sum([len(z[1].intersection(y[1])) for y in reUsedCoincs]))

  zeros = len([x for x in summ if x==0])
  factor = max(summ)*len(summ)/sum(summ)
  # NOTE(review): this branches on `reUsed` -- the intersection from the
  # *last* loop iteration only -- not on len(reUsedCoincs). Confirm against
  # upstream whether that is intentional.
  if len(reUsed) < 10:
    self.assertLess(factor, 41,
                    "\nComputed factor: %d\nExpected Less than %d" % (
                        factor, 41))
    self.assertLess(zeros, 0.99*len(summ),
                    "\nComputed zeros: %d\nExpected Less than %d" % (
                        zeros, 0.99*len(summ)))
  else:
    self.assertLess(factor, 8,
                    "\nComputed factor: %d\nExpected Less than %d" % (
                        factor, 8))
    self.assertLess(zeros, 12,
                    "\nComputed zeros: %d\nExpected Less than %d" % (
                        zeros, 12))
# Pick a combination of parameter values parameters.nextCombination() #parameters.nextRandomCombination() synPermConn = parameters.getValue("synPermConn") synPermDec = synPermConn * parameters.getValue("synPermDecFrac") synPermInc = synPermConn * parameters.getValue("synPermIncFrac") # Instantiate our spatial pooler sp = SpatialPooler( inputDimensions=(32, 32), # Size of image patch columnDimensions=(32, 32), potentialRadius=10000, # Ensures 100% potential pool potentialPct=0.8, globalInhibition=True, localAreaDensity=-1, # Using numActiveColumnsPerInhArea numActiveColumnsPerInhArea=64, # All input activity can contribute to feature output stimulusThreshold=0, synPermInactiveDec=synPermDec, synPermActiveInc=synPermInc, synPermConnected=synPermConn, maxBoost=1.0, seed=1956, # The seed that Grok uses spVerbosity=1) # Instantiate the spatial pooler test bench. tb = VisionTestBench(sp) # Instantiate the classifier clf = KNNClassifier() # Train the spatial pooler on trainingVectors.
def setUp(self): self.sp = SpatialPooler(columnDimensions=[5], inputDimensions=[5])
exputils.deleteImages(outputImagePrefix + "*.gif") inputWidth = 50 inputHeight = 50 input = exputils.getRandom2dBoolMatrix(inputWidth, inputHeight) generateOutputMovie = True flatInput = input.flatten() flatInputLength = len(flatInput) print "initializing spacial pooler" spColumnHeight = flatInputLength spatialPooler = SpatialPooler(inputDimensions=flatInputLength, columnDimensions=spColumnHeight, potentialRadius=10, numActiveColumnsPerInhArea=1, globalInhibition=True, synPermActiveInc=0.03, potentialPct=1.00) print "spacial pooler initialization complete\n" printInitialSynapses = False if printInitialSynapses: print "spacial pooler initial randomly connected synapses:" for col in xrange(spColumnHeight): currentlyConnected = numpy.zeros(shape=flatInputLength, dtype="uint8") spatialPooler.getConnectedSynapses( column=col, connectedSynapses=currentlyConnected) print " ", currentlyConnected print "spatial pooler initialized\n"
def __init__(self): self.tm = MonitoredSensorimotorTemporalMemory(**TM_PARAMS) self.sp = SpatialPooler(**SP_PARAMS)
inputCategories = {} for category in inputCategoriesRaw: inputCategories[category] = categoryEncoder.encode(category) print " " + (category + ":").ljust(10), inputCategories[category] print " " + "UNKNOWN:".ljust(10), categoryEncoder.encode("UNKNOWN") print "categories initialized\n" print "initializing spatial pooler" spatialPoolerInputWidth = (len(inputCategoriesRaw) + 1) * inputCategoriesWindowWidth spatialPoolerColumnHeight = 8 spatialPooler = SpatialPooler(inputDimensions=spatialPoolerInputWidth, columnDimensions=spatialPoolerColumnHeight, potentialRadius=15, numActiveColumnsPerInhArea=1, globalInhibition=True, synPermActiveInc=0.03, potentialPct=1.0) print " spacial pooler initial randomly connected synapses:" for col in xrange(4): currentlyConnected = numpy.zeros(shape=spatialPoolerInputWidth, dtype="int") spatialPooler.getConnectedSynapses(column=col, connectedSynapses=currentlyConnected) print " ", currentlyConnected print "spatial pooler initialized\n" spatialPoolerTest = False if spatialPoolerTest:
def runHotgym(numRecords):
  """Run the hotgym demo: encode -> SP -> TM -> classifier per CSV record.

  Processes up to numRecords rows of the input CSV and returns a list of
  [oneStepPrediction, oneStepConfidencePct, None, None] per record.
  """
  with open(_PARAMS_PATH, "r") as f:
    modelParams = yaml.safe_load(f)["modelParams"]
    enParams = modelParams["sensorParams"]["encoders"]
    spParams = modelParams["spParams"]
    tmParams = modelParams["tmParams"]

  timeOfDayEncoder = DateEncoder(
      timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"])
  weekendEncoder = DateEncoder(
      weekend=enParams["timestamp_weekend"]["weekend"])
  scalarEncoder = RandomDistributedScalarEncoder(
      enParams["consumption"]["resolution"])

  encodingWidth = (timeOfDayEncoder.getWidth() +
                   weekendEncoder.getWidth() +
                   scalarEncoder.getWidth())

  sp = SpatialPooler(
      # How large the input encoding will be.
      inputDimensions=(encodingWidth),
      # How many mini-columns will be in the Spatial Pooler.
      columnDimensions=(spParams["columnCount"]),
      # What percent of the columns's receptive field is available for
      # potential synapses?
      potentialPct=spParams["potentialPct"],
      # This means that the input space has no topology.
      globalInhibition=spParams["globalInhibition"],
      localAreaDensity=spParams["localAreaDensity"],
      # Roughly 2%, giving that there is only one inhibition area because we
      # have turned on globalInhibition (40 / 2048 = 0.0195)
      numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
      # How quickly synapses grow and degrade.
      synPermInactiveDec=spParams["synPermInactiveDec"],
      synPermActiveInc=spParams["synPermActiveInc"],
      synPermConnected=spParams["synPermConnected"],
      # boostStrength controls the strength of boosting. Boosting encourages
      # efficient usage of SP columns.
      boostStrength=spParams["boostStrength"],
      # Random number generator seed.
      seed=spParams["seed"],
      # TODO: is this useful?
      # Determines if inputs at the beginning and end of an input dimension
      # should be considered neighbors when mapping columns to inputs.
      wrapAround=False
  )

  tm = TemporalMemory(
      # Must be the same dimensions as the SP
      columnDimensions=(tmParams["columnCount"],),
      # How many cells in each mini-column.
      cellsPerColumn=tmParams["cellsPerColumn"],
      # A segment is active if it has >= activationThreshold connected
      # synapses that are active due to infActiveState
      activationThreshold=tmParams["activationThreshold"],
      initialPermanence=tmParams["initialPerm"],
      # TODO: This comes from the SP params, is this normal
      connectedPermanence=spParams["synPermConnected"],
      # Minimum number of active synapses for a segment to be considered
      # during search for the best-matching segments.
      minThreshold=tmParams["minThreshold"],
      # The max number of synapses added to a segment during learning
      maxNewSynapseCount=tmParams["newSynapseCount"],
      permanenceIncrement=tmParams["permanenceInc"],
      permanenceDecrement=tmParams["permanenceDec"],
      predictedSegmentDecrement=0.0,
      maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
      maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
      seed=tmParams["seed"]
  )

  classifier = SDRClassifierFactory.create()
  results = []
  with open(_INPUT_FILE_PATH, "r") as fin:
    reader = csv.reader(fin)
    headers = reader.next()
    # Skip the two NuPIC-format metadata rows after the header.
    reader.next()
    reader.next()

    for count, record in enumerate(reader):

      if count >= numRecords:
        break

      # Convert data string into Python date object.
      dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
      # Convert data value string into float.
      consumption = float(record[1])

      # To encode, we need to provide zero-filled numpy arrays for the
      # encoders to populate.
      timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
      weekendBits = numpy.zeros(weekendEncoder.getWidth())
      consumptionBits = numpy.zeros(scalarEncoder.getWidth())

      # Now we call the encoders create bit representations for each value.
      timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
      weekendEncoder.encodeIntoArray(dateString, weekendBits)
      scalarEncoder.encodeIntoArray(consumption, consumptionBits)

      # Concatenate all these encodings into one large encoding for Spatial
      # Pooling.
      encoding = numpy.concatenate(
          [timeOfDayBits, weekendBits, consumptionBits]
      )

      # Create an array to represent active columns, all initially zero. This
      # will be populated by the compute method below. It must have the same
      # dimensions as the Spatial Pooler.
      activeColumns = numpy.zeros(spParams["columnCount"])

      # Execute Spatial Pooling algorithm over input space.
      sp.compute(encoding, True, activeColumns)
      activeColumnIndices = numpy.nonzero(activeColumns)[0]

      # Execute Temporal Memory algorithm over active mini-columns.
      tm.compute(activeColumnIndices, learn=True)

      activeCells = tm.getActiveCells()

      # Get the bucket info for this input value for classification.
      bucketIdx = scalarEncoder.getBucketIndices(consumption)[0]

      # Run classifier to translate active cells back to scalar value.
      classifierResult = classifier.compute(
          recordNum=count,
          patternNZ=activeCells,
          classification={
              "bucketIdx": bucketIdx,
              "actValue": consumption
          },
          learn=True,
          infer=True
      )

      # Print the best prediction for 1 step out.
      oneStepConfidence, oneStep = sorted(
          zip(classifierResult[1], classifierResult["actualValues"]),
          reverse=True
      )[0]
      print("1-step: {:16} ({:4.4}%)".format(oneStep,
                                             oneStepConfidence * 100))
      results.append([oneStep, oneStepConfidence * 100, None, None])

    return results
def _runLearnInference(self,
                       n=30,
                       w=15,
                       columnDimensions=2048,
                       numActiveColumnsPerInhArea=40,
                       spSeed=1951,
                       spVerbosity=0,
                       numTrainingRecords=100,
                       seed=42):
  """Train two identically-seeded SPs on the same records and assert that
  their learning stats, and a pickle round-trip, stay equivalent."""
  # Instantiate two identical spatial pooler. One will be used only for
  # learning. The other will be trained with identical records, but with
  # random inference calls thrown in
  spLearnOnly = SpatialPooler(
      columnDimensions=(columnDimensions, 1),
      inputDimensions=(1, n),
      potentialRadius=n/2,
      numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
      spVerbosity=spVerbosity,
      seed=spSeed,
      synPermInactiveDec=0.01,
      synPermActiveInc=0.2,
      synPermConnected=0.11,)

  spLearnInfer = SpatialPooler(
      columnDimensions=(columnDimensions, 1),
      inputDimensions=(1, n),
      potentialRadius=n/2,
      numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
      spVerbosity=spVerbosity,
      seed=spSeed,
      synPermInactiveDec=0.01,
      synPermActiveInc=0.2,
      synPermConnected=0.11,)

  random.seed(seed)
  np.random.seed(seed)

  # Build up training set with numTrainingRecords patterns
  inputs = []  # holds post-encoded input patterns
  for i in xrange(numTrainingRecords):
    inputVector = np.zeros(n, dtype=realDType)
    inputVector[random.sample(xrange(n), w)] = 1
    inputs.append(inputVector)

  # Train each SP with identical inputs
  startTime = time.time()

  # Re-seed before each pass so both SPs see identical randomness.
  random.seed(seed)
  np.random.seed(seed)
  for i in xrange(numTrainingRecords):
    if spVerbosity > 0:
      print "Input #%d" % i
    # TODO: See https://github.com/numenta/nupic/issues/2072
    encodedInput = inputs[i]
    decodedOutput = np.zeros(columnDimensions)
    spLearnOnly.compute(encodedInput, learn=True,
                        activeArray=decodedOutput)

  random.seed(seed)
  np.random.seed(seed)
  for i in xrange(numTrainingRecords):
    if spVerbosity > 0:
      print "Input #%d" % i
    # TODO: See https://github.com/numenta/nupic/issues/2072
    encodedInput = inputs[i]
    decodedOutput = np.zeros(columnDimensions)
    spLearnInfer.compute(encodedInput, learn=True,
                        activeArray=decodedOutput)

  print "\nElapsed time: %.2f seconds\n" % (time.time() - startTime)

  # Test that both SP"s are identical by checking learning stats
  # A more in depth test would check all the coincidences, duty cycles, etc.
  # ala tpDiff
  # Edit: spDiff has been written as an in depth tester of the spatial pooler
  learnOnlyStats = spLearnOnly.getLearningStats()
  learnInferStats = spLearnInfer.getLearningStats()

  success = True
  # Check that the two spatial poolers are equivalent after the same training.
  success = success and spDiff(spLearnInfer, spLearnOnly)
  self.assertTrue(success)
  # Make sure that the pickled and loaded SPs are equivalent.
  spPickle = pickle.dumps(spLearnOnly, protocol=0)
  spLearnOnlyLoaded = pickle.loads(spPickle)
  success = success and spDiff(spLearnOnly, spLearnOnlyLoaded)
  self.assertTrue(success)
  for k in learnOnlyStats.keys():
    if learnOnlyStats[k] != learnInferStats[k]:
      success = False
      print "Stat", k, "is different:", learnOnlyStats[k], learnInferStats[k]

  self.assertTrue(success)
  if success:
    print "Test succeeded"
def testSPNew():
    """ New version of the test.

    Alternates learning phases and test phases (driven by `pattern`) while
    feeding pairs of related random inputs to a SpatialPooler, and accumulates
    a 2-D histogram (`distribMatrix`) relating input-space distance to
    SP-output distance. Optionally renders the histogram with matplotlib.
    """
    elemSize = 400
    numSet = 42

    addNear = True
    numRecords = 1000

    wantPlot = False

    poolPct = 0.5
    itr = 5
    pattern = [60, 1000]  # [learn-phase length, test-phase length]
    doLearn = True
    start = 1
    learnIter = 0
    noLearnIter = 0
    numLearns = 0
    numTests = 0

    numIter = 1
    numGroups = 1000

    PLOT_PRECISION = 100.0
    distribMatrix = np.zeros((PLOT_PRECISION+1, PLOT_PRECISION+1))

    inputs = generateRandomInput(numGroups, elemSize, numSet)

    # Setup a SP
    sp = SpatialPooler(
        columnDimensions=(2048, 1),
        inputDimensions=(1, elemSize),
        potentialRadius=elemSize/2,
        numActiveColumnsPerInhArea=40,
        spVerbosity=0,
        stimulusThreshold=0,
        synPermConnected=0.12,
        seed=1,
        potentialPct=poolPct,
        globalInhibition=True
    )

    # BUG FIX: output1/output2 were previously used below without ever being
    # allocated, which raised NameError on the first sp.compute() call.
    # Allocate one buffer per input up front; compute() fills them in place
    # each iteration (2048 == number of SP columns).
    output1 = np.zeros(2048)
    output2 = np.zeros(2048)

    cleanPlot = False

    for i in xrange(numRecords):
        input1 = getRandomWithMods(inputs, 4)
        if i % 2 == 0:
            input2 = getRandomWithMods(inputs, 4)
        else:
            input2 = input1.copy()
            input2 = modifyBits(input2, 21)

        inDist = (abs(input1-input2) > 0.1)
        intInDist = int(inDist.sum()/2+0.1)
        #print intInDist

        # Phase state machine: start == 0 means we are in a learning phase,
        # start == 1 means we are in a (no-learning) test phase.
        if start == 0:
            doLearn = True
            learnIter += 1
            if learnIter == pattern[start]:
                numLearns += 1
                start = 1
                noLearnIter = 0
        elif start == 1:
            doLearn = False
            noLearnIter += 1
            if noLearnIter == pattern[start]:
                numTests += 1
                start = 0
                learnIter = 0
                cleanPlot = True

        # TODO: See https://github.com/numenta/nupic/issues/2072
        sp.compute(input1, learn=doLearn, activeArray=output1)
        sp.compute(input2, learn=doLearn, activeArray=output2)
        time.sleep(0.001)

        outDist = (abs(output1-output2) > 0.1)
        intOutDist = int(outDist.sum()/2+0.1)

        if not doLearn and intOutDist < 2 and intInDist > 10:
            """
            sp.spVerbosity = 10
            # TODO: See https://github.com/numenta/nupic/issues/2072
            sp.compute(input1, learn=doLearn, activeArray=output1)
            sp.compute(input2, learn=doLearn, activeArray=output2)
            sp.spVerbosity = 0

            print 'Elements has very small SP distance: %d' % intOutDist
            print output1.nonzero()
            print output2.nonzero()
            print sp._firingBoostFactors[output1.nonzero()[0]]
            print sp._synPermBoostFactors[output1.nonzero()[0]]
            print 'Input elements distance is %d' % intInDist
            print input1.nonzero()
            print input2.nonzero()
            sys.stdin.readline()
            """

        # Histogram: bucket by normalized output distance (x) vs normalized
        # input distance (y); first hit gets weight 3, later hits capped at 10.
        if not doLearn:
            x = int(PLOT_PRECISION*intOutDist/40.0)
            y = int(PLOT_PRECISION*intInDist/42.0)
            if distribMatrix[x, y] < 0.1:
                distribMatrix[x, y] = 3
            else:
                if distribMatrix[x, y] < 10:
                    distribMatrix[x, y] += 1

        #print i

        # If we don't want a plot, just continue
        if wantPlot and cleanPlot:
            plt.imshow(distribMatrix, origin='lower', interpolation="nearest")
            plt.ylabel('SP (2048/40) distance in %')
            plt.xlabel('Input (400/42) distance in %')

            title = 'SP distribution'
            #if doLearn:
            #  title += ', leaning ON'
            #else:
            #  title += ', learning OFF'
            title += ', learn sets = %d' % numLearns
            title += ', test sets = %d' % numTests
            title += ', iter = %d' % numIter
            title += ', groups = %d' % numGroups
            title += ', Pct =%f' % poolPct

            plt.suptitle(title, fontsize=12)
            #plt.show()
            # NOTE(review): '~' is not expanded by os.path.join/savefig; this
            # path only works if a literal '~' directory exists — confirm.
            plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosNew',
                                     '%s' % i))
            plt.clf()
            distribMatrix = np.zeros((PLOT_PRECISION+1, PLOT_PRECISION+1))
            cleanPlot = False
# Tutorial setup: build a random binary input vector and a SpatialPooler
# sized for it. These module-level names (inputArray, activeCols, sp, ...)
# are used by the rest of the script.
uintType = "uint32"
inputDimensions = (1000,1)
columnDimensions = (2048,1)
inputSize = np.array(inputDimensions).prod()
columnNumber = np.array(columnDimensions).prod()

# Random binary input: each bit is independently 0 or 1.
inputArray = np.zeros(inputSize, dtype=uintType)
for i in range(inputSize):
    inputArray[i] = random.randrange(2)

# Output buffer: sp.compute() writes the active-column flags here.
activeCols = np.zeros(columnNumber, dtype=uintType)

# SP alias for SpatialPooler (imported elsewhere in this file).
# Each column can see half the input; ~2% of columns win per inhibition round.
sp = SP(inputDimensions,
        columnDimensions,
        potentialRadius = int(0.5*inputSize),
        numActiveColumnsPerInhArea = int(0.02*columnNumber),
        globalInhibition = True,
        seed = 1,
        synPermActiveInc = 0.01,
        synPermInactiveDec = 0.008
        )

# Part 1:
# -------
# A column connects to a subset of the input vector (specified
# by both the potentialRadius and potentialPct). The overlap score
# for a column is the number of connections to the input that become
# active when presented with a vector. When learning is 'on' in the SP,
# the active connections are reinforced, whereas those inactive are
# depressed (according to parameters synPermActiveInc and synPermInactiveDec.
# In order for the SP to create a sparse representation of the input, it
# will select a small percentage (usually 2%) of its most active columns,
def runHotgym():
    """Run the classic "hot gym" demo: encode each CSV record (timestamp +
    power consumption), feed it through SpatialPooler and TemporalMemory,
    and print the SDR classifier's 1-step-ahead prediction for each row.
    """
    timeOfDayEncoder = DateEncoder(timeOfDay=(21,1))
    weekendEncoder = DateEncoder(weekend=21)
    scalarEncoder = RandomDistributedScalarEncoder(0.88)

    # Total width of the concatenated encoding fed to the SP.
    encodingWidth = timeOfDayEncoder.getWidth() \
        + weekendEncoder.getWidth() \
        + scalarEncoder.getWidth()

    sp = SpatialPooler(
        # How large the input encoding will be.
        # NOTE(review): (encodingWidth) is a plain int, not a 1-tuple —
        # presumably the SP accepts a scalar here; confirm.
        inputDimensions=(encodingWidth),
        # How many mini-columns will be in the Spatial Pooler.
        columnDimensions=(2048),
        # What percent of the columns's receptive field is available for potential
        # synapses?
        potentialPct=0.85,
        # This means that the input space has no topology.
        globalInhibition=True,
        localAreaDensity=-1.0,
        # Roughly 2%, giving that there is only one inhibition area because we have
        # turned on globalInhibition (40 / 2048 = 0.0195)
        numActiveColumnsPerInhArea=40.0,
        # How quickly synapses grow and degrade.
        synPermInactiveDec=0.005,
        synPermActiveInc=0.04,
        synPermConnected=0.1,
        # boostStrength controls the strength of boosting. Boosting encourages
        # efficient usage of SP columns.
        boostStrength=3.0,
        # Random number generator seed.
        seed=1956,
        # Determines if inputs at the beginning and end of an input dimension should
        # be considered neighbors when mapping columns to inputs.
        wrapAround=False
    )

    tm = TemporalMemory(
        # Must be the same dimensions as the SP
        columnDimensions=(2048, ),
        # How many cells in each mini-column.
        cellsPerColumn=32,
        # A segment is active if it has >= activationThreshold connected synapses
        # that are active due to infActiveState
        activationThreshold=16,
        initialPermanence=0.21,
        connectedPermanence=0.5,
        # Minimum number of active synapses for a segment to be considered during
        # search for the best-matching segments.
        minThreshold=12,
        # The max number of synapses added to a segment during learning
        maxNewSynapseCount=20,
        permanenceIncrement=0.1,
        permanenceDecrement=0.1,
        predictedSegmentDecrement=0.0,
        maxSegmentsPerCell=128,
        maxSynapsesPerSegment=32,
        seed=1960
    )

    classifier = SDRClassifierFactory.create()

    with open(_INPUT_FILE_PATH) as fin:
        reader = csv.reader(fin)
        # Skip the three NuPIC CSV header rows (names, types, flags).
        # NOTE: reader.next() is Python 2; py3 would use next(reader).
        headers = reader.next()
        reader.next()
        reader.next()

        for count, record in enumerate(reader):

            # Convert data string into Python date object.
            # NOTE(review): despite the name, dateString holds a
            # datetime object (strptime's return value), not a string.
            dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
            # Convert data value string into float.
            consumption = float(record[1])

            # To encode, we need to provide zero-filled numpy arrays for the encoders
            # to populate.
            timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
            weekendBits = numpy.zeros(weekendEncoder.getWidth())
            consumptionBits = numpy.zeros(scalarEncoder.getWidth())

            # Now we call the encoders create bit representations for each value.
            timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
            weekendEncoder.encodeIntoArray(dateString, weekendBits)
            scalarEncoder.encodeIntoArray(consumption, consumptionBits)

            # Concatenate all these encodings into one large encoding for Spatial
            # Pooling.
            encoding = numpy.concatenate(
                [timeOfDayBits, weekendBits, consumptionBits]
            )

            # Create an array to represent active columns, all initially zero. This
            # will be populated by the compute method below. It must have the same
            # dimensions as the Spatial Pooler.
            activeColumns = numpy.zeros(2048)

            # Execute Spatial Pooling algorithm over input space.
            sp.compute(encoding, True, activeColumns)
            activeColumnIndices = numpy.nonzero(activeColumns)[0]

            # Execute Temporal Memory algorithm over active mini-columns.
            tm.compute(activeColumnIndices, learn=True)

            activeCells = tm.getActiveCells()

            # Get the bucket info for this input value for classification.
            bucketIdx = scalarEncoder.getBucketIndices(consumption)[0]

            # Run classifier to translate active cells back to scalar value.
            classifierResult = classifier.compute(
                recordNum=count,
                patternNZ=activeCells,
                classification={
                    "bucketIdx": bucketIdx,
                    "actValue": consumption
                },
                learn=True,
                infer=True
            )

            # Print the best prediction for 1 step out.
            # classifierResult[1] holds the step-1 likelihoods, aligned with
            # classifierResult["actualValues"]; pick the most likely pair.
            probability, value = sorted(
                zip(classifierResult[1], classifierResult["actualValues"]),
                reverse=True
            )[0]
            print("1-step: {:16} ({:4.4}%)".format(value, probability * 100))
class SpatialPoolerAPITest(unittest.TestCase):
    """Tests for SpatialPooler public API.

    Most tests are simple setter/getter round-trips; the later ones exercise
    the array-based accessors (permanences, duty cycles, potential pools).
    """

    def setUp(self):
        # Small 5-input / 5-column SP shared by the round-trip tests.
        self.sp = SpatialPooler(columnDimensions=[5], inputDimensions=[5])

    def testCompute(self):
        # Check that there are no errors in call to compute
        inputVector = numpy.ones(5)
        activeArray = numpy.zeros(5)
        self.sp.compute(inputVector, True, activeArray)

    def testGetUpdatePeriod(self):
        inParam = 1234
        self.sp.setUpdatePeriod(inParam)
        outParam = self.sp.getUpdatePeriod()
        self.assertEqual(inParam, outParam)

    def testGetPotentialRadius(self):
        inParam = 56
        self.sp.setPotentialRadius(inParam)
        outParam = self.sp.getPotentialRadius()
        self.assertEqual(inParam, outParam)

    def testGetPotentialPct(self):
        inParam = 0.4
        self.sp.setPotentialPct(inParam)
        outParam = self.sp.getPotentialPct()
        self.assertAlmostEqual(inParam, outParam)

    def testGetGlobalInhibition(self):
        # Round-trip both boolean values.
        inParam = True
        self.sp.setGlobalInhibition(inParam)
        outParam = self.sp.getGlobalInhibition()
        self.assertEqual(inParam, outParam)

        inParam = False
        self.sp.setGlobalInhibition(inParam)
        outParam = self.sp.getGlobalInhibition()
        self.assertEqual(inParam, outParam)

    def testGetNumActiveColumnsPerInhArea(self):
        inParam = 7
        self.sp.setNumActiveColumnsPerInhArea(inParam)
        outParam = self.sp.getNumActiveColumnsPerInhArea()
        self.assertEqual(inParam, outParam)

    def testGetLocalAreaDensity(self):
        inParam = 0.4
        self.sp.setLocalAreaDensity(inParam)
        outParam = self.sp.getLocalAreaDensity()
        self.assertAlmostEqual(inParam, outParam)

    def testGetStimulusThreshold(self):
        inParam = 89
        self.sp.setStimulusThreshold(inParam)
        outParam = self.sp.getStimulusThreshold()
        self.assertEqual(inParam, outParam)

    def testGetInhibitionRadius(self):
        inParam = 4
        self.sp.setInhibitionRadius(inParam)
        outParam = self.sp.getInhibitionRadius()
        self.assertEqual(inParam, outParam)

    def testGetDutyCyclePeriod(self):
        inParam = 2020
        self.sp.setDutyCyclePeriod(inParam)
        outParam = self.sp.getDutyCyclePeriod()
        self.assertEqual(inParam, outParam)

    def testGetMaxBoost(self):
        inParam = 78
        self.sp.setMaxBoost(inParam)
        outParam = self.sp.getMaxBoost()
        self.assertEqual(inParam, outParam)

    def testGetIterationNum(self):
        inParam = 999
        self.sp.setIterationNum(inParam)
        outParam = self.sp.getIterationNum()
        self.assertEqual(inParam, outParam)

    def testGetIterationLearnNum(self):
        inParam = 666
        self.sp.setIterationLearnNum(inParam)
        outParam = self.sp.getIterationLearnNum()
        self.assertEqual(inParam, outParam)

    def testGetSpVerbosity(self):
        inParam = 2
        self.sp.setSpVerbosity(inParam)
        outParam = self.sp.getSpVerbosity()
        self.assertEqual(inParam, outParam)

    def testGetSynPermTrimThreshold(self):
        inParam = 0.7
        self.sp.setSynPermTrimThreshold(inParam)
        outParam = self.sp.getSynPermTrimThreshold()
        self.assertAlmostEqual(inParam, outParam)

    def testGetSynPermActiveInc(self):
        inParam = 0.567
        self.sp.setSynPermActiveInc(inParam)
        outParam = self.sp.getSynPermActiveInc()
        self.assertAlmostEqual(inParam, outParam)

    def testGetSynPermInactiveDec(self):
        inParam = 0.123
        self.sp.setSynPermInactiveDec(inParam)
        outParam = self.sp.getSynPermInactiveDec()
        self.assertAlmostEqual(inParam, outParam)

    def testGetSynPermBelowStimulusInc(self):
        inParam = 0.0898
        self.sp.setSynPermBelowStimulusInc(inParam)
        outParam = self.sp.getSynPermBelowStimulusInc()
        self.assertAlmostEqual(inParam, outParam)

    def testGetSynPermConnected(self):
        inParam = 0.514
        self.sp.setSynPermConnected(inParam)
        outParam = self.sp.getSynPermConnected()
        self.assertAlmostEqual(inParam, outParam)

    def testGetMinPctOverlapDutyCycles(self):
        inParam = 0.11122
        self.sp.setMinPctOverlapDutyCycles(inParam)
        outParam = self.sp.getMinPctOverlapDutyCycles()
        self.assertAlmostEqual(inParam, outParam)

    def testGetMinPctActiveDutyCycles(self):
        inParam = 0.444333
        self.sp.setMinPctActiveDutyCycles(inParam)
        outParam = self.sp.getMinPctActiveDutyCycles()
        self.assertAlmostEqual(inParam, outParam)

    def testGetPermanence(self):
        # NOTE(review): numInputs is passed as columnDimensions and
        # numColumns as inputDimensions — the names look swapped. Harmless
        # here because both are 5, but worth confirming/cleaning up.
        numInputs = 5
        numColumns = 5
        self.sp.initialize(columnDimensions=[numInputs],
                           inputDimensions=[numColumns],
                           potentialRadius=1,
                           potentialPct=1)
        inParam = numpy.array([0.06, 0.07, 0.08, 0.12, 0.13]).astype(realType)
        self.sp.setPermanence(0, inParam)
        outParam = numpy.zeros(numInputs).astype(realType)
        self.sp.getPermanence(0, outParam)
        self.assertListEqual(list(inParam), list(outParam))

    def testGetBoostFactors(self):
        # NOTE(review): same apparent numInputs/numColumns swap as in
        # testGetPermanence; harmless while the two are equal.
        numInputs = 3
        numColumns = 3
        self.sp.initialize(columnDimensions=[numInputs],
                           inputDimensions=[numColumns])
        inParam = numpy.array([1, 1.2, 1.3, ]).astype(realType)
        self.sp.setBoostFactors(inParam)
        outParam = numpy.zeros(numInputs).astype(realType)
        self.sp.getBoostFactors(outParam)
        self.assertListEqual(list(inParam), list(outParam))

    def testGetOverlapDutyCycles(self):
        numInputs = 3
        numColumns = 3
        self.sp.initialize(columnDimensions=[numInputs],
                           inputDimensions=[numColumns])
        inParam = numpy.array([0.9, 0.3, 0.1]).astype(realType)
        self.sp.setOverlapDutyCycles(inParam)
        outParam = numpy.zeros(numInputs).astype(realType)
        self.sp.getOverlapDutyCycles(outParam)
        self.assertListEqual(list(inParam), list(outParam))

    def testGetActiveDutyCycles(self):
        numInputs = 3
        numColumns = 3
        self.sp.initialize(columnDimensions=[numInputs],
                           inputDimensions=[numColumns])
        inParam = numpy.array([0.9, 0.99, 0.999, ]).astype(realType)
        self.sp.setActiveDutyCycles(inParam)
        outParam = numpy.zeros(numInputs).astype(realType)
        self.sp.getActiveDutyCycles(outParam)
        self.assertListEqual(list(inParam), list(outParam))

    def testGetMinOverlapDutyCycles(self):
        numInputs = 3
        numColumns = 3
        self.sp.initialize(columnDimensions=[numInputs],
                           inputDimensions=[numColumns])
        inParam = numpy.array([0.01, 0.02, 0.035, ]).astype(realType)
        self.sp.setMinOverlapDutyCycles(inParam)
        outParam = numpy.zeros(numInputs).astype(realType)
        self.sp.getMinOverlapDutyCycles(outParam)
        self.assertListEqual(list(inParam), list(outParam))

    def testGetMinActiveDutyCycles(self):
        numInputs = 3
        numColumns = 3
        self.sp.initialize(columnDimensions=[numInputs],
                           inputDimensions=[numColumns])
        inParam = numpy.array([0.01, 0.02, 0.035, ]).astype(realType)
        self.sp.setMinActiveDutyCycles(inParam)
        outParam = numpy.zeros(numInputs).astype(realType)
        self.sp.getMinActiveDutyCycles(outParam)
        self.assertListEqual(list(inParam), list(outParam))

    def testGetPotential(self):
        # NOTE(review): initialize is called twice here; the first call is
        # redundant and could be removed.
        self.sp.initialize(columnDimensions=[3], inputDimensions=[3])
        numInputs = 3
        numColumns = 3
        self.sp.initialize(columnDimensions=[numInputs],
                           inputDimensions=[numColumns])
        inParam1 = numpy.array([1, 0, 1]).astype(uintType)
        self.sp.setPotential(0, inParam1)
        inParam2 = numpy.array([1, 1, 0]).astype(uintType)
        self.sp.setPotential(1, inParam2)

        outParam1 = numpy.zeros(numInputs).astype(uintType)
        outParam2 = numpy.zeros(numInputs).astype(uintType)
        self.sp.getPotential(0, outParam1)
        self.sp.getPotential(1, outParam2)

        self.assertListEqual(list(inParam1), list(outParam1))
        self.assertListEqual(list(inParam2), list(outParam2))

    def testGetConnectedSynapses(self):
        numInputs = 5
        numColumns = 5
        self.sp.initialize(columnDimensions=[numInputs],
                           inputDimensions=[numColumns],
                           potentialRadius=1,
                           potentialPct=1)
        inParam = numpy.array([0.06, 0.07, 0.08, 0.12, 0.13]).astype(realType)
        # Only permanences above synPermConnected (0.1) count as connected.
        trueConnected = numpy.array([0, 0, 0, 1, 1])
        self.sp.setSynPermConnected(0.1)
        self.sp.setPermanence(0, inParam)
        outParam = numpy.zeros(numInputs).astype(uintType)
        self.sp.getConnectedSynapses(0, outParam)
        self.assertListEqual(list(trueConnected), list(outParam))

    def testGetConnectedCounts(self):
        numInputs = 5
        numColumns = 5
        self.sp.initialize(columnDimensions=[numInputs],
                           inputDimensions=[numColumns],
                           potentialRadius=1,
                           potentialPct=1)
        inParam = numpy.array([0.06, 0.07, 0.08, 0.12, 0.11]).astype(realType)
        # Two permanences (0.12, 0.11) exceed synPermConnected == 0.1.
        trueConnectedCount = 2
        self.sp.setSynPermConnected(0.1)
        self.sp.setPermanence(0, inParam)
        outParam = numpy.zeros(numInputs).astype(uintType)
        self.sp.getConnectedCounts(outParam)
        self.assertEqual(trueConnectedCount, outParam[0])

    def assertListAlmostEqual(self, alist, blist):
        # Element-wise float comparison helper with a fixed 1e-5 tolerance.
        self.assertEqual(len(alist), len(blist))
        for (a, b) in zip(alist, blist):
            diff = abs(a - b)
            self.assertLess(diff, 1e-5)
class FeedbackModel(LearningModel):
    """
    Structure:
       WordEncoder -> WordSP -> WordTM
       ActionEncoder -> ActionSP -> ActionTM
       WordTM, ActionTM -> GeneralSP -> GeneralTM

    Region 1 is the pair of word/action SP+TM columns; region 2 is the
    generalTM that learns sentence -> action-sequence associations and
    feeds predictions back to bias region 1.
    """

    def __init__(self, wordEncoder, actionEncoder, trainingSet,
                 modulesParams=None):
        """
        @param wordEncoder
        @param actionEncoder
        @param trainingSet: A module containing the trainingData, all of
            its categories and the inputIdx dict that maps each index
            in categories to an input name.
        """
        super(FeedbackModel, self).__init__(wordEncoder, actionEncoder,
                                            trainingSet, modulesParams)

        self.initModules(trainingSet.categories, trainingSet.inputIdx)

        # Static wiring diagram of the model (module name -> downstream
        # module name); terminal module maps to None.
        self.structure = {
            'wordInput': 'wordEnc',
            'wordEnc': 'wordSP',
            'wordSP': 'wordTM',
            'wordTM': 'generalSP',
            ###
            'actionInput': 'actionEnc',
            'actionEnc': 'actionSP',
            'actionSP': 'actionTM',
            'actionTM': 'generalSP',
            ###
            'generalSP': 'generalTM',
            'generalTM': None
        }
        # Name -> instance lookup for the modules built by initModules.
        self.modules = {
            'generalTM': self.generalTM,
            #'generalSP': self.generalSP,
            'wordTM': self.wordTM,
            'wordSP': self.wordSP,
            'wordEnc': self.wordEncoder,
            'actionTM': self.actionTM,
            'actionSP': self.actionSP,
            'actionEnc': self.actionEncoder
        }

        #self.layer = Layer(self.structure, self.modules, self.classifier)

    def initModules(self, categories, inputIdx):
        """Build the SP/TM modules and the classifier, preferring externally
        supplied modulesParams when a complete set was given."""

        modulesNames = {'wordSP', 'wordTM', 'actionSP', 'actionTM',
                        'generalTM'}

        if (self.modulesParams is not None) and\
                (set(self.modulesParams) == modulesNames):
            # NOTE(review): update() overwrites the external params with the
            # defaults (dict.update replaces existing keys) — confirm this
            # precedence is intended.
            self.modulesParams['wordSP'].update(self.defaultWordSPParams)
            self.modulesParams['wordTM'].update(self.defaultWordTMParams)
            self.modulesParams['actionSP'].update(self.defaultActionSPParams)
            self.modulesParams['actionTM'].update(self.defaultActionTMParams)

            self.wordSP = SpatialPooler(**self.modulesParams['wordSP'])
            self.wordTM = TemporalMemory(**self.modulesParams['wordTM'])
            self.actionSP = SpatialPooler(**self.modulesParams['actionSP'])
            self.actionTM = TemporalMemory(**self.modulesParams['actionTM'])

            # generalTM columns: row 0 for word cells, row 1 for action
            # cells, wide enough for the larger of the two.
            defaultGeneralTMParams = {
                'columnDimensions': (2, max(self.wordTM.numberOfCells(),
                                            self.actionTM.numberOfCells())),
                'seed': self.tmSeed
            }

            self.modulesParams['generalTM'].update(defaultGeneralTMParams)

            self.generalTM = TemporalMemory(**self.modulesParams['generalTM'])
            print("Using external Parameters!")

        else:
            self.wordSP = SpatialPooler(**self.defaultWordSPParams)
            self.wordTM = TemporalMemory(**self.defaultWordTMParams)
            self.actionSP = SpatialPooler(**self.defaultActionSPParams)
            self.actionTM = TemporalMemory(**self.defaultActionTMParams)
            print("External parameters invalid or not found, using"\
                " the default ones")

            defaultGeneralTMParams = {
                'columnDimensions': (2, max(self.wordTM.numberOfCells(),
                                            self.actionTM.numberOfCells())),
                'seed': self.tmSeed
            }

            self.generalTM = TemporalMemory(**defaultGeneralTMParams)

        self.classifier = CLAClassifierCond(
            steps=[1, 2, 3],
            alpha=0.1,
            actValueAlpha=0.3,
            verbosity=0
        )

        self.startPointOverlap = CommonOverlap('==', 1,
            self.actionTM.columnDimensions, threshold=0.5)

    def processInput(self, sentence, actionSeq, wordSDR=None,
                     actionSDR=None, verbosity=0, learn=True):
        """Feed one (sentence, actionSeq) pair through the model and return
        the classifier's best prediction per step.

        wordSDR/actionSDR: optional pre-allocated SP output buffers (reused
        across calls by train()); allocated here when not supplied.
        """
        if wordSDR is None:
            wordSDR = numpy.zeros(self.wordSP.getColumnDimensions(),
                dtype=numpy.uint8)
        if actionSDR is None:
            actionSDR = numpy.zeros(self.actionSP.getColumnDimensions(),
                dtype=numpy.uint8)

        # Width of one generalTM row; used to offset action cells into the
        # second row's index range.
        nCellsFromSentence = self.generalTM.columnDimensions[1]
        sentenceActiveCells = set()
        actionSeqActiveCells = set()
        recordNum = 0

        # Feed the words from the sentence to the region 1
        for word in sentence:
            encodedWord = self.wordEncoder.encode(word)
            self.wordSP.compute(encodedWord, learn, wordSDR)
            self.wordTM.compute(
                set(numpy.where(wordSDR > 0)[0]),
                learn
            )
            region1Predicting = (self.wordTM.predictiveCells != set())
            sentenceActiveCells.update(self.wordTM.getActiveCells())

            #print("{} - {}".format(word, ))
            retVal = self.classifier.compute(
                recordNum=recordNum,
                patternNZ=self.wordTM.getActiveCells(),
                classification={
                    'bucketIdx': self.wordEncoder.getBucketIndices(word)[0],
                    'actValue': word
                },
                learn=learn,
                infer=True,
                conditionFunc=lambda x: x.endswith("-event")
            )

            recordNum += 1

        bestPredictions = []

        # Pick the highest-probability actual value for every step key
        # returned by the classifier ('actualValues' is metadata, skip it).
        for step in retVal:
            if step == 'actualValues':
                continue
            higherProbIndex = numpy.argmax(retVal[step])
            bestPredictions.append(
                retVal['actualValues'][higherProbIndex]
            )

        if region1Predicting:
            # Feed the sentence to the region 2
            self.generalTM.compute(sentenceActiveCells, learn)

            generalPrediction = set(self.generalTM.mapCellsToColumns(
                self.generalTM.predictiveCells
            ).keys())

            # Normalize predictions so cells stay in the actionTM
            # range.
            generalPrediction = set([i - nCellsFromSentence
                for i in generalPrediction
                if i >= nCellsFromSentence])

#           columnsPrediction = numpy.zeros(
#               self.actionSP.getNumColumns(),
#               dtype=numpy.uint8
#           )
#           columnsPrediction[self.actionTM.mapCellsToColumns(
#               generalPrediction).keys()] = 1
#           self.startPointOverlap.updateCounts(columnsPrediction)
#
#           if len(actionSeq) <= 0:
#
#               assert region1Predicting, "Region 1 is not predicting, consider "\
#                   "training the model for a longer time"
#               predictedValues = []
#
#               firstColumns = numpy.where(numpy.bitwise_and(columnsPrediction > 0,
#                   self.startPointOverlap.commonElements))
#
#               predictedEnc = numpy.zeros(self.actionEncoder.getWidth(),
#                   dtype=numpy.uint8)
#               predictedEnc[
#                   [self.actionSP._mapColumn(col) for col in firstColumns]] = 1
#               predictedValues.append(self.actionEncoder.decode(predictedEnc))
#
#               print(firstColumns)
#
#               self.actionTM.predictiveCells.update(generalPrediction)
#               self.actionTM.compute(firstColumns, learn)
#
#               predictedColumns = self.actionTM.mapCellsToColumns(
#                   self.actionTM.predictiveCells).keys()[0]

        for action in actionSeq:
            encodedAction = self.actionEncoder.encode(action)

            # Use the predicted cells from region 2 to bias the
            # activity of cells in region 1.
            if region1Predicting:
                self.actionTM.predictiveCells.update(generalPrediction)

            self.actionSP.compute(encodedAction, learn, actionSDR)
            self.actionTM.compute(
                set(numpy.where(actionSDR > 0)[0]),
                learn
            )
            # Shift action cell indices into the second generalTM row.
            actionActiveCells = [i + nCellsFromSentence for i in
                                 self.actionTM.getActiveCells()]
            actionSeqActiveCells.update(actionActiveCells)
            self.classifier.compute(
                recordNum=recordNum,
                patternNZ=actionActiveCells,
                classification={
                    'bucketIdx': self.wordEncoder.getWidth() +
                        self.actionEncoder.getBucketIndices(action)[0],
                    'actValue': action
                },
                learn=learn,
                infer=True,
                conditionFunc=lambda x: x.endswith("-event")
            )

            recordNum += 1

        if region1Predicting:
            self.generalTM.compute(
                actionSeqActiveCells,
                True
            )

        if verbosity > 0:
            print('Best Predictions: ' + str(bestPredictions))

        if verbosity > 3:
            print(" | CLAClassifier best predictions for step1: ")
            top = sorted(retVal[1].tolist(), reverse=True)[:3]

            for prob in top:
                probIndex = retVal[1].tolist().index(prob)
                print(str(retVal['actualValues'][probIndex]) +
                    " - " + str(prob))

            print(" | CLAClassifier best predictions for step2: ")
            top = sorted(retVal[2].tolist(), reverse=True)[:3]

            for prob in top:
                probIndex = retVal[2].tolist().index(prob)
                print(str(retVal['actualValues'][probIndex]) +
                    " - " + str(prob))

            print("")
            print("---------------------------------------------------")
            print("")

        return bestPredictions

    def train(self, numIterations, trainingData=None,
              maxTime=-1, verbosity=0):
        """
        @param numIterations
        @param trainingData
        @param maxTime: (default: -1) Training stops if maxTime (in
            minutes) is exceeded. Note that this may interrupt an
            ongoing train ireration. -1 is no time restrictions.
        @param verbosity: (default: 0) How much verbose about the
            process. 0 doesn't print anything.
        """
        startTime = time.time()
        maxTimeReached = False
        recordNum = 0

        if trainingData is None:
            trainingData = self.trainingData

        # Pre-allocate SP output buffers once and reuse them for every
        # processInput call.
        wordSDR = numpy.zeros(self.wordSP.getColumnDimensions(),
            dtype=numpy.uint8)
        actionSDR = numpy.zeros(self.actionSP.getColumnDimensions(),
            dtype=numpy.uint8)
        #generalSDR = numpy.zeros(self.generalSP.getColumnDimensions(),
        #    dtype=numpy.uint8)
        generalInput = numpy.zeros(self.generalTM.numberOfColumns(),
            dtype=numpy.uint8)

        for iteration in xrange(numIterations):
            print("Iteration " + str(iteration))

            for sentence, actionSeq in trainingData:
                self.processInput(sentence, actionSeq, wordSDR, actionSDR)
                self.reset()
                recordNum += 1

                if maxTime > 0:
                    elapsedMinutes = (time.time() - startTime) * (1.0 / 60.0)

                    if elapsedMinutes > maxTime:
                        maxTimeReached = True
                        print("maxTime reached, training stoped at iteration "\
                            "{}!".format(self.iterationsTrained))
                        break

            if maxTimeReached:
                break

            self.iterationsTrained += 1

    def inputSentence(self, sentence, verbosity=1, learn=False):
        # Inference-only entry point: run a sentence with no action
        # sequence and return the best predictions.
        return self.processInput(sentence, [], verbosity=verbosity,
                                 learn=learn)
class Region(Node):
    """
    A class only to group properties related to regions.
    """

    #region Constructor

    def __init__(self, name):
        """
        Initializes a new instance of this class.
        """

        Node.__init__(self, name, NodeType.region)

        #region Instance fields

        self.columns = []
        """List of columns that compose this region"""

        self._inputMap = []
        """An array representing the input map for this region."""

        #region Spatial Parameters

        self.enableSpatialLearning = True
        """Switch for spatial learning"""

        self.potentialRadius = 0
        """This parameter determines the extent of the input that each column can potentially be connected to. This can be thought of as the input bits that are visible to each column, or a 'receptiveField' of the field of vision. A large enough value will result in 'global coverage', meaning that each column can potentially be connected to every input bit. This parameter defines a square (or hyper square) area: a column will have a max square potential pool with sides of length 2 * potentialRadius + 1."""

        self.potentialPct = 0.5
        """The percent of the inputs, within a column's potential radius, that a column can be connected to. If set to 1, the column will be connected to every input within its potential radius. This parameter is used to give each column a unique potential pool when a large potentialRadius causes overlap between the columns. At initialization time we choose ((2*potentialRadius + 1)^(# inputDimensions) * potentialPct) input bits to comprise the column's potential pool."""

        self.globalInhibition = False
        """If true, then during inhibition phase the winning columns are selected as the most active columns from the region as a whole. Otherwise, the winning columns are selected with respect to their local neighborhoods. Using global inhibition boosts performance x60."""

        self.localAreaDensity = -1.0
        """The desired density of active columns within a local inhibition area (the size of which is set by the internally calculated inhibitionRadius, which is in turn determined from the average size of the connected potential pools of all columns). The inhibition logic will insure that at most N columns remain ON within a local inhibition area, where N = localAreaDensity * (total number of columns in inhibition area)."""

        # NOTE(review): assumes self.width/self.height were already set by
        # Node.__init__ above — confirm; this value is fixed at construction
        # time and will not track later size changes.
        self.numActiveColumnsPerInhArea = int(0.02 * (self.width * self.height))
        """An alternate way to control the density of the active columns. If numActiveColumnsPerInhArea is specified then localAreaDensity must be less than 0, and vice versa. When using numActiveColumnsPerInhArea, the inhibition logic will insure that at most 'numActiveColumnsPerInhArea' columns remain ON within a local inhibition area (the size of which is set by the internally calculated inhibitionRadius, which is in turn determined from the average size of the connected receptive fields of all columns). When using this method, as columns learn and grow their effective receptive fields, the inhibitionRadius will grow, and hence the net density of the active columns will *decrease*. This is in contrast to the localAreaDensity method, which keeps the density of active columns the same regardless of the size of their receptive fields."""

        self.stimulusThreshold = 0
        """This is a number specifying the minimum number of synapses that must be on in order for a columns to turn ON. The purpose of this is to prevent noise input from activating columns. Specified as a percent of a fully grown synapse."""

        self.proximalSynConnectedPerm = 0.10
        """The default connected threshold. Any synapse whose permanence value is above the connected threshold is a "connected synapse", meaning it can contribute to the cell's firing."""

        self.proximalSynPermIncrement = 0.1
        """The amount by which an active synapse is incremented in each round. Specified as a percent of a fully grown synapse."""

        self.proximalSynPermDecrement = 0.01
        """The amount by which an inactive synapse is decremented in each round. Specified as a percent of a fully grown synapse."""

        self.minPctOverlapDutyCycle = 0.001
        """A number between 0 and 1.0, used to set a floor on how often a column should have at least stimulusThreshold active inputs. Periodically, each column looks at the overlap duty cycle of all other columns within its inhibition radius and sets its own internal minimal acceptable duty cycle to: minPctDutyCycleBeforeInh * max(other columns' duty cycles). On each iteration, any column whose overlap duty cycle falls below this computed value will get all of its permanence values boosted up by synPermActiveInc. Raising all permanences in response to a sub-par duty cycle before inhibition allows a cell to search for new inputs when either its previously learned inputs are no longer ever active, or when the vast majority of them have been "hijacked" by other columns."""

        self.minPctActiveDutyCycle = 0.001
        """A number between 0 and 1.0, used to set a floor on how often a column should be activate. Periodically, each column looks at the activity duty cycle of all other columns within its inhibition radius and sets its own internal minimal acceptable duty cycle to: minPctDutyCycleAfterInh * max(other columns' duty cycles). On each iteration, any column whose duty cycle after inhibition falls below this computed value will get its internal boost factor increased."""

        self.dutyCyclePeriod = 1000
        """The period used to calculate duty cycles. Higher values make it take longer to respond to changes in boost or synPerConnectedCell. Shorter values make it more unstable and likely to oscillate."""

        self.maxBoost = 10.0
        """The maximum overlap boost factor. Each column's overlap gets multiplied by a boost factor before it gets considered for inhibition. The actual boost factor for a column is number between 1.0 and maxBoost. A boost factor of 1.0 is used if the duty cycle is >= minOverlapDutyCycle, maxBoost is used if the duty cycle is 0, and any duty cycle in between is linearly extrapolated from these 2 endpoints."""

        self.spSeed = -1
        """Seed for generate random values"""

        #endregion

        #region Temporal Parameters

        self.enableTemporalLearning = True
        """Switch for temporal learning"""

        self.numCellsPerColumn = 10
        """Number of cells per column. More cells, more contextual information"""

        self.distalSynInitialPerm = 0.11
        """The initial permanence of an distal synapse."""

        self.distalSynConnectedPerm = 0.50
        """The default connected threshold. Any synapse whose permanence value is above the connected threshold is a "connected synapse", meaning it can contribute to the cell's firing."""

        self.distalSynPermIncrement = 0.10
        """The amount by which an active synapse is incremented in each round. Specified as a percent of a fully grown synapse."""

        self.distalSynPermDecrement = 0.10
        """The amount by which an inactive synapse is decremented in each round. Specified as a percent of a fully grown synapse."""

        self.minThreshold = 8
        """If the number of synapses active on a segment is at least this threshold, it is selected as the best matching cell in a bursing column."""

        self.activationThreshold = 12
        """If the number of active connected synapses on a segment is at least this threshold, the segment is said to be active."""

        self.maxNumNewSynapses = 15
        """The maximum number of synapses added to a segment during learning."""

        self.tpSeed = 42
        """Seed for generate random values"""

        #endregion

        self.spatialPooler = None
        """Spatial Pooler instance"""

        self.temporalPooler = None
        """Temporal Pooler instance"""

        #endregion

        #region Statistics properties

        self.statsPrecisionRate = 0.

        #endregion

        #endregion

    #region Methods

    def getColumn(self, x, y):
        """
        Return the column located at given position
        """
        # Columns are stored row-major: index = y * width + x.
        column = self.columns[(y * self.width) + x]

        return column

    def getInputSize(self):
        """
        Return the sum of sizes of all feeder nodes.
        """
        sumSizes = 0
        for feeder in Global.project.network.getFeederNodes(self):
            sumSizes += feeder.width * feeder.height

        return sumSizes

    def initialize(self):
        """
        Initialize this node.
        """

        # Check if this region has nodes that feed it
        numFeeders = len(Global.project.network.getFeederNodes(self))
        if numFeeders == 0:
            QtGui.QMessageBox.warning(None, "Warning", "Region '" + self.name + "' does not have any child!")
            return

        # Initialize this node and the nodes that feed it
        Node.initialize(self)

        # Create the input map
        # An input map is a set of input elements (cells or sensor bits) that should are grouped
        # For example, if we have 2 nodes that feed this region (#1 and #2) with dimensions 6 and 12 respectively,
        # a input map would be something like:
        #   111111222222222222
        self._inputMap = []
        elemIdx = 0
        for feeder in Global.project.network.getFeederNodes(self):

            # Arrange input from feeder into input map of this region
            if feeder.type == NodeType.region:
                # Region feeders contribute the first cell of each column.
                for column in feeder.columns:
                    inputElem = column.cells[0]
                    self._inputMap.append(inputElem)
            else:
                # Sensor feeders contribute their raw bits.
                for bit in feeder.bits:
                    inputElem = bit
                    self._inputMap.append(inputElem)
            elemIdx += 1

        # Initialize elements
        self.columns = []
        colIdx = 0
        for x in range(self.width):
            for y in range(self.height):
                column = Column()
                column.x = x
                column.y = y
                for z in range(self.numCellsPerColumn):
                    cell = Cell()
                    cell.index = (colIdx * self.numCellsPerColumn) + z
                    cell.z = z
                    column.cells.append(cell)
                self.columns.append(column)
                colIdx += 1

        # Create Spatial Pooler instance with appropriate parameters
        self.spatialPooler = SpatialPooler(
            inputDimensions = (self.getInputSize(), 1),
            columnDimensions = (self.width, self.height),
            potentialRadius = self.potentialRadius,
            potentialPct = self.potentialPct,
            globalInhibition = self.globalInhibition,
            localAreaDensity = self.localAreaDensity,
            numActiveColumnsPerInhArea = self.numActiveColumnsPerInhArea,
            stimulusThreshold = self.stimulusThreshold,
            synPermInactiveDec = self.proximalSynPermDecrement,
            synPermActiveInc = self.proximalSynPermIncrement,
            synPermConnected = self.proximalSynConnectedPerm,
            minPctOverlapDutyCycle = self.minPctOverlapDutyCycle,
            minPctActiveDutyCycle = self.minPctActiveDutyCycle,
            dutyCyclePeriod = self.dutyCyclePeriod,
            maxBoost = self.maxBoost,
            seed = self.spSeed,
            spVerbosity = False)

        # Create Temporal Pooler instance with appropriate parameters
        self.temporalPooler = TemporalPooler(
            columnDimensions = (self.width, self.height),
            cellsPerColumn = self.numCellsPerColumn,
            initialPermanence = self.distalSynInitialPerm,
            connectedPermanence = self.distalSynConnectedPerm,
            minThreshold = self.minThreshold,
            maxNewSynapseCount = self.maxNumNewSynapses,
            permanenceIncrement = self.distalSynPermIncrement,
            permanenceDecrement = self.distalSynPermDecrement,
            activationThreshold = self.activationThreshold,
            seed = self.tpSeed)

        return True

    def nextStep(self):
        """
        Perfoms actions related to time step progression.
        """

        Node.nextStep(self)
        for column in self.columns:
            column.nextStep()

        # Get input from sensors or lower regions and put into a single input map.
        # NOTE: 'input' shadows the builtin; kept as-is for compatibility.
        input = self.getInput()

        # Send input to Spatial Pooler and get processed output (i.e. the active columns)
        # First initialize the vector for representing the current record
        columnDimensions = (self.width, self.height)
        columnNumber = numpy.array(columnDimensions).prod()
        activeColumns = numpy.zeros(columnNumber)
        self.spatialPooler.compute(input, self.enableSpatialLearning, activeColumns)

        # Send active columns to Temporal Pooler and get processed output (i.e. the predicting cells)
        # First convert active columns from float array to integer set
        activeColumnsSet = set()
        for colIdx in range(len(activeColumns)):
            if activeColumns[colIdx] == 1:
                activeColumnsSet.add(colIdx)
        self.temporalPooler.compute(activeColumnsSet, self.enableTemporalLearning)

        # Update elements regarding spatial pooler
        self.updateSpatialElements(activeColumns)

        # Update elements regarding temporal pooler
        self.updateTemporalElements()

        # Get the predicted values
        self.getPredictions()

        #TODO: self._output = self.temporalPooler.getPredictedState()

    def getPredictions(self):
        """ Get the predicted values after an iteration.
""" for feeder in Global.project.network.getFeederNodes(self): feeder.getPredictions() def calculateStatistics(self): """ Calculate statistics after an iteration. """ # The region's prediction precision is the average between the nodes that feed it precisionRate = 0. numFeeders = 0 for feeder in Global.project.network.getFeederNodes(self): precisionRate += feeder.statsPrecisionRate numFeeders += 1 self.statsPrecisionRate = precisionRate / numFeeders for column in self.columns: column.calculateStatistics() def getInput(self): """ Get input from sensors or lower regions and put into a single input map. """ # Initialize the vector for representing the current input map inputList = [] for inputElem in self._inputMap: if inputElem.isActive.atCurrStep(): inputList.append(1) else: inputList.append(0) input = numpy.array(inputList) return input def updateSpatialElements(self, activeColumns): """ Update elements regarding spatial pooler """ # Update proximal segments and synapses according to active columns for colIdx in range(len(self.columns)): column = self.columns[colIdx] # Update proximal segment segment = column.segment if activeColumns[colIdx] == 1: segment.isActive.setForCurrStep(True) else: segment.isActive.setForCurrStep(False) # Check if proximal segment is predicted by check if the column has any predicted cell for cell in column.cells: if cell.index in self.temporalPooler.predictiveCells: segment.isPredicted.setForCurrStep(True) # Update proximal synapses if segment.isActive.atCurrStep() or segment.isPredicted.atCurrStep(): permanencesSynapses = [] self.spatialPooler.getPermanence(colIdx, permanencesSynapses) connectedSynapses = [] self.spatialPooler.getConnectedSynapses(colIdx, connectedSynapses) for synIdx in range(len(permanencesSynapses)): # Get the proximal synapse given its position in the input map # Create a new one if it doesn't exist synapse = segment.getSynapse(synIdx) # Update proximal synapse if permanencesSynapses[synIdx] > 0.: if synapse == None: 
# Create a new synapse to a input element # An input element is a column if feeder is a region # or then a bit if feeder is a sensor synapse = Synapse() synapse.inputElem = self._inputMap[synIdx] synapse.indexSP = synIdx segment.synapses.append(synapse) # Update state synapse.isRemoved.setForCurrStep(False) synapse.permanence.setForCurrStep(permanencesSynapses[synIdx]) if connectedSynapses[synIdx] == 1: synapse.isConnected.setForCurrStep(True) else: synapse.isConnected.setForCurrStep(False) else: if synapse != None: synapse.isRemoved.setForCurrStep(True) def updateTemporalElements(self): """ Update elements regarding temporal pooler """ # Update cells, distal segments and synapses according to active columns for colIdx in range(len(self.columns)): column = self.columns[colIdx] # Mark proximal segment and its connected synapses as predicted if column.segment.isPredicted.atCurrStep(): for synapse in column.segment.synapses: if synapse.isConnected.atCurrStep(): synapse.isPredicted.setForCurrStep(True) synapse.inputElem.isPredicted.setForCurrStep(True) # Mark proximal segment and its connected synapses that were predicted but are not active now if column.segment.isPredicted.atPreviousStep(): if not column.segment.isActive.atCurrStep(): column.segment.isFalselyPredicted.setForCurrStep(True) for synapse in column.segment.synapses: if (synapse.isPredicted.atPreviousStep() and not synapse.isConnected.atCurrStep()) or (synapse.isConnected.atCurrStep() and synapse.inputElem.isFalselyPredicted.atCurrStep()): synapse.isFalselyPredicted.setForCurrStep(True) for cell in column.cells: cellIdx = cell.index # Update cell's states if cellIdx in self.temporalPooler.winnerCells: cell.isLearning.setForCurrStep(True) if cellIdx in self.temporalPooler.activeCells: cell.isActive.setForCurrStep(True) if cellIdx in self.temporalPooler.predictiveCells: cell.isPredicted.setForCurrStep(True) if cell.isPredicted.atPreviousStep() and not cell.isActive.atCurrStep(): 
cell.isFalselyPredicted.setForCurrStep(True) # Get the indexes of the distal segments of this cell segmentsForCell = self.temporalPooler.connections.segmentsForCell(cellIdx) # Add the segments that appeared after last iteration for segIdx in segmentsForCell: # Check if segment already exists in the cell segFound = False for segment in cell.segments: if segment.indexTP == segIdx: segFound = True break # If segment is new, add it to cell if not segFound: segment = Segment(SegmentType.distal) segment.indexTP = segIdx cell.segments.append(segment) # Update distal segments for segment in cell.segments: segIdx = segment.indexTP # If segment not found in segments indexes returned in last iteration mark it as removed if segIdx in segmentsForCell: # Update segment's state if segIdx in self.temporalPooler.activeSegments: segment.isActive.setForCurrStep(True) else: segment.isActive.setForCurrStep(False) # Get the indexes of the synapses of this segment synapsesForSegment = self.temporalPooler.connections.synapsesForSegment(segIdx) # Add the synapses that appeared after last iteration for synIdx in synapsesForSegment: # Check if synapse already exists in the segment synFound = False for synapse in segment.synapses: if synapse.indexTP == synIdx: synFound = True break # If synapse is new, add it to segment if not synFound: synapse = Synapse() synapse.indexTP = synIdx segment.synapses.append(synapse) # Update synapses for synapse in segment.synapses: synIdx = synapse.indexTP # If synapse not found in synapses indexes returned in last iteration mark it as removed if synIdx in synapsesForSegment: # Update synapse's state synapseData = self.temporalPooler.connections.dataForSynapse(synIdx) synapse.permanence.setForCurrStep(synapseData.permanence) if synapseData.permanence >= self.distalSynConnectedPerm: synapse.isConnected.setForCurrStep(True) else: synapse.isConnected.setForCurrStep(False) # Get cell given cell's index sourceColIdx = synapseData.presynapticCell / 
self.numCellsPerColumn sourceCellRelIdx = synapseData.presynapticCell % self.numCellsPerColumn sourceCell = self.columns[sourceColIdx].cells[sourceCellRelIdx] synapse.inputElem = sourceCell else: synapse.isRemoved.setForCurrStep(True) else: segment.isRemoved.setForCurrStep(True)
def _runLearnInference(self, n=30, w=15, columnDimensions=2048, numActiveColumnsPerInhArea=40, spSeed=1951, spVerbosity=0, numTrainingRecords=100, seed=42): # Instantiate two identical spatial pooler. One will be used only for # learning. The other will be trained with identical records, but with # random inference calls thrown in spLearnOnly = SpatialPooler( columnDimensions=(columnDimensions, 1), inputDimensions=(1, n), potentialRadius=n / 2, numActiveColumnsPerInhArea=numActiveColumnsPerInhArea, spVerbosity=spVerbosity, seed=spSeed, synPermInactiveDec=0.01, synPermActiveInc=0.2, synPermConnected=0.11, ) spLearnInfer = SpatialPooler( columnDimensions=(columnDimensions, 1), inputDimensions=(1, n), potentialRadius=n / 2, numActiveColumnsPerInhArea=numActiveColumnsPerInhArea, spVerbosity=spVerbosity, seed=spSeed, synPermInactiveDec=0.01, synPermActiveInc=0.2, synPermConnected=0.11, ) random.seed(seed) np.random.seed(seed) # Build up training set with numTrainingRecords patterns inputs = [] # holds post-encoded input patterns for i in xrange(numTrainingRecords): inputVector = np.zeros(n, dtype=realDType) inputVector[random.sample(xrange(n), w)] = 1 inputs.append(inputVector) # Train each SP with identical inputs startTime = time.time() random.seed(seed) np.random.seed(seed) for i in xrange(numTrainingRecords): if spVerbosity > 0: print "Input #%d" % i # TODO: See https://github.com/numenta/nupic/issues/2072 encodedInput = inputs[i] decodedOutput = np.zeros(columnDimensions) spLearnOnly.compute(encodedInput, learn=True, activeArray=decodedOutput) random.seed(seed) np.random.seed(seed) for i in xrange(numTrainingRecords): if spVerbosity > 0: print "Input #%d" % i # TODO: See https://github.com/numenta/nupic/issues/2072 encodedInput = inputs[i] decodedOutput = np.zeros(columnDimensions) spLearnInfer.compute(encodedInput, learn=True, activeArray=decodedOutput) print "\nElapsed time: %.2f seconds\n" % (time.time() - startTime) # Test that both SP"s are identical by 
checking learning stats # A more in depth test would check all the coincidences, duty cycles, etc. # ala tpDiff # Edit: spDiff has been written as an in depth tester of the spatial pooler learnOnlyStats = spLearnOnly.getLearningStats() learnInferStats = spLearnInfer.getLearningStats() success = True # Check that the two spatial poolers are equivalent after the same training. success = success and spDiff(spLearnInfer, spLearnOnly) self.assertTrue(success) # Make sure that the pickled and loaded SPs are equivalent. spPickle = pickle.dumps(spLearnOnly, protocol=0) spLearnOnlyLoaded = pickle.loads(spPickle) success = success and spDiff(spLearnOnly, spLearnOnlyLoaded) self.assertTrue(success) for k in learnOnlyStats.keys(): if learnOnlyStats[k] != learnInferStats[k]: success = False print "Stat", k, "is different:", learnOnlyStats[ k], learnInferStats[k] self.assertTrue(success) if success: print "Test succeeded"
from nupic.support.unittesthelpers.algorithm_test_helpers import convertSP
import numpy  # just for debugging

# Instantiate our spatial pooler.
# NOTE(review): boosting is disabled (maxBoost = 1.0) and the permanence
# increments are very small (0.001) -- presumably tuned for stable vision
# training; confirm before changing.
sp = SpatialPooler(
  inputDimensions= (32, 32), # Size of image patch
  columnDimensions = (32, 32),
  potentialRadius = 10000, # Ensures 100% potential pool
  potentialPct = 0.8,
  globalInhibition = True,
  localAreaDensity = -1, # Using numActiveColumnsPerInhArea
  #localAreaDensity = 0.02, # two percent of columns active at a time
  #numActiveColumnsPerInhArea = -1, # Using percentage instead
  numActiveColumnsPerInhArea = 64,
  # All input activity can contribute to feature output
  stimulusThreshold = 0,
  synPermInactiveDec = 0.001,
  synPermActiveInc = 0.001,
  synPermConnected = 0.3,
  minPctOverlapDutyCycle=0.001,
  minPctActiveDutyCycle=0.001,
  dutyCyclePeriod=1000,
  maxBoost = 1.0,
  seed = 1956, # The seed that Grok uses
  spVerbosity = 1)

# Instantiate the spatial pooler test bench.
tb = VisionTestBench(sp)

# Instantiate the classifier
def initialize(self):
    """
    Initialize this node: build the input map from the children's elements
    and create the Spatial Pooler and Temporal Pooler instances.

    Returns True on success; returns None (after a warning dialog) when the
    region has no children or the children's dimensions are incompatible.
    """

    # A region cannot be initialized without child nodes feeding it.
    if len(self.children) == 0:
        QtGui.QMessageBox.warning(
            None, "Warning",
            "Region '" + self.name + "' does not have any child!")
        return

    Node.initialize(self)
    for child in self.children:
        child.initialize()

    # Create the input map.
    # An input map is a set of input elements (cells or sensor bits) that
    # can be grouped or combined.
    # For example, if we have 2 children (#1 and #2) with dimensions 6 and
    # 12 respectively, a grouped input map would be something like:
    #   111111222222222222
    # while a combined one would be something like:
    #   122122122122122122
    self._inputMap = []
    sumDimension = 0
    if self.inputMapType == InputMapType.grouped:
        # Append each child's elements en bloc, child after child.
        for child in self.children:
            dimension = child.width * child.height
            sumDimension += dimension

            # An input element is a column's first cell if the child is a
            # region, or a bit if the child is a sensor.
            if child.type == NodeType.region:
                for column in child.columns:
                    self._inputMap.append(column.cells[0])
            else:
                for bit in child.bits:
                    self._inputMap.append(bit)
    elif self.inputMapType == InputMapType.combined:
        # Get the overall dimension and the minimum dimension among all
        # children.
        minDimension = self.children[0].width * self.children[0].height
        for child in self.children:
            dimension = child.width * child.height
            sumDimension += dimension
            if dimension < minDimension:
                minDimension = dimension

        # Use the minimum dimension as a common factor to determine the
        # frequency of each child's elements within one round of the
        # interleaved sequence.
        frequencies = []
        nextIdx = []
        for child in self.children:
            dimension = child.width * child.height
            if dimension % minDimension == 0:
                frequency = dimension / minDimension
                frequencies.append(frequency)
                nextIdx.append(0)
            else:
                QtGui.QMessageBox.warning(
                    None, "Warning",
                    "Children dimensions should have a common multiple factor!"
                )
                return

        # Distribute the children's elements alternately into the input map
        # according to their frequencies.
        # BUG FIX: this loop previously ran once per input element
        # (for elemIdx in range(sumDimension)) while appending
        # frequencies[childIdx] elements per iteration, which over-appended
        # and indexed past the smaller children's element lists whenever the
        # children had different dimensions. Loop until the map holds exactly
        # sumDimension elements instead.
        childIdx = 0
        while len(self._inputMap) < sumDimension:
            if childIdx == len(self.children):
                childIdx = 0
            child = self.children[childIdx]

            # Start distribution taking into account the last inserted
            # element of this child.
            i0 = nextIdx[childIdx]
            iN = i0 + frequencies[childIdx]
            nextIdx[childIdx] = iN
            for i in range(i0, iN):
                if child.type == NodeType.region:
                    self._inputMap.append(child.columns[i].cells[0])
                else:
                    self._inputMap.append(child.bits[i])

            # Alternate children.
            childIdx += 1

    # Initialize elements: one column per (x, y) position, each holding
    # numCellsPerColumn cells numbered sequentially across columns.
    self.columns = []
    colIdx = 0
    for x in range(self.width):
        for y in range(self.height):
            column = Column()
            column.x = x
            column.y = y
            for z in range(self.numCellsPerColumn):
                cell = Cell()
                cell.index = (colIdx * self.numCellsPerColumn) + z
                cell.z = z
                column.cells.append(cell)
            self.columns.append(column)
            colIdx += 1

    # Create Spatial Pooler instance with appropriate parameters.
    # NOTE(review): seed is hard-coded to -1 here while the sibling Region
    # class passes a configurable self.spSeed -- confirm this is intentional.
    self.spatialPooler = SpatialPooler(
        inputDimensions=(sumDimension, 1),
        columnDimensions=(self.width, self.height),
        potentialRadius=self.potentialRadius,
        potentialPct=self.potentialPct,
        globalInhibition=self.globalInhibition,
        localAreaDensity=self.localAreaDensity,
        numActiveColumnsPerInhArea=self.numActiveColumnsPerInhArea,
        stimulusThreshold=self.stimulusThreshold,
        synPermInactiveDec=self.proximalSynPermDecrement,
        synPermActiveInc=self.proximalSynPermIncrement,
        synPermConnected=self.proximalSynConnectedPerm,
        minPctOverlapDutyCycle=self.minPctOverlapDutyCycle,
        minPctActiveDutyCycle=self.minPctActiveDutyCycle,
        dutyCyclePeriod=self.dutyCyclePeriod,
        maxBoost=self.maxBoost,
        seed=-1,
        spVerbosity=False)

    # Create Temporal Pooler instance with appropriate parameters.
    # NOTE(review): seed is hard-coded to 42 here while the sibling Region
    # class passes a configurable self.tpSeed -- confirm this is intentional.
    self.temporalPooler = TemporalPooler(
        columnDimensions=(self.width, self.height),
        cellsPerColumn=self.numCellsPerColumn,
        learningRadius=self.learningRadius,
        initialPermanence=self.distalSynInitialPerm,
        connectedPermanence=self.distalSynConnectedPerm,
        minThreshold=self.minThreshold,
        maxNewSynapseCount=self.maxNumNewSynapses,
        permanenceIncrement=self.distalSynPermIncrement,
        permanenceDecrement=self.distalSynPermDecrement,
        activationThreshold=self.activationThreshold,
        seed=42)

    return True
class Region(Node): """ A class only to group properties related to regions. """ #region Constructor def __init__(self, name): """ Initializes a new instance of this class. """ Node.__init__(self, name, NodeType.region) #region Instance fields self.columns = [] """List of columns that compose this region""" self._inputMap = [] """An array representing the input map for this region.""" #region Spatial Parameters self.enableSpatialLearning = True """Switch for spatial learning""" self.potentialRadius = 0 """This parameter determines the extent of the input that each column can potentially be connected to. This can be thought of as the input bits that are visible to each column, or a 'receptiveField' of the field of vision. A large enough value will result in 'global coverage', meaning that each column can potentially be connected to every input bit. This parameter defines a square (or hyper square) area: a column will have a max square potential pool with sides of length 2 * potentialRadius + 1.""" self.potentialPct = 0.5 """The percent of the inputs, within a column's potential radius, that a column can be connected to. If set to 1, the column will be connected to every input within its potential radius. This parameter is used to give each column a unique potential pool when a large potentialRadius causes overlap between the columns. At initialization time we choose ((2*potentialRadius + 1)^(# inputDimensions) * potentialPct) input bits to comprise the column's potential pool.""" self.globalInhibition = False """If true, then during inhibition phase the winning columns are selected as the most active columns from the region as a whole. Otherwise, the winning columns are selected with respect to their local neighborhoods. 
Using global inhibition boosts performance x60.""" self.localAreaDensity = -1.0 """The desired density of active columns within a local inhibition area (the size of which is set by the internally calculated inhibitionRadius, which is in turn determined from the average size of the connected potential pools of all columns). The inhibition logic will insure that at most N columns remain ON within a local inhibition area, where N = localAreaDensity * (total number of columns in inhibition area).""" self.numActiveColumnsPerInhArea = int(0.02 * (self.width * self.height)) """An alternate way to control the density of the active columns. If numActiveColumnsPerInhArea is specified then localAreaDensity must be less than 0, and vice versa. When using numActiveColumnsPerInhArea, the inhibition logic will insure that at most 'numActiveColumnsPerInhArea' columns remain ON within a local inhibition area (the size of which is set by the internally calculated inhibitionRadius, which is in turn determined from the average size of the connected receptive fields of all columns). When using this method, as columns learn and grow their effective receptive fields, the inhibitionRadius will grow, and hence the net density of the active columns will *decrease*. This is in contrast to the localAreaDensity method, which keeps the density of active columns the same regardless of the size of their receptive fields.""" self.stimulusThreshold = 0 """This is a number specifying the minimum number of synapses that must be on in order for a columns to turn ON. The purpose of this is to prevent noise input from activating columns. Specified as a percent of a fully grown synapse.""" self.proximalSynConnectedPerm = 0.10 """The default connected threshold. 
Any synapse whose permanence value is above the connected threshold is a "connected synapse", meaning it can contribute to the cell's firing.""" self.proximalSynPermIncrement = 0.1 """The amount by which an active synapse is incremented in each round. Specified as a percent of a fully grown synapse.""" self.proximalSynPermDecrement = 0.01 """The amount by which an inactive synapse is decremented in each round. Specified as a percent of a fully grown synapse.""" self.minPctOverlapDutyCycle = 0.001 """A number between 0 and 1.0, used to set a floor on how often a column should have at least stimulusThreshold active inputs. Periodically, each column looks at the overlap duty cycle of all other columns within its inhibition radius and sets its own internal minimal acceptable duty cycle to: minPctDutyCycleBeforeInh * max(other columns' duty cycles). On each iteration, any column whose overlap duty cycle falls below this computed value will get all of its permanence values boosted up by synPermActiveInc. Raising all permanences in response to a sub-par duty cycle before inhibition allows a cell to search for new inputs when either its previously learned inputs are no longer ever active, or when the vast majority of them have been "hijacked" by other columns.""" self.minPctActiveDutyCycle = 0.001 """A number between 0 and 1.0, used to set a floor on how often a column should be activate. Periodically, each column looks at the activity duty cycle of all other columns within its inhibition radius and sets its own internal minimal acceptable duty cycle to: minPctDutyCycleAfterInh * max(other columns' duty cycles). On each iteration, any column whose duty cycle after inhibition falls below this computed value will get its internal boost factor increased.""" self.dutyCyclePeriod = 1000 """The period used to calculate duty cycles. Higher values make it take longer to respond to changes in boost or synPerConnectedCell. 
Shorter values make it more unstable and likely to oscillate.""" self.maxBoost = 10.0 """The maximum overlap boost factor. Each column's overlap gets multiplied by a boost factor before it gets considered for inhibition. The actual boost factor for a column is number between 1.0 and maxBoost. A boost factor of 1.0 is used if the duty cycle is >= minOverlapDutyCycle, maxBoost is used if the duty cycle is 0, and any duty cycle in between is linearly extrapolated from these 2 endpoints.""" self.spSeed = -1 """Seed for generate random values""" #endregion #region Temporal Parameters self.enableTemporalLearning = True """Switch for temporal learning""" self.numCellsPerColumn = 10 """Number of cells per column. More cells, more contextual information""" self.distalSynInitialPerm = 0.11 """The initial permanence of an distal synapse.""" self.distalSynConnectedPerm = 0.50 """The default connected threshold. Any synapse whose permanence value is above the connected threshold is a "connected synapse", meaning it can contribute to the cell's firing.""" self.distalSynPermIncrement = 0.10 """The amount by which an active synapse is incremented in each round. Specified as a percent of a fully grown synapse.""" self.distalSynPermDecrement = 0.10 """The amount by which an inactive synapse is decremented in each round. 
Specified as a percent of a fully grown synapse.""" self.minThreshold = 8 """If the number of synapses active on a segment is at least this threshold, it is selected as the best matching cell in a bursing column.""" self.activationThreshold = 12 """If the number of active connected synapses on a segment is at least this threshold, the segment is said to be active.""" self.maxNumNewSynapses = 15 """The maximum number of synapses added to a segment during learning.""" self.tpSeed = 42 """Seed for generate random values""" #endregion self.spatialPooler = None """Spatial Pooler instance""" self.temporalPooler = None """Temporal Pooler instance""" #endregion #region Statistics properties self.statsPrecisionRate = 0. #endregion #endregion #region Methods def getColumn(self, x, y): """ Return the column located at given position """ column = self.columns[(y * self.width) + x] return column def getInputSize(self): """ Return the sum of sizes of all feeder nodes. """ sumSizes = 0 for feeder in Global.project.network.getFeederNodes(self): sumSizes += feeder.width * feeder.height return sumSizes def initialize(self): """ Initialize this node. 
""" # Check if this region has nodes that feed it numFeeders = len(Global.project.network.getFeederNodes(self)) if numFeeders == 0: QtGui.QMessageBox.warning(None, "Warning", "Region '" + self.name + "' does not have any child!") return # Initialize this node and the nodes that feed it Node.initialize(self) # Create the input map # An input map is a set of input elements (cells or sensor bits) that should are grouped # For example, if we have 2 nodes that feed this region (#1 and #2) with dimensions 6 and 12 respectively, # a input map would be something like: # 111111222222222222 self._inputMap = [] elemIdx = 0 for feeder in Global.project.network.getFeederNodes(self): # Arrange input from feeder into input map of this region if feeder.type == NodeType.region: for column in feeder.columns: inputElem = column.cells[0] self._inputMap.append(inputElem) else: for bit in feeder.bits: inputElem = bit self._inputMap.append(inputElem) elemIdx += 1 # Initialize elements self.columns = [] colIdx = 0 for x in range(self.width): for y in range(self.height): column = Column() column.x = x column.y = y for z in range(self.numCellsPerColumn): cell = Cell() cell.index = (colIdx * self.numCellsPerColumn) + z cell.z = z column.cells.append(cell) self.columns.append(column) colIdx += 1 # Create Spatial Pooler instance with appropriate parameters self.spatialPooler = SpatialPooler( inputDimensions = (self.getInputSize(), 1), columnDimensions = (self.width, self.height), potentialRadius = self.potentialRadius, potentialPct = self.potentialPct, globalInhibition = self.globalInhibition, localAreaDensity = self.localAreaDensity, numActiveColumnsPerInhArea = self.numActiveColumnsPerInhArea, stimulusThreshold = self.stimulusThreshold, synPermInactiveDec = self.proximalSynPermDecrement, synPermActiveInc = self.proximalSynPermIncrement, synPermConnected = self.proximalSynConnectedPerm, minPctOverlapDutyCycle = self.minPctOverlapDutyCycle, minPctActiveDutyCycle = self.minPctActiveDutyCycle, 
dutyCyclePeriod = self.dutyCyclePeriod, maxBoost = self.maxBoost, seed = self.spSeed, spVerbosity = False) # Create Temporal Pooler instance with appropriate parameters self.temporalPooler = TemporalPooler( columnDimensions = (self.width, self.height), cellsPerColumn = self.numCellsPerColumn, initialPermanence = self.distalSynInitialPerm, connectedPermanence = self.distalSynConnectedPerm, minThreshold = self.minThreshold, maxNewSynapseCount = self.maxNumNewSynapses, permanenceIncrement = self.distalSynPermIncrement, permanenceDecrement = self.distalSynPermDecrement, activationThreshold = self.activationThreshold, seed = self.tpSeed) return True def nextStep(self): """ Perfoms actions related to time step progression. """ Node.nextStep(self) for column in self.columns: column.nextStep() # Get input from sensors or lower regions and put into a single input map. input = self.getInput() # Send input to Spatial Pooler and get processed output (i.e. the active columns) # First initialize the vector for representing the current record columnDimensions = (self.width, self.height) columnNumber = numpy.array(columnDimensions).prod() activeColumns = numpy.zeros(columnNumber) self.spatialPooler.compute(input, self.enableSpatialLearning, activeColumns) # Send active columns to Temporal Pooler and get processed output (i.e. the predicting cells) # First convert active columns from float array to integer set activeColumnsSet = set() for colIdx in range(len(activeColumns)): if activeColumns[colIdx] == 1: activeColumnsSet.add(colIdx) self.temporalPooler.compute(activeColumnsSet, self.enableTemporalLearning) # Update elements regarding spatial pooler self.updateSpatialElements(activeColumns) # Update elements regarding temporal pooler self.updateTemporalElements() # Get the predicted values self.getPredictions() #TODO: self._output = self.temporalPooler.getPredictedState() def getPredictions(self): """ Get the predicted values after an iteration. 
""" for feeder in Global.project.network.getFeederNodes(self): feeder.getPredictions() def calculateStatistics(self): """ Calculate statistics after an iteration. """ # The region's prediction precision is the average between the nodes that feed it precisionRate = 0. numFeeders = 0 for feeder in Global.project.network.getFeederNodes(self): precisionRate += feeder.statsPrecisionRate numFeeders += 1 self.statsPrecisionRate = precisionRate / numFeeders for column in self.columns: column.calculateStatistics() def getInput(self): """ Get input from sensors or lower regions and put into a single input map. """ # Initialize the vector for representing the current input map inputList = [] for inputElem in self._inputMap: if inputElem.isActive.atCurrStep(): inputList.append(1) else: inputList.append(0) input = numpy.array(inputList) return input def updateSpatialElements(self, activeColumns): """ Update elements regarding spatial pooler """ # Update proximal segments and synapses according to active columns for colIdx in range(len(self.columns)): column = self.columns[colIdx] # Update proximal segment segment = column.segment if activeColumns[colIdx] == 1: segment.isActive.setForCurrStep(True) else: segment.isActive.setForCurrStep(False) # Check if proximal segment is predicted by check if the column has any predicted cell for cell in column.cells: if cell.index in self.temporalPooler.predictiveCells: segment.isPredicted.setForCurrStep(True) # Update proximal synapses if segment.isActive.atCurrStep() or segment.isPredicted.atCurrStep(): permanencesSynapses = [] self.spatialPooler.getPermanence(colIdx, permanencesSynapses) connectedSynapses = [] self.spatialPooler.getConnectedSynapses(colIdx, connectedSynapses) for synIdx in range(len(permanencesSynapses)): # Get the proximal synapse given its position in the input map # Create a new one if it doesn't exist synapse = segment.getSynapse(synIdx) # Update proximal synapse if permanencesSynapses[synIdx] > 0.: if synapse == None: 
# Create a new synapse to a input element # An input element is a column if feeder is a region # or then a bit if feeder is a sensor synapse = Synapse() synapse.inputElem = self._inputMap[synIdx] synapse.indexSP = synIdx segment.synapses.append(synapse) # Update state synapse.isRemoved.setForCurrStep(False) synapse.permanence.setForCurrStep(permanencesSynapses[synIdx]) if connectedSynapses[synIdx] == 1: synapse.isConnected.setForCurrStep(True) else: synapse.isConnected.setForCurrStep(False) else: if synapse != None: synapse.isRemoved.setForCurrStep(True) def updateTemporalElements(self): """ Update elements regarding temporal pooler """ # Update cells, distal segments and synapses according to active columns for colIdx in range(len(self.columns)): column = self.columns[colIdx] # Mark proximal segment and its connected synapses as predicted if column.segment.isPredicted.atCurrStep(): for synapse in column.segment.synapses: if synapse.isConnected.atCurrStep(): synapse.isPredicted.setForCurrStep(True) synapse.inputElem.isPredicted.setForCurrStep(True) # Mark proximal segment and its connected synapses that were predicted but are not active now if column.segment.isPredicted.atPreviousStep(): if not column.segment.isActive.atCurrStep(): column.segment.isFalselyPredicted.setForCurrStep(True) for synapse in column.segment.synapses: if (synapse.isPredicted.atPreviousStep() and not synapse.isConnected.atCurrStep()) or (synapse.isConnected.atCurrStep() and synapse.inputElem.isFalselyPredicted.atCurrStep()): synapse.isFalselyPredicted.setForCurrStep(True) for cell in column.cells: cellIdx = cell.index # Update cell's states if cellIdx in self.temporalPooler.winnerCells: cell.isLearning.setForCurrStep(True) if cellIdx in self.temporalPooler.activeCells: cell.isActive.setForCurrStep(True) if cellIdx in self.temporalPooler.predictiveCells: cell.isPredicted.setForCurrStep(True) if cell.isPredicted.atPreviousStep() and not cell.isActive.atCurrStep(): 
cell.isFalselyPredicted.setForCurrStep(True) # Get the indexes of the distal segments of this cell segmentsForCell = self.temporalPooler.connections.segmentsForCell(cellIdx) # Add the segments that appeared after last iteration for segIdx in segmentsForCell: # Check if segment already exists in the cell segFound = False for segment in cell.segments: if segment.indexTP == segIdx: segFound = True break # If segment is new, add it to cell if not segFound: segment = Segment(SegmentType.distal) segment.indexTP = segIdx cell.segments.append(segment) # Update distal segments for segment in cell.segments: segIdx = segment.indexTP # If segment not found in segments indexes returned in last iteration mark it as removed if segIdx in segmentsForCell: # Update segment's state if segIdx in self.temporalPooler.activeSegments: segment.isActive.setForCurrStep(True) else: segment.isActive.setForCurrStep(False) # Get the indexes of the synapses of this segment synapsesForSegment = self.temporalPooler.connections.synapsesForSegment(segIdx) # Add the synapses that appeared after last iteration for synIdx in synapsesForSegment: # Check if synapse already exists in the segment synFound = False for synapse in segment.synapses: if synapse.indexTP == synIdx: synFound = True break # If synapse is new, add it to segment if not synFound: synapse = Synapse() synapse.indexTP = synIdx segment.synapses.append(synapse) # Update synapses for synapse in segment.synapses: synIdx = synapse.indexTP # If synapse not found in synapses indexes returned in last iteration mark it as removed if synIdx in synapsesForSegment: # Update synapse's state (_, sourceCellAbsIdx, permanence) = self.temporalPooler.connections.dataForSynapse(synIdx) synapse.permanence.setForCurrStep(permanence) if permanence >= self.distalSynConnectedPerm: synapse.isConnected.setForCurrStep(True) else: synapse.isConnected.setForCurrStep(False) # Get cell given cell's index sourceColIdx = sourceCellAbsIdx / self.numCellsPerColumn 
sourceCellRelIdx = sourceCellAbsIdx % self.numCellsPerColumn sourceCell = self.columns[sourceColIdx].cells[sourceCellRelIdx] synapse.inputElem = sourceCell else: synapse.isRemoved.setForCurrStep(True) else: segment.isRemoved.setForCurrStep(True)
if __name__ == "__main__": # Get training images and convert them to vectors. trainingImages, trainingTags = data.getImagesAndTags(trainingDataset) trainingVectors = encoder.imagesToVectors(trainingImages) # Instantiate the python spatial pooler sp = SpatialPooler( inputDimensions=32**2, # Size of image patch columnDimensions=16, # Number of potential features potentialRadius=10000, # Ensures 100% potential pool potentialPct=1, # Neurons can connect to 100% of input globalInhibition=True, localAreaDensity=-1, # Using numActiveColumnsPerInhArea #localAreaDensity = 0.02, # one percent of columns active at a time #numActiveColumnsPerInhArea = -1, # Using percentage instead numActiveColumnsPerInhArea=1, # Only one feature active at a time # All input activity can contribute to feature output stimulusThreshold=0, synPermInactiveDec=0.3, synPermActiveInc=0.3, synPermConnected=0.3, # Connected threshold maxBoost=2, seed=1956, # The seed that Grok uses spVerbosity=1) # Instantiate the spatial pooler test bench. tb = VisionTestBench(sp) # Instantiate the classifier clf = exactMatch()