def initModules(self, categories, inputIdx): modulesNames = { 'wordSP', 'wordTM', 'actionSP', 'actionTM', 'generalTM' } if (self.modulesParams is not None) and\ (set(self.modulesParams) == modulesNames): self.modulesParams['wordSP'].update(self.defaultWordSPParams) self.modulesParams['wordTM'].update(self.defaultWordTMParams) self.modulesParams['actionSP'].update(self.defaultActionSPParams) self.modulesParams['actionTM'].update(self.defaultActionTMParams) self.wordSP = SpatialPooler(**self.modulesParams['wordSP']) self.wordTM = TemporalMemory(**self.modulesParams['wordTM']) self.actionSP = SpatialPooler(**self.modulesParams['actionSP']) self.actionTM = TemporalMemory(**self.modulesParams['actionTM']) defaultGeneralTMParams = { 'columnDimensions': (2, max(self.wordTM.numberOfCells(), self.actionTM.numberOfCells())), 'seed': self.tmSeed } self.modulesParams['generalTM'].update(defaultGeneralTMParams) self.generalTM = TemporalMemory(**self.modulesParams['generalTM']) print("Using external Parameters!") else: self.wordSP = SpatialPooler(**self.defaultWordSPParams) self.wordTM = TemporalMemory(**self.defaultWordTMParams) self.actionSP = SpatialPooler(**self.defaultActionSPParams) self.actionTM = TemporalMemory(**self.defaultActionTMParams) print("External parameters invalid or not found, using"\ " the default ones") defaultGeneralTMParams = { 'columnDimensions': (2, max(self.wordTM.numberOfCells(), self.actionTM.numberOfCells())), 'seed': self.tmSeed } self.generalTM = TemporalMemory(**defaultGeneralTMParams) self.classifier = CLAClassifierCond(steps=[1, 2, 3], alpha=0.1, actValueAlpha=0.3, verbosity=0) self.startPointOverlap = CommonOverlap('==', 1, self.actionTM.columnDimensions, threshold=0.5)
def main():
  # Instantiate our spatial pooler
  sp = SpatialPooler(
      inputDimensions=32**2,          # Size of image patch
      columnDimensions=16,            # Number of potential features
      potentialRadius=10000,          # Ensures 100% potential pool
      potentialPct=1,                 # Neurons can connect to 100% of input
      globalInhibition=True,
      numActiveColumnsPerInhArea=1,   # Only one feature active at a time
      # All input activity can contribute to feature output
      stimulusThreshold=0,
      synPermInactiveDec=0.1,
      synPermActiveInc=0.1,
      synPermConnected=0.1,           # Connected threshold
      maxBoost=3,
      seed=1956,                      # The seed that Grok uses
      spVerbosity=1)

  viewer = SPViewer(sp,
                    screenWidth=512,
                    screenHeight=600,
                    imagePath='data/Image2.jpg',
                    patchSide=32,
                    patchOverlapPercent=0,
                    epochCount=40,
                    replayDelay=.1)
  viewer.run()

  finalWindow = viewer.screen
  pygame.image.save(finalWindow, "screenshot.jpg")
def __init__(self, config):
  # Calculate the size of input and column space
  inputsize = np.array(config['inputDimensions']).prod()
  colsize = np.array(config['columnDimensions']).prod()

  # Save colsize and data type
  self.colsize = colsize
  self.datatype = config['uintType']
  self.numIterations = config['numIterations']

  # Set up the pooler and a reference to the active column holder
  self.sp = SpatialPooler(
      inputDimensions=config['inputDimensions'],
      columnDimensions=config['columnDimensions'],
      potentialRadius=int(config['potentialRadius'] * inputsize),
      numActiveColumnsPerInhArea=math.ceil(
          config['amountActiveCols'] * colsize),
      globalInhibition=config['inhibition']
  )

  # Reference to the active columns set that is the output of the spatial pooler
  self.activeColumns = np.zeros(colsize, config['uintType'])

  # Set up the temporal pooler
  self.tm = TemporalMemory(
      columnDimensions=config['columnDimensions'],
      cellsPerColumn=config['cellsPerColumn']
  )
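# A minimal sketch of how this config-driven SP/TM pair might be built and stepped.
# The config values below are illustrative only, and the step() flow (SP output fed
# to the TM) is an assumption; the actual run loop for this class is not shown above.
import numpy as np

exampleConfig = {
    'inputDimensions': (1000,),   # hypothetical values, not taken from the source
    'columnDimensions': (2048,),
    'uintType': 'uint32',
    'numIterations': 100,
    'potentialRadius': 0.5,       # fraction of the input, per the __init__ above
    'amountActiveCols': 0.02,     # fraction of columns active per inhibition area
    'inhibition': True,
    'cellsPerColumn': 32,
}

def step(pooler, inputVector):
  # Assuming `pooler` is an instance of the class whose __init__ is shown above:
  # run spatial pooling, then hand the active column indices to temporal memory.
  pooler.sp.compute(inputVector, True, pooler.activeColumns)
  activeColumnIndices = np.nonzero(pooler.activeColumns)[0]
  pooler.tm.compute(activeColumnIndices, learn=True)
  return activeColumnIndices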
def initModules(self, categories, inputIdx): modulesNames = {'generalSP', 'generalTM'} nWords = len(categories[inputIdx['wordInput']]) nActions = len(categories[inputIdx['actionInput']]) inputDimensions = max( self.wordEncoder.getWidth(), self.actionEncoder.getWidth() ) columnDimensions = (4 * max((nWords + nActions), len(self.trainingData)), ) defaultGeneralSPParams = { 'inputDimensions': inputDimensions, 'columnDimensions': columnDimensions, 'seed': self.spSeed } defaultGeneralTMParams = { 'columnDimensions': columnDimensions, 'seed': self.tmSeed } if (self.modulesParams is not None) and\ (set(self.modulesParams) == modulesNames): self.modulesParams['generalSP'].update(defaultGeneralSPParams) self.modulesParams['generalTM'].update(defaultGeneralTMParams) self.generalSP = SpatialPooler(**self.modulesParams['generalSP']) self.generalTM = TemporalMemory(**self.modulesParams['generalTM']) else: self.generalSP = SpatialPooler(**defaultGeneralSPParams) self.generalTM = TemporalMemory(**defaultGeneralTMParams) self.classifier = CLAClassifierCond( steps=[1, 2, 3], alpha=0.1, actValueAlpha=0.3, verbosity=0 )
def run():
  sp = SpatialPooler(inputDimensions=[10, 15],
                     columnDimensions=[5, 10],
                     potentialRadius=2,
                     potentialPct=0.5,
                     synPermInactiveDec=0.1,
                     synPermActiveInc=0.1,
                     synPermConnected=0.1,
                     localAreaDensity=0.1,
                     numActiveColumnsPerInhArea=-1,
                     globalInhibition=True)
  inputArray = numpy.zeros(sp.getNumInputs())
  activeArray = numpy.zeros(sp.getNumColumns())

  Patcher().patchSP(sp)

  for i in range(100):
    generateInput(inputArray)
    sp.compute(inputArray, True, activeArray)
    print "Ran iteration:\t{0}".format(i)
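# generateInput() is called above but not defined in this snippet. A minimal sketch
# of what it might do, assuming it simply overwrites the array in place with a
# random binary pattern (the real helper may differ).
import numpy

def generateInput(inputArray):
  # Fill the existing array in place with 0/1 values, roughly half active.
  inputArray[:] = numpy.random.randint(0, 2, size=inputArray.shape)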
def _create_network(self, mean=128): """ :param mean: int, the mean of the frame pix value, will be used in BASE_ENCODE. """ # some rulers of creating network # the product of the shape's two dimensions is equal to inputDimensions # columnDimensions equal to numberOfCols self.enc = MatrixEncoder(shape=self.shape, mean=mean) self.sp = SpatialPooler( inputDimensions=self.shape[0] * self.shape[1], columnDimensions=self.column_dimensions, potentialRadius=self.potential_radius, numActiveColumnsPerInhArea=self.numActive_columns_perInhArea, globalInhibition=self.global_inhibition, synPermActiveInc=self.syn_perm_active_inc, potentialPct=self.potential_pct, synPermInactiveDec=self.synPermInactiveDec, synPermConnected=self.synPermConnected, seed=self.sp_seed, localAreaDensity=self.localAreaDensity, stimulusThreshold=self.stimulusThreshold, maxBoost=self.maxBoost) self.tp = TP(numberOfCols=self.column_dimensions, cellsPerColumn=self.cells_per_column, initialPerm=self.initial_perm, connectedPerm=self.connected_perm, minThreshold=self.min_threshold, newSynapseCount=self.new_synapse_count, permanenceInc=self.permanence_inc, permanenceDec=self.permanence_dec, activationThreshold=self.activation_threshold, globalDecay=self.global_decay, burnIn=self.burn_in, pamLength=self.pam_length, maxSynapsesPerSegment=self.maxSynapsesPerSegment, maxSegmentsPerCell=self.maxSegmentsPerCell, seed=self.tp_seed, maxAge=self.maxAge)
# In[16]:

enc = ScalarEncoder(n=24, w=3, minval=0, maxval=23, clipInput=True, forced=True)
encoded_list = map(enc.encode, converted_list)

print 'STARTING SPATIAL POOLING'


# In[17]:

sp = SpatialPooler(inputDimensions=(24,),
                   columnDimensions=(4,),
                   potentialRadius=15,
                   numActiveColumnsPerInhArea=1,
                   globalInhibition=True,
                   synPermActiveInc=0.03,
                   potentialPct=1.0)


# In[18]:

for column in xrange(4):
  connected = np.zeros((24,), dtype="int")
  sp.getConnectedSynapses(column, connected)
  print connected


# In[19]:
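# An illustrative follow-on, not part of the original notebook: feed each encoding
# through the spatial pooler above and print which of the 4 columns wins. Assumes
# encoded_list holds 24-bit numpy arrays produced by the ScalarEncoder.
import numpy as np

for encoding in encoded_list:
  activeColumns = np.zeros(4, dtype="uint32")
  sp.compute(np.asarray(encoding), True, activeColumns)
  print np.nonzero(activeColumns)[0]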
def testSPFile(): """ Run test on the data file - the file has records previously encoded. """ spSize = 2048 spSet = 40 poolPct = 0.5 pattern = [50, 1000] doLearn = True PLOT_PRECISION = 100.0 distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1)) inputs = [] #file = open('~/Desktop/ExperimentResults/sampleArtificial.csv', 'rb') #elemSize = 400 #numSet = 42 #file = open('~/Desktop/ExperimentResults/sampleDataBasilOneField.csv', 'rb') #elemSize = 499 #numSet = 7 outdir = '~/Desktop/ExperimentResults/Basil100x21' inputFile = outdir+'.csv' file = open(inputFile, 'rb') elemSize = 100 numSet = 21 reader = csv.reader(file) for row in reader: input = np.array(map(float, row), dtype=realDType) if len(input.nonzero()[0]) != numSet: continue inputs.append(input.copy()) file.close() # Setup a SP sp = SpatialPooler( columnDimensions=(spSize, 1), inputDimensions=(1, elemSize), potentialRadius=elemSize/2, numActiveColumnsPerInhArea=spSet, spVerbosity=0, stimulusThreshold=0, synPermConnected=0.10, seed=1, potentialPct=poolPct, globalInhibition=True ) cleanPlot = False doLearn = False print 'Finished reading file, inputs/outputs to process =', len(inputs) size = len(inputs) for iter in xrange(100): print 'Iteration', iter # Learn if iter != 0: for learnRecs in xrange(pattern[0]): # TODO: See https://github.com/numenta/nupic/issues/2072 ind = np.random.random_integers(0, size-1, 1)[0] sp.compute(inputs[ind], learn=True, activeArray=outputs[ind]) # Test for _ in xrange(pattern[1]): rand1 = np.random.random_integers(0, size-1, 1)[0] rand2 = np.random.random_integers(0, size-1, 1)[0] sp.compute(inputs[rand1], learn=False, activeArray=output1) sp.compute(inputs[rand2], learn=False, activeArray=output2) outDist = (abs(output1-output2) > 0.1) intOutDist = int(outDist.sum()/2+0.1) inDist = (abs(inputs[rand1]-inputs[rand2]) > 0.1) intInDist = int(inDist.sum()/2+0.1) if intInDist != numSet or intOutDist != spSet: print rand1, rand2, '-', intInDist, intOutDist x = int(PLOT_PRECISION*intOutDist/spSet) y = int(PLOT_PRECISION*intInDist/numSet) if distribMatrix[x, y] < 0.1: distribMatrix[x, y] = 3 else: if distribMatrix[x, y] < 10: distribMatrix[x, y] += 1 if True: plt.imshow(distribMatrix, origin='lower', interpolation = "nearest") plt.ylabel('SP (%d/%d) distance in pct' % (spSize, spSet)) plt.xlabel('Input (%d/%d) distance in pct' % (elemSize, numSet)) title = 'SP distribution' title += ', iter = %d' % iter title += ', Pct =%f' % poolPct plt.suptitle(title, fontsize=12) #plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosArtData', '%s' % iter)) plt.savefig(os.path.join(outdir, '%s' % iter)) plt.clf() distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
def testSPNew(): """ New version of the test""" elemSize = 400 numSet = 42 addNear = True numRecords = 1000 wantPlot = False poolPct = 0.5 itr = 5 pattern = [60, 1000] doLearn = True start = 1 learnIter = 0 noLearnIter = 0 numLearns = 0 numTests = 0 numIter = 1 numGroups = 1000 PLOT_PRECISION = 100.0 distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1)) inputs = generateRandomInput(numGroups, elemSize, numSet) # Setup a SP sp = SpatialPooler( columnDimensions=(2048, 1), inputDimensions=(1, elemSize), potentialRadius=elemSize/2, numActiveColumnsPerInhArea=40, spVerbosity=0, stimulusThreshold=0, synPermConnected=0.12, seed=1, potentialPct=poolPct, globalInhibition=True ) cleanPlot = False for i in xrange(numRecords): input1 = getRandomWithMods(inputs, 4) if i % 2 == 0: input2 = getRandomWithMods(inputs, 4) else: input2 = input1.copy() input2 = modifyBits(input2, 21) inDist = (abs(input1-input2) > 0.1) intInDist = int(inDist.sum()/2+0.1) #print intInDist if start == 0: doLearn = True learnIter += 1 if learnIter == pattern[start]: numLearns += 1 start = 1 noLearnIter = 0 elif start == 1: doLearn = False noLearnIter += 1 if noLearnIter == pattern[start]: numTests += 1 start = 0 learnIter = 0 cleanPlot = True # TODO: See https://github.com/numenta/nupic/issues/2072 sp.compute(input1, learn=doLearn, activeArray=output1) sp.compute(input2, learn=doLearn, activeArray=output2) time.sleep(0.001) outDist = (abs(output1-output2) > 0.1) intOutDist = int(outDist.sum()/2+0.1) if not doLearn and intOutDist < 2 and intInDist > 10: """ sp.spVerbosity = 10 # TODO: See https://github.com/numenta/nupic/issues/2072 sp.compute(input1, learn=doLearn, activeArray=output1) sp.compute(input2, learn=doLearn, activeArray=output2) sp.spVerbosity = 0 print 'Elements has very small SP distance: %d' % intOutDist print output1.nonzero() print output2.nonzero() print sp._firingBoostFactors[output1.nonzero()[0]] print sp._synPermBoostFactors[output1.nonzero()[0]] print 'Input elements distance is %d' % intInDist print input1.nonzero() print input2.nonzero() sys.stdin.readline() """ if not doLearn: x = int(PLOT_PRECISION*intOutDist/40.0) y = int(PLOT_PRECISION*intInDist/42.0) if distribMatrix[x, y] < 0.1: distribMatrix[x, y] = 3 else: if distribMatrix[x, y] < 10: distribMatrix[x, y] += 1 #print i # If we don't want a plot, just continue if wantPlot and cleanPlot: plt.imshow(distribMatrix, origin='lower', interpolation = "nearest") plt.ylabel('SP (2048/40) distance in %') plt.xlabel('Input (400/42) distance in %') title = 'SP distribution' #if doLearn: # title += ', leaning ON' #else: # title += ', learning OFF' title += ', learn sets = %d' % numLearns title += ', test sets = %d' % numTests title += ', iter = %d' % numIter title += ', groups = %d' % numGroups title += ', Pct =%f' % poolPct plt.suptitle(title, fontsize=12) #plt.show() plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosNew', '%s' % i)) plt.clf() distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1)) cleanPlot = False
def testSP(): """ Run a SP test """ elemSize = 400 numSet = 42 addNear = True numRecords = 2 wantPlot = True poolPct = 0.5 itr = 1 doLearn = True while numRecords < 3: # Setup a SP sp = SpatialPooler( columnDimensions=(2048, 1), inputDimensions=(1, elemSize), potentialRadius=elemSize/2, numActiveColumnsPerInhArea=40, spVerbosity=0, stimulusThreshold=0, seed=1, potentialPct=poolPct, globalInhibition=True ) # Generate inputs using rand() inputs = generateRandomInput(numRecords, elemSize, numSet) if addNear: # Append similar entries (distance of 1) appendInputWithNSimilarValues(inputs, 42) inputSize = len(inputs) print 'Num random records = %d, inputs to process %d' % (numRecords, inputSize) # Run a number of iterations, with learning on or off, # retrieve results from the last iteration only outputs = np.zeros((inputSize,2048)) numIter = 1 if doLearn: numIter = itr for iter in xrange(numIter): for i in xrange(inputSize): time.sleep(0.001) if iter == numIter - 1: # TODO: See https://github.com/numenta/nupic/issues/2072 sp.compute(inputs[i], learn=doLearn, activeArray=outputs[i]) #print outputs[i].sum(), outputs[i] else: # TODO: See https://github.com/numenta/nupic/issues/2072 output = np.zeros(2048) sp.compute(inputs[i], learn=doLearn, activeArray=output) # Build a plot from the generated input and output and display it distribMatrix = generatePlot(outputs, inputs) # If we don't want a plot, just continue if wantPlot: plt.imshow(distribMatrix, origin='lower', interpolation = "nearest") plt.ylabel('SP (2048/40) distance in %') plt.xlabel('Input (400/42) distance in %') title = 'SP distribution' if doLearn: title += ', leaning ON' else: title += ', learning OFF' title += ', inputs = %d' % len(inputs) title += ', iterations = %d' % numIter title += ', poolPct =%f' % poolPct plt.suptitle(title, fontsize=12) plt.show() #plt.savefig(os.path.join('~/Desktop/ExperimentResults/videos5', '%s' % numRecords)) #plt.clf() numRecords += 1 return
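# The tests above repeatedly measure distance between two binary SDRs as half the
# number of differing bits (for vectors with the same number of ON bits, this is
# the number of bits that moved). A small helper capturing that idiom, assuming
# numpy arrays of 0s and 1s:
import numpy as np

def sdrDistance(sdr1, sdr2):
  # Count positions where the two SDRs differ, then halve: each moved bit produces
  # two differences when both vectors have equal sparsity.
  differingBits = (np.abs(sdr1 - sdr2) > 0.1).sum()
  return int(differingBits / 2 + 0.1)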
numActiveBits) elif inputVectorType == 'dense': inputSize = 1000 inputVectors = generateDenseVectors(numInputVector, inputSize) elif inputVectorType == 'correlate-input': inputVectors = generateCorrelatedInputs() numInputVector, inputSize = inputVectors.shape else: raise ValueError columnNumber = 2048 sp = SpatialPooler((inputSize, 1), (columnNumber, 1), potentialRadius=int(0.5 * inputSize), numActiveColumnsPerInhArea=int(0.02 * columnNumber), globalInhibition=True, seed=1936, maxBoost=1, dutyCyclePeriod=1000, synPermActiveInc=0.001, synPermInactiveDec=0.001) inspectSpatialPoolerStats(sp, inputVectors, inputVectorType + "beforeTraining") # classification Accuracy before training noiseLevelList = np.linspace(0, 1.0, 21) accuracyBeforeTraining = classificationAccuracyVsNoise( sp, inputVectors, noiseLevelList) accuracyWithoutSP = classificationAccuracyVsNoise(None, inputVectors, noiseLevelList)
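# classificationAccuracyVsNoise() and the generate*Vectors() helpers are defined
# elsewhere. A minimal sketch of the kind of corruption such a noise sweep applies,
# assuming binary input vectors and a noiseLevel giving the fraction of ON bits to
# move to random OFF positions (the real helper may differ).
import numpy as np

def addNoiseToVector(inputVector, noiseLevel):
  vector = inputVector.copy()
  onBits = np.flatnonzero(vector)
  offBits = np.flatnonzero(vector == 0)
  numToFlip = int(noiseLevel * len(onBits))
  if numToFlip == 0:
    return vector
  # Turn off some active bits and turn on an equal number of inactive bits.
  vector[np.random.choice(onBits, numToFlip, replace=False)] = 0
  vector[np.random.choice(offBits, numToFlip, replace=False)] = 1
  return vector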
def setUp(self): self.sp = SpatialPooler(columnDimensions=[5], inputDimensions=[5])
def runHotgym(): timeOfDayEncoder = DateEncoder(timeOfDay=(21,1)) weekendEncoder = DateEncoder(weekend=21) scalarEncoder = RandomDistributedScalarEncoder(0.88) encodingWidth = timeOfDayEncoder.getWidth() \ + weekendEncoder.getWidth() \ + scalarEncoder.getWidth() sp = SpatialPooler( # How large the input encoding will be. inputDimensions=(encodingWidth), # How many mini-columns will be in the Spatial Pooler. columnDimensions=(2048), # What percent of the columns's receptive field is available for potential # synapses? potentialPct=0.85, # This means that the input space has no topology. globalInhibition=True, localAreaDensity=-1.0, # Roughly 2%, giving that there is only one inhibition area because we have # turned on globalInhibition (40 / 2048 = 0.0195) numActiveColumnsPerInhArea=40.0, # How quickly synapses grow and degrade. synPermInactiveDec=0.005, synPermActiveInc=0.04, synPermConnected=0.1, # boostStrength controls the strength of boosting. Boosting encourages # efficient usage of SP columns. boostStrength=3.0, # Random number generator seed. seed=1956, # Determines if inputs at the beginning and end of an input dimension should # be considered neighbors when mapping columns to inputs. wrapAround=False ) tm = TemporalMemory( # Must be the same dimensions as the SP columnDimensions=(2048, ), # How many cells in each mini-column. cellsPerColumn=32, # A segment is active if it has >= activationThreshold connected synapses # that are active due to infActiveState activationThreshold=16, initialPermanence=0.21, connectedPermanence=0.5, # Minimum number of active synapses for a segment to be considered during # search for the best-matching segments. minThreshold=12, # The max number of synapses added to a segment during learning maxNewSynapseCount=20, permanenceIncrement=0.1, permanenceDecrement=0.1, predictedSegmentDecrement=0.0, maxSegmentsPerCell=128, maxSynapsesPerSegment=32, seed=1960 ) classifier = SDRClassifierFactory.create() with open (_INPUT_FILE_PATH) as fin: reader = csv.reader(fin) headers = reader.next() reader.next() reader.next() for count, record in enumerate(reader): # Convert data string into Python date object. dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M") # Convert data value string into float. consumption = float(record[1]) # To encode, we need to provide zero-filled numpy arrays for the encoders # to populate. timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth()) weekendBits = numpy.zeros(weekendEncoder.getWidth()) consumptionBits = numpy.zeros(scalarEncoder.getWidth()) # Now we call the encoders create bit representations for each value. timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits) weekendEncoder.encodeIntoArray(dateString, weekendBits) scalarEncoder.encodeIntoArray(consumption, consumptionBits) # Concatenate all these encodings into one large encoding for Spatial # Pooling. encoding = numpy.concatenate( [timeOfDayBits, weekendBits, consumptionBits] ) # Create an array to represent active columns, all initially zero. This # will be populated by the compute method below. It must have the same # dimensions as the Spatial Pooler. activeColumns = numpy.zeros(2048) # Execute Spatial Pooling algorithm over input space. sp.compute(encoding, True, activeColumns) activeColumnIndices = numpy.nonzero(activeColumns)[0] # Execute Temporal Memory algorithm over active mini-columns. tm.compute(activeColumnIndices, learn=True) activeCells = tm.getActiveCells() # Get the bucket info for this input value for classification. 
bucketIdx = scalarEncoder.getBucketIndices(consumption)[0] # Run classifier to translate active cells back to scalar value. classifierResult = classifier.compute( recordNum=count, patternNZ=activeCells, classification={ "bucketIdx": bucketIdx, "actValue": consumption }, learn=True, infer=True ) # Print the best prediction for 1 step out. probability, value = sorted( zip(classifierResult[1], classifierResult["actualValues"]), reverse=True )[0] print("1-step: {:16} ({:4.4}%)".format(value, probability * 100))
def initialize(self): """ Initialize this node. """ if len(self.children) == 0: QtGui.QMessageBox.warning( None, "Warning", "Region '" + self.name + "' does not have any child!") return Node.initialize(self) for child in self.children: child.initialize() # Create the input map # An input map is a set of input elements (cells or sensor bits) that can be are grouped or combined # For example, if we have 2 children (#1 and #2) with dimensions 6 and 12 respectively, # a grouped input map would be something like: # 111111222222222222 # while a combined one would be something like: # 122122122122122122 self._inputMap = [] sumDimension = 0 if self.inputMapType == InputMapType.grouped: elemIdx = 0 for child in self.children: dimension = child.width * child.height sumDimension += dimension # Arrange input from child into input map of this region if child.type == NodeType.region: for column in child.columns: inputElem = column.cells[0] self._inputMap.append(inputElem) else: for bit in child.bits: inputElem = bit self._inputMap.append(inputElem) elemIdx += 1 elif self.inputMapType == InputMapType.combined: # Get the overall dimension and the minimum dimension among all children minDimension = self.children[0].width * self.children[0].height for child in self.children: dimension = child.width * child.height sumDimension += dimension if dimension < minDimension: minDimension = dimension # Use the minimum dimension as a multiplication common factor to determine the frequency of each child element in a sequence frequencies = [] nextIdx = [] for child in self.children: dimension = child.width * child.height if dimension % minDimension == 0: frequency = dimension / minDimension frequencies.append(frequency) nextIdx.append(0) else: QtGui.QMessageBox.warning( None, "Warning", "Children dimensions should have a common multiple factor!" 
) return # Distribute alternatively child elements into input map according to their frequencies childIdx = 0 for elemIdx in range(sumDimension): if childIdx == len(self.children): childIdx = 0 child = self.children[childIdx] # Start distribution taking in account the last inserted element i0 = nextIdx[childIdx] iN = i0 + frequencies[childIdx] nextIdx[childIdx] = iN for i in range(i0, iN): if child.type == NodeType.region: inputElem = child.columns[i].cells[0] self._inputMap.append(inputElem) else: inputElem = child.bits[i] self._inputMap.append(inputElem) # Alternate children childIdx += 1 # Initialize elements self.columns = [] colIdx = 0 for x in range(self.width): for y in range(self.height): column = Column() column.x = x column.y = y for z in range(self.numCellsPerColumn): cell = Cell() cell.index = (colIdx * self.numCellsPerColumn) + z cell.z = z column.cells.append(cell) self.columns.append(column) colIdx += 1 # Create Spatial Pooler instance with appropriate parameters self.spatialPooler = SpatialPooler( inputDimensions=(sumDimension, 1), columnDimensions=(self.width, self.height), potentialRadius=self.potentialRadius, potentialPct=self.potentialPct, globalInhibition=self.globalInhibition, localAreaDensity=self.localAreaDensity, numActiveColumnsPerInhArea=self.numActiveColumnsPerInhArea, stimulusThreshold=self.stimulusThreshold, synPermInactiveDec=self.proximalSynPermDecrement, synPermActiveInc=self.proximalSynPermIncrement, synPermConnected=self.proximalSynConnectedPerm, minPctOverlapDutyCycle=self.minPctOverlapDutyCycle, minPctActiveDutyCycle=self.minPctActiveDutyCycle, dutyCyclePeriod=self.dutyCyclePeriod, maxBoost=self.maxBoost, seed=-1, spVerbosity=False) # Create Temporal Pooler instance with appropriate parameters self.temporalPooler = TemporalPooler( columnDimensions=(self.width, self.height), cellsPerColumn=self.numCellsPerColumn, learningRadius=self.learningRadius, initialPermanence=self.distalSynInitialPerm, connectedPermanence=self.distalSynConnectedPerm, minThreshold=self.minThreshold, maxNewSynapseCount=self.maxNumNewSynapses, permanenceIncrement=self.distalSynPermIncrement, permanenceDecrement=self.distalSynPermDecrement, activationThreshold=self.activationThreshold, seed=42) return True
def SP(**kwargs):
  """
  @param inputDimensions: Comma separated list of the input encoder dimensions,
          i.e., (height, width, depth, ...). Ex: a one dimensional vector of
          length 100 is represented by (100), a 3 by 20 array is represented
          by (3,20).

  @param columnDimensions: Comma separated list of the pooler dimensions,
          i.e., (height, width, depth, ...). Ex: a one dimensional vector of
          length 100 is represented by (100), a 3 by 20 array is represented
          by (3,20).

  @param potentialRadius: The extent of the input to which each column can
          potentially connect. This can be thought of as the input bits which the
          SP can see, or its receptive field. Scalar input which defines a square
          or hypersquare area with sides of length 2 * potentialRadius + 1.

  @param potentialPct: Scalar between 0 and 1 which represents the percent of
          inputs, within a column's potentialRadius, that the column can be
          connected to. If set to 1, the column can possibly be connected to
          every input within its potentialRadius.

  @param globalInhibition: If True, then during inhibition the winning columns
          are those columns which are the most active columns within the entire
          region. Otherwise, the winning columns are selected with respect to the
          local neighborhoods, think of this like a receptive field. Using global
          inhibition boosts performance roughly 60x.

  @param localAreaDensity: The desired density of active columns within a local
          inhibition area (the size of which is set by the internally calculated
          inhibitionRadius, which is in turn determined from the average size of
          the connected potential pools of all columns). The inhibition logic
          will ensure that at most N columns remain ON within a local inhibition
          area, where N = localAreaDensity * (total number of columns in the
          inhibition area).

  @param numActiveColumnsPerInhArea: This specifies the number of winning
          columns, based on the number of connected synapses matching the input
          vector, per inhibition area. For example, if there was global
          inhibition and numActiveColumnsPerInhArea was set to 10, then the 10
          columns with the most connected synapses matching the input would be
          the active columns. In other words, the active columns are the columns
          with the top X overlap scores. This is an alternate way to control the
          density of the active columns. If numActiveColumnsPerInhArea is
          specified then localAreaDensity must be less than 0, and vice versa.
          When using this method, as columns learn and grow their effective
          receptive fields, the inhibitionRadius will grow, and hence the net
          density of the active columns will *decrease*. This is in contrast to
          the localAreaDensity method, which keeps the density of active columns
          the same regardless of the size of their receptive fields.

  @param stimulusThreshold: Scalar value specifying the minimum number of ON
          synapses required for a column to be ON. Used to prevent input noise
          from activating a column.
          Default is 0.

  @param synPermInactiveDec: Percent by which an inactive synapse is decremented
          in each round. An inactive synapse is a bit in the input vector
          connected to an active, aka winning, column but whose bit does not
          overlap with an ON bit in the input vector.
          Default is 0.008.

  @param synPermActiveInc: Percent by which an active synapse is incremented in
          each round. An active synapse is a bit in the input vector connected
          to an active, aka winning, column, whose bit overlaps with an ON bit
          in the input vector.
          Default is 0.05.

  @param synPermConnected: Synapse permanence connection threshold above which a
          synapse is considered a connected synapse. The SP will try to give a
          normal distribution of permanence values around this threshold, so
          that there are a lot of synapses which are primed to become connected
          or disconnected.
          Default is 0.1.

  @param minPctOverlapDutyCycle: Value between 0 and 1.0 which sets the floor on
          the frequency with which a column has at least stimulusThreshold
          active inputs. Periodically, based on dutyCyclePeriod, each column
          looks at the overlap duty cycle of all other columns within its
          inhibition radius and sets its own minimal acceptable duty cycle to
          minPctDutyCycleBeforeInh * max(other columns' duty cycles). On each
          iteration, if a column's overlap duty cycle is below this value, its
          permanence values will be boosted by synPermActiveInc.
          Default is 0.001.

  @param dutyCyclePeriod: The period used to calculate duty cycles, where higher
          values mean it takes a column longer to respond to changes in
          boostStrength or stimulusThreshold.
          Default is 1000.

  @param boostStrength: Float >= 0.0 which controls the strength of boosting. A
          value of 0 means no boosting. Boosting encourages columns to have
          similar activeDutyCycles as their neighbors, leading to more efficient
          column use. However, too much boosting may also lead to SP output
          instability.

  @param seed: Seed for the pseudo-random number generator.
          Default is -1.

  @param wrapAround: Determines if inputs at the beginning and end of an input
          dimension should be considered neighbors when mapping columns to
          inputs.
          Default is True.
  """
  return SpatialPooler(**kwargs)
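# A hedged example of calling the SP() wrapper documented above. The parameter
# values are illustrative only (a 1000-bit input and 2048 columns with roughly 2%
# activity), not taken from the surrounding code.
sp = SP(inputDimensions=(1000,),
        columnDimensions=(2048,),
        potentialRadius=500,
        potentialPct=0.85,
        globalInhibition=True,
        localAreaDensity=-1.0,
        numActiveColumnsPerInhArea=40,
        synPermConnected=0.1,
        boostStrength=3.0,
        seed=1956,
        wrapAround=True)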
def initModules(self, categories, inputIdx): modulesNames = {'wordSP', 'wordTM', 'actionSP', 'actionTM', 'generalSP', 'generalTM'} if (self.modulesParams is not None) and\ (set(self.modulesParams) == modulesNames): self.modulesParams['wordSP'].update(self.defaultWordSPParams) self.modulesParams['wordTM'].update(self.defaultWordTMParams) self.modulesParams['actionSP'].update(self.defaultActionSPParams) self.modulesParams['actionTM'].update(self.defaultActionTMParams) self.wordSP = SpatialPooler(**self.modulesParams['wordSP']) self.wordTM = TemporalMemory(**self.modulesParams['wordTM']) self.actionSP = SpatialPooler(**self.modulesParams['actionSP']) self.actionTM = TemporalMemory(**self.modulesParams['actionTM']) generalInputDimensions = max( self.wordTM.numberOfCells() + 1, self.actionTM.numberOfCells() + 1 ) generalColumnDimensions = (len(self.trainingData) * 3,) defaultGeneralSPParams = { 'inputDimensions': generalInputDimensions, 'columnDimensions': generalColumnDimensions, 'seed': self.spSeed } defaultGeneralTMParams = { 'columnDimensions': generalColumnDimensions, 'seed': self.tmSeed } self.modulesParams['generalSP'].update(defaultGeneralSPParams) self.modulesParams['generalTM'].update(defaultGeneralTMParams) self.generalSP = SpatialPooler(**self.modulesParams['generalSP']) self.generalTM = TemporalMemory(**self.modulesParams['generalTM']) print("Using external Parameters!") else: self.wordSP = SpatialPooler(**self.defaultWordSPParams) self.wordTM = TemporalMemory(**self.defaultWordTMParams) self.actionSP = SpatialPooler(**self.defaultActionSPParams) self.actionTM = TemporalMemory(**self.defaultActionTMParams) print("External parameters invalid or not found, using"\ " the default ones") generalInputDimensions = max( self.wordTM.numberOfCells() + 1, self.actionTM.numberOfCells() + 1 ) generalColumnDimensions = (len(self.trainingData) * 3,) defaultGeneralSPParams = { 'inputDimensions': generalInputDimensions, 'columnDimensions': generalColumnDimensions, 'seed': self.spSeed } defaultGeneralTMParams = { 'columnDimensions': generalColumnDimensions, 'seed': self.tmSeed } self.generalSP = SpatialPooler(**defaultGeneralSPParams) self.generalTM = TemporalMemory(**defaultGeneralTMParams) self.classifier = CLAClassifierCond( steps=[1, 2, 3], alpha=0.1, actValueAlpha=0.3, verbosity=0 )
def frequency(self, n=15, w=7, columnDimensions=2048, numActiveColumnsPerInhArea=40, stimulusThreshold=0, spSeed=1, spVerbosity=0, numColors=2, seed=42, minVal=0, maxVal=10, encoder='category', forced=True): """ Helper function that tests whether the SP predicts the most frequent record """ print "\nRunning SP overlap test..." print encoder, 'encoder,', 'Random seed:', seed, 'and', numColors, 'colors' #Setting up SP and creating training patterns # Instantiate Spatial Pooler spImpl = SpatialPooler( columnDimensions=(columnDimensions, 1), inputDimensions=(1, n), potentialRadius=n / 2, numActiveColumnsPerInhArea=numActiveColumnsPerInhArea, spVerbosity=spVerbosity, stimulusThreshold=stimulusThreshold, potentialPct=0.5, seed=spSeed, ) rnd.seed(seed) numpy.random.seed(seed) colors = [] coincs = [] reUsedCoincs = [] spOutput = [] patterns = set([]) # Setting up the encodings if encoder == 'scalar': enc = scalar.ScalarEncoder( name='car', w=w, n=n, minval=minVal, maxval=maxVal, periodic=False, forced=True ) # forced: it's strongly recommended to use w>=21, in the example we force skip the check for readibility for y in xrange(numColors): temp = enc.encode(rnd.random() * maxVal) colors.append(numpy.array(temp, dtype=realDType)) else: for y in xrange(numColors): sdr = numpy.zeros(n, dtype=realDType) # Randomly setting w out of n bits to 1 sdr[rnd.sample(xrange(n), w)] = 1 colors.append(sdr) # Training the sp print 'Starting to train the sp on', numColors, 'patterns' startTime = time.time() for i in xrange(numColors): # TODO: See https://github.com/numenta/nupic/issues/2072 spInput = colors[i] onCells = numpy.zeros(columnDimensions) spImpl.compute(spInput, learn=True, activeArray=onCells) spOutput.append(onCells.tolist()) activeCoincIndices = set(onCells.nonzero()[0]) # Checking if any of the active cells have been previously active reUsed = activeCoincIndices.intersection(patterns) if len(reUsed) == 0: # The set of all coincidences that have won at least once coincs.append((i, activeCoincIndices, colors[i])) else: reUsedCoincs.append((i, activeCoincIndices, colors[i])) # Adding the active cells to the set of coincs that have been active at # least once patterns.update(activeCoincIndices) if (i + 1) % 100 == 0: print 'Record number:', i + 1 print "Elapsed time: %.2f seconds" % (time.time() - startTime) print len(reUsedCoincs), "re-used coinc(s)," # Check if results match expectations summ = [] for z in coincs: summ.append( sum([len(z[1].intersection(y[1])) for y in reUsedCoincs])) zeros = len([x for x in summ if x == 0]) factor = max(summ) * len(summ) / sum(summ) if len(reUsed) < 10: self.assertLess( factor, 41, "\nComputed factor: %d\nExpected Less than %d" % (factor, 41)) self.assertLess( zeros, 0.99 * len(summ), "\nComputed zeros: %d\nExpected Less than %d" % (zeros, 0.99 * len(summ))) else: self.assertLess( factor, 8, "\nComputed factor: %d\nExpected Less than %d" % (factor, 8)) self.assertLess( zeros, 12, "\nComputed zeros: %d\nExpected Less than %d" % (zeros, 12))
def _initEncoder(self, w, n): self._SpatialPooler = SpatialPooler([w], [n], 403, 0.8, 1, -1.0, 40.0, 0)
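# The positional call above is hard to read. Assuming the standard parameter order
# of SpatialPooler.__init__ (inputDimensions, columnDimensions, potentialRadius,
# potentialPct, globalInhibition, localAreaDensity, numActiveColumnsPerInhArea,
# stimulusThreshold), a keyword-argument equivalent would look like this:
def _initEncoder(self, w, n):
  self._SpatialPooler = SpatialPooler(inputDimensions=[w],
                                      columnDimensions=[n],
                                      potentialRadius=403,
                                      potentialPct=0.8,
                                      globalInhibition=1,
                                      localAreaDensity=-1.0,
                                      numActiveColumnsPerInhArea=40.0,
                                      stimulusThreshold=0)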
from nupic.research.temporal_memory import TemporalMemory import numpy encoder1 = CategoryEncoder(5, ['a', 'b', 'c'], forced=True) encoder2 = CategoryEncoder(5, ['z', 'x', 'y', 'a'], forced=True) sp = SpatialPooler(inputDimensions=(2, max(encoder1.getWidth(), encoder2.getWidth())), columnDimensions=(2, 20), potentialRadius=12, potentialPct=0.5, globalInhibition=True, localAreaDensity=-1.0, numActiveColumnsPerInhArea=5.0, stimulusThreshold=0, synPermInactiveDec=0.1, synPermActiveInc=0.1, synPermConnected=0.1, minPctOverlapDutyCycle=0.1, minPctActiveDutyCycle=0.1, dutyCyclePeriod=10, maxBoost=3, seed=42, spVerbosity=0) tm = TemporalMemory( columnDimensions=sp.getColumnDimensions(), initialPermanence=0.4, connectedPermanence=0.5, minThreshold=4,
from nupic.support.unittesthelpers.algorithm_test_helpers import convertSP import numpy # just for debugging # Instantiate our spatial pooler sp = SpatialPooler( inputDimensions= (32, 32), # Size of image patch columnDimensions = (32, 32), potentialRadius = 10000, # Ensures 100% potential pool potentialPct = 0.8, globalInhibition = True, localAreaDensity = -1, # Using numActiveColumnsPerInhArea #localAreaDensity = 0.02, # one percent of columns active at a time #numActiveColumnsPerInhArea = -1, # Using percentage instead numActiveColumnsPerInhArea = 64, # All input activity can contribute to feature output stimulusThreshold = 0, synPermInactiveDec = 0.001, synPermActiveInc = 0.001, synPermConnected = 0.3, minPctOverlapDutyCycle=0.001, minPctActiveDutyCycle=0.001, dutyCyclePeriod=1000, maxBoost = 1.0, seed = 1956, # The seed that Grok uses spVerbosity = 1) # Instantiate the spatial pooler test bench. tb = VisionTestBench(sp) # Instantiate the classifier
if __name__ == "__main__": # Get training images and convert them to vectors. trainingImages, trainingTags = data.getImagesAndTags(trainingDataset) trainingVectors = encoder.imagesToVectors(trainingImages) # Instantiate the python spatial pooler sp = SpatialPooler( inputDimensions=32**2, # Size of image patch columnDimensions=16, # Number of potential features potentialRadius=10000, # Ensures 100% potential pool potentialPct=1, # Neurons can connect to 100% of input globalInhibition=True, localAreaDensity=-1, # Using numActiveColumnsPerInhArea #localAreaDensity = 0.02, # one percent of columns active at a time #numActiveColumnsPerInhArea = -1, # Using percentage instead numActiveColumnsPerInhArea=1, # Only one feature active at a time # All input activity can contribute to feature output stimulusThreshold=0, synPermInactiveDec=0.3, synPermActiveInc=0.3, synPermConnected=0.3, # Connected threshold maxBoost=2, seed=1956, # The seed that Grok uses spVerbosity=1) # Instantiate the spatial pooler test bench. tb = VisionTestBench(sp) # Instantiate the classifier clf = exactMatch()
def runHotgym(numRecords): with open(_PARAMS_PATH, "r") as f: modelParams = yaml.safe_load(f)["modelParams"] enParams = modelParams["sensorParams"]["encoders"] spParams = modelParams["spParams"] tmParams = modelParams["tmParams"] timeOfDayEncoder = DateEncoder( timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"]) weekendEncoder = DateEncoder( weekend=enParams["timestamp_weekend"]["weekend"]) scalarEncoder = RandomDistributedScalarEncoder( enParams["consumption"]["resolution"]) encodingWidth = (timeOfDayEncoder.getWidth() + weekendEncoder.getWidth() + scalarEncoder.getWidth()) sp = SpatialPooler( # How large the input encoding will be. inputDimensions=(encodingWidth), # How many mini-columns will be in the Spatial Pooler. columnDimensions=(spParams["columnCount"]), # What percent of the columns"s receptive field is available for potential # synapses? potentialPct=spParams["potentialPct"], # This means that the input space has no topology. globalInhibition=spParams["globalInhibition"], localAreaDensity=spParams["localAreaDensity"], # Roughly 2%, giving that there is only one inhibition area because we have # turned on globalInhibition (40 / 2048 = 0.0195) numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"], # How quickly synapses grow and degrade. synPermInactiveDec=spParams["synPermInactiveDec"], synPermActiveInc=spParams["synPermActiveInc"], synPermConnected=spParams["synPermConnected"], # boostStrength controls the strength of boosting. Boosting encourages # efficient usage of SP columns. boostStrength=spParams["boostStrength"], # Random number generator seed. seed=spParams["seed"], # TODO: is this useful? # Determines if inputs at the beginning and end of an input dimension should # be considered neighbors when mapping columns to inputs. wrapAround=False ) tm = TemporalMemory( # Must be the same dimensions as the SP columnDimensions=(tmParams["columnCount"],), # How many cells in each mini-column. cellsPerColumn=tmParams["cellsPerColumn"], # A segment is active if it has >= activationThreshold connected synapses # that are active due to infActiveState activationThreshold=tmParams["activationThreshold"], initialPermanence=tmParams["initialPerm"], # TODO: This comes from the SP params, is this normal connectedPermanence=spParams["synPermConnected"], # Minimum number of active synapses for a segment to be considered during # search for the best-matching segments. minThreshold=tmParams["minThreshold"], # The max number of synapses added to a segment during learning maxNewSynapseCount=tmParams["newSynapseCount"], permanenceIncrement=tmParams["permanenceInc"], permanenceDecrement=tmParams["permanenceDec"], predictedSegmentDecrement=0.0, maxSegmentsPerCell=tmParams["maxSegmentsPerCell"], maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"], seed=tmParams["seed"] ) classifier = SDRClassifierFactory.create() results = [] with open(_INPUT_FILE_PATH, "r") as fin: reader = csv.reader(fin) headers = reader.next() reader.next() reader.next() for count, record in enumerate(reader): if count >= numRecords: break # Convert data string into Python date object. dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M") # Convert data value string into float. consumption = float(record[1]) # To encode, we need to provide zero-filled numpy arrays for the encoders # to populate. 
timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth()) weekendBits = numpy.zeros(weekendEncoder.getWidth()) consumptionBits = numpy.zeros(scalarEncoder.getWidth()) # Now we call the encoders create bit representations for each value. timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits) weekendEncoder.encodeIntoArray(dateString, weekendBits) scalarEncoder.encodeIntoArray(consumption, consumptionBits) # Concatenate all these encodings into one large encoding for Spatial # Pooling. encoding = numpy.concatenate( [timeOfDayBits, weekendBits, consumptionBits] ) # Create an array to represent active columns, all initially zero. This # will be populated by the compute method below. It must have the same # dimensions as the Spatial Pooler. activeColumns = numpy.zeros(spParams["columnCount"]) # Execute Spatial Pooling algorithm over input space. sp.compute(encoding, True, activeColumns) activeColumnIndices = numpy.nonzero(activeColumns)[0] # Execute Temporal Memory algorithm over active mini-columns. tm.compute(activeColumnIndices, learn=True) activeCells = tm.getActiveCells() # Get the bucket info for this input value for classification. bucketIdx = scalarEncoder.getBucketIndices(consumption)[0] # Run classifier to translate active cells back to scalar value. classifierResult = classifier.compute( recordNum=count, patternNZ=activeCells, classification={ "bucketIdx": bucketIdx, "actValue": consumption }, learn=True, infer=True ) # Print the best prediction for 1 step out. oneStepConfidence, oneStep = sorted( zip(classifierResult[1], classifierResult["actualValues"]), reverse=True )[0] print("1-step: {:16} ({:4.4}%)".format(oneStep, oneStepConfidence * 100)) results.append([oneStep, oneStepConfidence * 100, None, None]) return results
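# A small usage sketch for the function above, assuming the gym data file referenced
# by _INPUT_FILE_PATH is available. Each result row is [prediction, confidence, None, None].
if __name__ == "__main__":
  results = runHotgym(100)
  print("processed %d records" % len(results))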
encodingWidth = timeOfDayEncoder.getWidth() \ + weekendEncoder.getWidth() \ + scalarEncoder.getWidth() sp = SpatialPooler( # How large the input encoding will be. inputDimensions=(encodingWidth), # How many mini-columns will be in the Spatial Pooler. columnDimensions=(2048), # What percent of the columns's receptive field is available for potential # synapses? potentialPct=0.85, # This means that the input space has no topology. globalInhibition=True, localAreaDensity=-1.0, # Roughly 2%, giving that there is only one inhibition area because we have # turned on globalInhibition (40 / 2048 = 0.0195) numActiveColumnsPerInhArea=40.0, # How quickly synapses grow and degrade. synPermInactiveDec=0.005, synPermActiveInc=0.04, synPermConnected=0.1, # boostStrength controls the strength of boosting. Boosting encourages # efficient usage of SP columns. boostStrength=3.0, # Random number generator seed. seed=1956, # Determines if inputs at the beginning and end of an input dimension should # be considered neighbors when mapping columns to inputs. wrapAround=False)
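# An illustrative check of the "roughly 2%" comment above: run one encoding through
# the SP and measure the fraction of active columns. Assumes `encoding` is a 0/1
# numpy array of length encodingWidth built from the three encoders.
import numpy

activeColumns = numpy.zeros(2048)
sp.compute(encoding, True, activeColumns)
sparsity = activeColumns.sum() / 2048.0
print("active column sparsity: %.4f" % sparsity)  # expected near 40 / 2048 = 0.0195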
def _runLearnInference(self, n=30, w=15, columnDimensions=2048, numActiveColumnsPerInhArea=40, spSeed=1951, spVerbosity=0, numTrainingRecords=100, seed=42): # Instantiate two identical spatial pooler. One will be used only for # learning. The other will be trained with identical records, but with # random inference calls thrown in spLearnOnly = SpatialPooler( columnDimensions=(columnDimensions, 1), inputDimensions=(1, n), potentialRadius=n / 2, numActiveColumnsPerInhArea=numActiveColumnsPerInhArea, spVerbosity=spVerbosity, seed=spSeed, synPermInactiveDec=0.01, synPermActiveInc=0.2, synPermConnected=0.11, ) spLearnInfer = SpatialPooler( columnDimensions=(columnDimensions, 1), inputDimensions=(1, n), potentialRadius=n / 2, numActiveColumnsPerInhArea=numActiveColumnsPerInhArea, spVerbosity=spVerbosity, seed=spSeed, synPermInactiveDec=0.01, synPermActiveInc=0.2, synPermConnected=0.11, ) random.seed(seed) np.random.seed(seed) # Build up training set with numTrainingRecords patterns inputs = [] # holds post-encoded input patterns for i in xrange(numTrainingRecords): inputVector = np.zeros(n, dtype=realDType) inputVector[random.sample(xrange(n), w)] = 1 inputs.append(inputVector) # Train each SP with identical inputs startTime = time.time() random.seed(seed) np.random.seed(seed) for i in xrange(numTrainingRecords): if spVerbosity > 0: print "Input #%d" % i # TODO: See https://github.com/numenta/nupic/issues/2072 encodedInput = inputs[i] decodedOutput = np.zeros(columnDimensions) spLearnOnly.compute(encodedInput, learn=True, activeArray=decodedOutput) random.seed(seed) np.random.seed(seed) for i in xrange(numTrainingRecords): if spVerbosity > 0: print "Input #%d" % i # TODO: See https://github.com/numenta/nupic/issues/2072 encodedInput = inputs[i] decodedOutput = np.zeros(columnDimensions) spLearnInfer.compute(encodedInput, learn=True, activeArray=decodedOutput) print "\nElapsed time: %.2f seconds\n" % (time.time() - startTime) # Test that both SP"s are identical by checking learning stats # A more in depth test would check all the coincidences, duty cycles, etc. # ala tpDiff # Edit: spDiff has been written as an in depth tester of the spatial pooler learnOnlyStats = spLearnOnly.getLearningStats() learnInferStats = spLearnInfer.getLearningStats() success = True # Check that the two spatial poolers are equivalent after the same training. success = success and spDiff(spLearnInfer, spLearnOnly) self.assertTrue(success) # Make sure that the pickled and loaded SPs are equivalent. spPickle = pickle.dumps(spLearnOnly, protocol=0) spLearnOnlyLoaded = pickle.loads(spPickle) success = success and spDiff(spLearnOnly, spLearnOnlyLoaded) self.assertTrue(success) for k in learnOnlyStats.keys(): if learnOnlyStats[k] != learnInferStats[k]: success = False print "Stat", k, "is different:", learnOnlyStats[ k], learnInferStats[k] self.assertTrue(success) if success: print "Test succeeded"
def testInhibition(self): """ Test if the firing number of coincidences after inhibition equals spatial pooler numActiveColumnsPerInhArea. """ # Miscellaneous variables: # n, w: n, w of encoders # inputLen: Length of binary input # synPermConnected: Spatial pooler synPermConnected # synPermActiveInc: Spatial pooler synPermActiveInc # connectPct: Initial connect percentage of permanences # columnDimensions: Number of spatial pooler coincidences # numActiveColumnsPerInhArea: Spatial pooler numActiveColumnsPerInhArea # stimulusThreshold: Spatial pooler stimulusThreshold # spSeed: Spatial pooler for initial permanences # stimulusThresholdInh: Parameter for inhibition, default value 0.00001 # kDutyCycleFactor: kDutyCycleFactor for dutyCycleTieBreaker in # Inhibition # spVerbosity: Verbosity to print other sp initial parameters # testIter: Testing iterations n = 100 w = 15 inputLen = 300 columnDimensions = 2048 numActiveColumnsPerInhArea = 40 stimulusThreshold = 0 spSeed = 1956 stimulusThresholdInh = 0.00001 kDutyCycleFactor = 0.01 spVerbosity = 0 testIter = 100 spTest = SpatialPooler( columnDimensions=(columnDimensions, 1), inputDimensions=(1, inputLen), potentialRadius=inputLen / 2, numActiveColumnsPerInhArea=numActiveColumnsPerInhArea, spVerbosity=spVerbosity, stimulusThreshold=stimulusThreshold, seed=spSeed ) initialPermanence = spTest._initialPermanence() spTest._masterPotentialM, spTest._masterPermanenceM = ( spTest._makeMasterCoincidences(spTest.numCloneMasters, spTest._coincRFShape, spTest.potentialPct, initialPermanence, spTest.random)) spTest._updateInhibitionObj() boostFactors = numpy.ones(columnDimensions) for i in range(testIter): spTest._iterNum = i # random binary input input_ = numpy.zeros((1, inputLen)) nonzero = numpy.random.random(inputLen) input_[0][numpy.where (nonzero < float(w)/float(n))] = 1 # overlap step spTest._computeOverlapsFP(input_, stimulusThreshold=spTest.stimulusThreshold) spTest._overlaps *= boostFactors onCellIndices = numpy.where(spTest._overlaps > 0) spTest._onCells.fill(0) spTest._onCells[onCellIndices] = 1 denseOn = spTest._onCells # update _dutyCycleBeforeInh spTest.dutyCyclePeriod = min(i + 1, 1000) spTest._dutyCycleBeforeInh = ( (spTest.dutyCyclePeriod - 1) * spTest._dutyCycleBeforeInh +denseOn) / spTest.dutyCyclePeriod dutyCycleTieBreaker = spTest._dutyCycleAfterInh.copy() dutyCycleTieBreaker *= kDutyCycleFactor # inhibition step numOn = spTest._inhibitionObj.compute( spTest._overlaps + dutyCycleTieBreaker, spTest._onCellIndices, stimulusThresholdInh, # stimulusThresholdInh max(spTest._overlaps)/1000, # addToWinners ) # update _dutyCycleAfterInh spTest._onCells.fill(0) onCellIndices = spTest._onCellIndices[0:numOn] spTest._onCells[onCellIndices] = 1 denseOn = spTest._onCells spTest._dutyCycleAfterInh = (((spTest.dutyCyclePeriod-1) * spTest._dutyCycleAfterInh + denseOn) / spTest.dutyCyclePeriod) # learning step spTest._adaptSynapses(onCellIndices, [], input_) # update boostFactor spTest._updateBoostFactors() boostFactors = spTest._firingBoostFactors # update dutyCycle and boost if ((spTest._iterNum+1) % 50) == 0: spTest._updateInhibitionObj() spTest._updateMinDutyCycles( spTest._dutyCycleBeforeInh, spTest.minPctDutyCycleBeforeInh, spTest._minDutyCycleBeforeInh) spTest._updateMinDutyCycles( spTest._dutyCycleAfterInh, spTest.minPctDutyCycleAfterInh, spTest._minDutyCycleAfterInh) # test numOn and spTest.numActiveColumnsPerInhArea self.assertEqual(numOn, spTest.numActiveColumnsPerInhArea, "Error at input %s, actual numOn are: %i, " "numActivePerInhAre is: 
%s" % ( i, numOn, numActiveColumnsPerInhArea))
# Pick a combination of parameter values parameters.nextCombination() #parameters.nextRandomCombination() synPermConn = parameters.getValue("synPermConn") synPermDec = synPermConn * parameters.getValue("synPermDecFrac") synPermInc = synPermConn * parameters.getValue("synPermIncFrac") # Instantiate our spatial pooler sp = SpatialPooler( inputDimensions=(32, 32), # Size of image patch columnDimensions=(32, 32), potentialRadius=10000, # Ensures 100% potential pool potentialPct=0.8, globalInhibition=True, localAreaDensity=-1, # Using numActiveColumnsPerInhArea numActiveColumnsPerInhArea=64, # All input activity can contribute to feature output stimulusThreshold=0, synPermInactiveDec=synPermDec, synPermActiveInc=synPermInc, synPermConnected=synPermConn, maxBoost=1.0, seed=1956, # The seed that Grok uses spVerbosity=1) # Instantiate the spatial pooler test bench. tb = VisionTestBench(sp) # Instantiate the classifier clf = KNNClassifier() # Train the spatial pooler on trainingVectors.
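# The `parameters` helper above supplies combinations of synPermConn and the
# increment/decrement fractions. A generic stand-in using itertools, offered only as
# an illustration of the sweep; the real helper and value ranges may differ.
import itertools

synPermConnValues = [0.2, 0.3, 0.4]
synPermDecFracValues = [0.5, 1.0]
synPermIncFracValues = [0.5, 1.0]

for synPermConn, decFrac, incFrac in itertools.product(
    synPermConnValues, synPermDecFracValues, synPermIncFracValues):
  synPermDec = synPermConn * decFrac
  synPermInc = synPermConn * incFrac
  # ... instantiate the SpatialPooler and run the test bench as above ...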
def create_network():
  enc = MatrixEncoder((64, 64))
  sp = SpatialPooler(inputDimensions=4096, columnDimensions=1024)
  tp = TP(numberOfCols=1024)
  return enc, sp, tp
def initialize(self): """ Initialize this node. """ # Check if this region has nodes that feed it numFeeders = len(Global.project.network.getFeederNodes(self)) if numFeeders == 0: QtGui.QMessageBox.warning(None, "Warning", "Region '" + self.name + "' does not have any child!") return # Initialize this node and the nodes that feed it Node.initialize(self) # Create the input map # An input map is a set of input elements (cells or sensor bits) that should are grouped # For example, if we have 2 nodes that feed this region (#1 and #2) with dimensions 6 and 12 respectively, # a input map would be something like: # 111111222222222222 self._inputMap = [] elemIdx = 0 for feeder in Global.project.network.getFeederNodes(self): # Arrange input from feeder into input map of this region if feeder.type == NodeType.region: for column in feeder.columns: inputElem = column.cells[0] self._inputMap.append(inputElem) else: for bit in feeder.bits: inputElem = bit self._inputMap.append(inputElem) elemIdx += 1 # Initialize elements self.columns = [] colIdx = 0 for x in range(self.width): for y in range(self.height): column = Column() column.x = x column.y = y for z in range(self.numCellsPerColumn): cell = Cell() cell.index = (colIdx * self.numCellsPerColumn) + z cell.z = z column.cells.append(cell) self.columns.append(column) colIdx += 1 # Create Spatial Pooler instance with appropriate parameters self.spatialPooler = SpatialPooler( inputDimensions = (self.getInputSize(), 1), columnDimensions = (self.width, self.height), potentialRadius = self.potentialRadius, potentialPct = self.potentialPct, globalInhibition = self.globalInhibition, localAreaDensity = self.localAreaDensity, numActiveColumnsPerInhArea = self.numActiveColumnsPerInhArea, stimulusThreshold = self.stimulusThreshold, synPermInactiveDec = self.proximalSynPermDecrement, synPermActiveInc = self.proximalSynPermIncrement, synPermConnected = self.proximalSynConnectedPerm, minPctOverlapDutyCycle = self.minPctOverlapDutyCycle, minPctActiveDutyCycle = self.minPctActiveDutyCycle, dutyCyclePeriod = self.dutyCyclePeriod, maxBoost = self.maxBoost, seed = self.spSeed, spVerbosity = False) # Create Temporal Pooler instance with appropriate parameters self.temporalPooler = TemporalPooler( columnDimensions = (self.width, self.height), cellsPerColumn = self.numCellsPerColumn, initialPermanence = self.distalSynInitialPerm, connectedPermanence = self.distalSynConnectedPerm, minThreshold = self.minThreshold, maxNewSynapseCount = self.maxNumNewSynapses, permanenceIncrement = self.distalSynPermIncrement, permanenceDecrement = self.distalSynPermDecrement, activationThreshold = self.activationThreshold, seed = self.tpSeed) return True
exputils.deleteImages(outputImagePrefix + "*.gif")

inputWidth = 50
inputHeight = 50
input = exputils.getRandom2dBoolMatrix(inputWidth, inputHeight)
generateOutputMovie = True

flatInput = input.flatten()
flatInputLength = len(flatInput)

print "initializing spatial pooler"
spColumnHeight = flatInputLength
spatialPooler = SpatialPooler(inputDimensions=flatInputLength,
                              columnDimensions=spColumnHeight,
                              potentialRadius=10,
                              numActiveColumnsPerInhArea=1,
                              globalInhibition=True,
                              synPermActiveInc=0.03,
                              potentialPct=1.00)
print "spatial pooler initialization complete\n"

printInitialSynapses = False
if printInitialSynapses:
  print "spatial pooler initial randomly connected synapses:"
  for col in xrange(spColumnHeight):
    currentlyConnected = numpy.zeros(shape=flatInputLength, dtype="uint8")
    spatialPooler.getConnectedSynapses(column=col,
                                       connectedSynapses=currentlyConnected)
    print " ", currentlyConnected

print "spatial pooler initialized\n"
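# A small illustrative follow-up: summarize how many connected synapses each column
# starts with, reusing the same getConnectedSynapses() call as above.
connectedCounts = numpy.zeros(spColumnHeight, dtype="uint32")
row = numpy.zeros(flatInputLength, dtype="uint8")
for col in xrange(spColumnHeight):
  spatialPooler.getConnectedSynapses(column=col, connectedSynapses=row)
  connectedCounts[col] = row.sum()
print "mean connected synapses per column:", connectedCounts.mean()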