def __init__(self, locationConfigs, L4Overrides=None, useGaussian=False):
  """
  Build an L4 (sensory) + L6a (location modules) cortical column.

  @param L4Overrides (dict)
  Custom parameters for L4

  @param locationConfigs (sequence of dicts)
  Parameters for the location modules

  @param useGaussian (bool)
  If True, use Gaussian-bump ("rat") location modules; otherwise use
  square-bump Superficial2DLocationModule instances.
  """
  # Default L4 size: 150 minicolumns x 16 cells. The modules anchor onto
  # the full L4 cell population.
  L4cellCount = 150*16
  if useGaussian:
    self.L6aModules = [
      createRatModule(
        anchorInputSize=L4cellCount,
        **config)
      for config in locationConfigs]
  else:
    self.L6aModules = [
      Superficial2DLocationModule(
        anchorInputSize=L4cellCount,
        **config)
      for config in locationConfigs]

  L4Params = {
    "columnCount": 150,
    "cellsPerColumn": 16,
    # NOTE(review): the sum already covers every module, so the extra
    # len(locationConfigs) factor looks like it over-counts the basal
    # input width; other variants of this constructor use the plain
    # sum -- confirm whether the multiplier is intentional.
    "basalInputSize": (len(locationConfigs) *
                       sum(module.numberOfCells()
                           for module in self.L6aModules))
  }

  # Caller-supplied overrides win over the defaults above.
  if L4Overrides is not None:
    L4Params.update(L4Overrides)

  self.L4 = ApicalTiebreakPairMemory(**L4Params)
def __init__(self, locationConfigs, L4Overrides=None):
  """
  Build the L4 sensory layer and its associated L6a location modules.

  @param L4Overrides (dict)
  Custom parameters for L4

  @param locationConfigs (sequence of dicts)
  Parameters for the location modules
  """
  # Each configured module contributes prod(cellDimensions) cells; the
  # basal input width repeats that total once per module config.
  cellsAcrossConfigs = sum(np.prod(cfg["cellDimensions"])
                           for cfg in locationConfigs)

  sensoryParams = {
    "columnCount": 150,
    "cellsPerColumn": 16,
    "basalInputSize": len(locationConfigs) * cellsAcrossConfigs,
  }
  if L4Overrides is not None:
    sensoryParams.update(L4Overrides)

  self.L4 = ApicalTiebreakPairMemory(**sensoryParams)

  # The location modules anchor onto the full L4 cell population.
  anchorWidth = self.L4.numberOfCells()
  self.L6aModules = [
    Superficial2DLocationModule(anchorInputSize=anchorWidth, **cfg)
    for cfg in locationConfigs
  ]
def __init__(self, objects, objectPlacements, featureNames, locationConfigs,
             worldDimensions):
  """
  Set up the three-layer network (location modules, input layer, object
  layer) plus random feature SDRs for a 2D object-recognition experiment.
  """
  self.objects = objects
  self.objectPlacements = objectPlacements
  self.worldDimensions = worldDimensions

  # One random 15-of-150 SDR per feature name.
  self.features = dict(
    (k, np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
    for k in featureNames)

  # Location modules anchor onto the full input layer (150 cols x 32 cells).
  self.locationModules = [
    SuperficialLocationModule2D(anchorInputSize=150 * 32, **config)
    for config in locationConfigs
  ]

  self.inputLayer = ApicalTiebreakPairMemory(**{
    "columnCount": 150,
    "cellsPerColumn": 32,
    # presumably 18 = number of distal segments/offsets per module --
    # TODO confirm against the location-module implementation.
    "basalInputSize": 18 * sum(
      np.prod(config["cellDimensions"]) for config in locationConfigs),
    "apicalInputSize": 4096
  })

  self.objectLayer = ColumnPooler(**{"inputWidth": 150 * 32})

  # Use these for classifying SDRs and for testing whether they're correct.
  self.locationRepresentations = {
    # Example:
    # (objectName, (top, left)): [0, 26, 54, 77, 101, ...]
  }
  self.inputRepresentations = {
    # Example:
    # (objectName, (top, left), featureName): [0, 26, 54, 77, 101, ...]
  }
  self.objectRepresentations = {
    # Example:
    # objectName: [14, 19, 54, 107, 201, ...]
  }

  self.locationInWorld = None

  self.maxSettlingTime = 10

  self.monitors = {}
  self.nextMonitorToken = 1
class ApicalTiebreakTM_ApicalTiebreakTests(ApicalTiebreakTestBase,
                                           unittest.TestCase):
  """
  Run the "apical tiebreak" tests on the ApicalTiebreakTemporalMemory.
  """

  def constructTM(self, columnCount, basalInputSize, apicalInputSize,
                  cellsPerColumn, initialPermanence, connectedPermanence,
                  minThreshold, sampleSize, permanenceIncrement,
                  permanenceDecrement, predictedSegmentDecrement,
                  activationThreshold, seed):
    # Map the generic test-base parameters onto ApicalTiebreakPairMemory.
    # Apical segments get zero decrement: only basal prediction errors are
    # punished in these tests.
    params = {
      "columnCount": columnCount,
      "cellsPerColumn": cellsPerColumn,
      "initialPermanence": initialPermanence,
      "connectedPermanence": connectedPermanence,
      "minThreshold": minThreshold,
      "sampleSize": sampleSize,
      "permanenceIncrement": permanenceIncrement,
      "permanenceDecrement": permanenceDecrement,
      "basalPredictedSegmentDecrement": predictedSegmentDecrement,
      "apicalPredictedSegmentDecrement": 0.0,
      "activationThreshold": activationThreshold,
      "seed": seed,
      "basalInputSize": basalInputSize,
      "apicalInputSize": apicalInputSize,
    }

    self.tm = ApicalTiebreakPairMemory(**params)

  def compute(self, activeColumns, basalInput, apicalInput, learn):
    # The memory expects sorted uint32 arrays; the presented inputs double
    # as the growth candidates.
    activeColumns = np.array(sorted(activeColumns), dtype="uint32")
    basalInput = np.array(sorted(basalInput), dtype="uint32")
    apicalInput = np.array(sorted(apicalInput), dtype="uint32")

    self.tm.compute(activeColumns,
                    basalInput=basalInput,
                    basalGrowthCandidates=basalInput,
                    apicalInput=apicalInput,
                    apicalGrowthCandidates=apicalInput,
                    learn=learn)

  def getActiveCells(self):
    return self.tm.getActiveCells()

  def getPredictedCells(self):
    return self.tm.getPredictedCells()
def __init__(self, diameter, objects, featureNames):
  """
  Set up location/transition/feature SDRs and the three-layer network
  (single-layer location memory, input layer, object layer).
  """
  self.diameter = diameter
  self.objects = objects

  # A grid of location SDRs.
  self.locations = dict(
    ((i, j), np.array(sorted(random.sample(xrange(1000), 30)),
                      dtype="uint32"))
    for i in xrange(diameter)
    for j in xrange(diameter))

  # 8 transition SDRs -- one for each straight and diagonal direction.
  self.transitions = dict(
    ((i, j), np.array(sorted(random.sample(xrange(1000), 30)),
                      dtype="uint32"))
    for i in xrange(-1, 2)
    for j in xrange(-1, 2)
    if i != 0 or j != 0)

  # One random 15-of-150 SDR per feature name.
  self.features = dict(
    (k, np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
    for k in featureNames)

  self.locationLayer = SingleLayerLocationMemory(**{
    "cellCount": 1000,
    "deltaLocationInputSize": 1000,
    "featureLocationInputSize": 150 * 32,
    "sampleSize": 15,
    "activationThreshold": 10,
    "learningThreshold": 8,
  })

  self.inputLayer = ApicalTiebreakPairMemory(**{
    "columnCount": 150,
    "cellsPerColumn": 32,
    "basalInputSize": 1000,
    "apicalInputSize": 4096,
  })

  self.objectLayer = ColumnPooler(**{"inputWidth": 150 * 32})

  # Use these for classifying SDRs and for testing whether they're correct.
  self.inputRepresentations = {}
  self.objectRepresentations = {}
  self.learnedObjectPlacements = {}

  self.monitors = {}
  self.nextMonitorToken = 1
def __init__(self, locationConfigs, L4Overrides=None):
  """
  Construct the L4 layer first, then anchor one location module per
  config onto it.

  @param L4Overrides (dict)
  Custom parameters for L4

  @param locationConfigs (sequence of dicts)
  Parameters for the location modules
  """
  moduleCellTotal = sum(np.prod(cfg["cellDimensions"])
                        for cfg in locationConfigs)

  layerParams = {
    "columnCount": 150,
    "cellsPerColumn": 16,
    "basalInputSize": len(locationConfigs) * moduleCellTotal,
  }
  if L4Overrides is not None:
    layerParams.update(L4Overrides)

  self.L4 = ApicalTiebreakPairMemory(**layerParams)

  # Every module sees the whole L4 population as its anchor input.
  anchorSize = self.L4.numberOfCells()
  self.L6aModules = [
    Superficial2DLocationModule(anchorInputSize=anchorSize, **cfg)
    for cfg in locationConfigs
  ]
def __init__(self, locationConfigs, L4Overrides=None, bumpType="gaussian"):
  """
  Build the L6a location modules (one per config) and the L4 sensory layer
  whose basal input is the concatenated module populations.

  @param L4Overrides (dict)
  Custom parameters for L4

  @param locationConfigs (sequence of dicts)
  Parameters for the location modules

  @param bumpType (str)
  "gaussian", "gaussian2", or "square" -- selects the module factory.
  """
  self.bumpType = bumpType

  # Default L4 size: 150 minicolumns x 16 cells.
  L4cellCount = 150 * 16
  if bumpType == "gaussian":
    self.L6aModules = [
      createRatModuleFromCellCount(anchorInputSize=L4cellCount, **config)
      for config in locationConfigs
    ]
  elif bumpType == "gaussian2":
    self.L6aModules = [
      createRatModuleFromReadoutResolution(
        anchorInputSize=L4cellCount, **config)
      for config in locationConfigs
    ]
  elif bumpType == "square":
    self.L6aModules = [
      Superficial2DLocationModule(anchorInputSize=L4cellCount, **config)
      for config in locationConfigs
    ]
  else:
    raise ValueError("Invalid bumpType", bumpType)

  L4Params = {
    "columnCount": 150,
    "cellsPerColumn": 16,
    "basalInputSize": sum(module.numberOfCells()
                          for module in self.L6aModules)
  }

  # Caller-supplied overrides win over the defaults above.
  if L4Overrides is not None:
    L4Params.update(L4Overrides)

  self.L4 = ApicalTiebreakPairMemory(**L4Params)
def __init__(self, diameter, objects, featureNames):
  """
  Create the random SDR pools (locations, transitions, features) and the
  location / input / object layers for this experiment.
  """
  self.diameter = diameter
  self.objects = objects

  def randomSDR(populationSize, activeCount):
    # A sorted random SDR over the given population.
    return np.array(sorted(random.sample(xrange(populationSize),
                                         activeCount)),
                    dtype="uint32")

  # A grid of location SDRs, one per (row, col) cell of the world.
  self.locations = {}
  for row in xrange(diameter):
    for col in xrange(diameter):
      self.locations[(row, col)] = randomSDR(1000, 30)

  # 8 transition SDRs -- one per straight/diagonal unit step.
  self.transitions = {}
  for di in xrange(-1, 2):
    for dj in xrange(-1, 2):
      if di != 0 or dj != 0:
        self.transitions[(di, dj)] = randomSDR(1000, 30)

  # One feature SDR per feature name.
  self.features = {}
  for name in featureNames:
    self.features[name] = randomSDR(150, 15)

  self.locationLayer = SingleLayerLocationMemory(
    cellCount=1000,
    deltaLocationInputSize=1000,
    featureLocationInputSize=150*32,
    sampleSize=15,
    activationThreshold=10,
    learningThreshold=8,
  )

  self.inputLayer = ApicalTiebreakPairMemory(
    columnCount=150,
    cellsPerColumn=32,
    basalInputSize=1000,
    apicalInputSize=4096,
  )

  self.objectLayer = ColumnPooler(inputWidth=150 * 32)

  # Use these for classifying SDRs and for testing whether they're correct.
  self.inputRepresentations = {}
  self.objectRepresentations = {}
  self.learnedObjectPlacements = {}

  self.monitors = {}
  self.nextMonitorToken = 1
def constructTM(self, columnCount, basalInputSize, apicalInputSize,
                cellsPerColumn, initialPermanence, connectedPermanence,
                minThreshold, sampleSize, permanenceIncrement,
                permanenceDecrement, predictedSegmentDecrement,
                activationThreshold, seed):
  """
  Instantiate the ApicalTiebreakPairMemory under test from the generic
  test-base parameters.  Apical prediction errors are never punished
  (decrement fixed at 0.0).
  """
  self.tm = ApicalTiebreakPairMemory(
    columnCount=columnCount,
    cellsPerColumn=cellsPerColumn,
    initialPermanence=initialPermanence,
    connectedPermanence=connectedPermanence,
    minThreshold=minThreshold,
    sampleSize=sampleSize,
    permanenceIncrement=permanenceIncrement,
    permanenceDecrement=permanenceDecrement,
    basalPredictedSegmentDecrement=predictedSegmentDecrement,
    apicalPredictedSegmentDecrement=0.0,
    activationThreshold=activationThreshold,
    seed=seed,
    basalInputSize=basalInputSize,
    apicalInputSize=apicalInputSize,
  )
def __init__(self, objects, objectPlacements, featureNames, locationConfigs,
             worldDimensions):
  """
  Set up the location modules, input layer, and object layer, plus random
  feature SDRs, for a 2D object-recognition experiment.
  """
  self.objects = objects
  self.objectPlacements = objectPlacements
  self.worldDimensions = worldDimensions

  # One random 15-of-150 SDR per feature name.
  self.features = dict(
    (k, np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
    for k in featureNames)

  # Modules anchor onto the full input layer (150 cols x 32 cells).
  self.locationModules = [SuperficialLocationModule2D(anchorInputSize=150*32,
                                                      **config)
                          for config in locationConfigs]

  self.inputLayer = ApicalTiebreakPairMemory(**{
    "columnCount": 150,
    "cellsPerColumn": 32,
    # presumably 18 = number of distal segments/offsets per module --
    # TODO confirm against the location-module implementation.
    "basalInputSize": 18 * sum(np.prod(config["cellDimensions"])
                               for config in locationConfigs),
    "apicalInputSize": 4096
  })

  self.objectLayer = ColumnPooler(**{
    "inputWidth": 150 * 32
  })

  # Use these for classifying SDRs and for testing whether they're correct.
  self.locationRepresentations = {
    # Example:
    # (objectName, (top, left)): [0, 26, 54, 77, 101, ...]
  }
  self.inputRepresentations = {
    # Example:
    # (objectName, (top, left), featureName): [0, 26, 54, 77, 101, ...]
  }
  self.objectRepresentations = {
    # Example:
    # objectName: [14, 19, 54, 107, 201, ...]
  }

  self.locationInWorld = None

  self.maxSettlingTime = 10

  self.monitors = {}
  self.nextMonitorToken = 1
def __init__(self, l4N, l4W, numModules, moduleDimensions,
             maxActivePerModule, l6ActivationThreshold):
  """
  Wire up an L4 temporal memory driven basally by numModules L6 modules,
  plus a column pooler and a KNN classifier for readout.
  """
  self.numModules = numModules
  self.moduleDimensions = moduleDimensions
  self._cellsPerModule = np.prod(moduleDimensions)
  self.maxActivePerModule = maxActivePerModule
  self.l4N = l4N
  self.l4W = l4W
  self.l6ActivationThreshold = l6ActivationThreshold

  self.l4TM = TemporalMemory(
      columnCount=l4N,
      basalInputSize=numModules * self._cellsPerModule,
      cellsPerColumn=4,
      #activationThreshold=int(numModules / 2) + 1,
      #reducedBasalThreshold=int(numModules / 2) + 1,
      # Thresholds of 1: a single active basal synapse depolarizes a cell.
      activationThreshold=1,
      reducedBasalThreshold=1,
      # Permanence 1.0 with increment 1.0 / decrement 0.0: synapses are
      # immediately connected and never forgotten.
      initialPermanence=1.0,
      connectedPermanence=0.5,
      minThreshold=1,
      sampleSize=numModules,
      permanenceIncrement=1.0,
      permanenceDecrement=0.0,
  )
  # One independent Connections instance per L6 module.
  self.l6Connections = [
    Connections(numCells=self._cellsPerModule)
    for _ in xrange(numModules)
  ]

  self.pooler = ColumnPooler(inputWidth=self.numModules * self._cellsPerModule, )

  self.classifier = KNNClassifier(k=1, distanceMethod="rawOverlap")
  #self.classifier = KNNClassifier(k=1, distanceMethod="norm")

  # Active state
  self.activeL6Cells = [[] for _ in xrange(numModules)]
  self.activeL5Cells = [[] for _ in xrange(numModules)]
  self.predictedL6Cells = [set([]) for _ in xrange(numModules)]

  # Debug state
  self.activeL6BeforeMotor = [[] for _ in xrange(numModules)]
  self.l6ToL4Map = collections.defaultdict(list)
def __init__(self, l4N, l4W, numModules, moduleDimensions,
             maxActivePerModule, l6ActivationThreshold):
  """
  Build the L4 temporal memory, the per-module L6 connection tables, the
  column pooler, and the KNN readout classifier.
  """
  self.numModules = numModules
  self.moduleDimensions = moduleDimensions
  self._cellsPerModule = np.prod(moduleDimensions)
  self.maxActivePerModule = maxActivePerModule
  self.l4N = l4N
  self.l4W = l4W
  self.l6ActivationThreshold = l6ActivationThreshold

  totalL6Cells = numModules * self._cellsPerModule

  # Thresholds of 1 mean one active basal synapse suffices; permanences
  # start connected (1.0) and are never decremented.
  tmParams = {
    "columnCount": l4N,
    "basalInputSize": totalL6Cells,
    "cellsPerColumn": 4,
    "activationThreshold": 1,
    "reducedBasalThreshold": 1,
    "initialPermanence": 1.0,
    "connectedPermanence": 0.5,
    "minThreshold": 1,
    "sampleSize": numModules,
    "permanenceIncrement": 1.0,
    "permanenceDecrement": 0.0,
  }
  self.l4TM = TemporalMemory(**tmParams)

  # One independent Connections instance per L6 module.
  self.l6Connections = [Connections(numCells=self._cellsPerModule)
                        for _ in xrange(numModules)]

  self.pooler = ColumnPooler(inputWidth=totalL6Cells)

  self.classifier = KNNClassifier(k=1, distanceMethod="rawOverlap")

  # Active state, one slot per module.
  self.activeL6Cells = [[] for _ in xrange(numModules)]
  self.activeL5Cells = [[] for _ in xrange(numModules)]
  self.predictedL6Cells = [set([]) for _ in xrange(numModules)]

  # Debug state.
  self.activeL6BeforeMotor = [[] for _ in xrange(numModules)]
  self.l6ToL4Map = collections.defaultdict(list)
def __init__(self, locationConfigs, L4Overrides=None, bumpType="gaussian"):
  """
  Build the L6a location modules (one per config) and the L4 sensory layer
  whose basal input is the concatenated module populations.

  @param L4Overrides (dict)
  Custom parameters for L4

  @param locationConfigs (sequence of dicts)
  Parameters for the location modules

  @param bumpType (str)
  "gaussian", "gaussian2", or "square" -- selects the module factory.
  """
  self.bumpType = bumpType

  # Default L4 size: 150 minicolumns x 16 cells.
  L4cellCount = 150*16
  if bumpType == "gaussian":
    self.L6aModules = [
      createRatModuleFromCellCount(
        anchorInputSize=L4cellCount,
        **config)
      for config in locationConfigs]
  elif bumpType == "gaussian2":
    self.L6aModules = [
      createRatModuleFromReadoutResolution(
        anchorInputSize=L4cellCount,
        **config)
      for config in locationConfigs]
  elif bumpType == "square":
    self.L6aModules = [
      Superficial2DLocationModule(
        anchorInputSize=L4cellCount,
        **config)
      for config in locationConfigs]
  else:
    raise ValueError("Invalid bumpType", bumpType)

  L4Params = {
    "columnCount": 150,
    "cellsPerColumn": 16,
    "basalInputSize": sum(module.numberOfCells()
                          for module in self.L6aModules)
  }

  # Caller-supplied overrides win over the defaults above.
  if L4Overrides is not None:
    L4Params.update(L4Overrides)

  self.L4 = ApicalTiebreakPairMemory(**L4Params)
def __init__(
    self,
    locationConfigs,  # noqa: N803
    L4Overrides=None,
    bumpType="gaussian"):
  """
  Build the L6a location modules and the L4 sensory layer.

  @param L4Overrides (dict)
  Custom parameters for L4 (may include columnCount / cellsPerColumn).

  @param locationConfigs (sequence of dicts)
  Parameters for the location modules

  @param bumpType (str)
  Only "square" is supported by this variant.

  BUG FIX: the original indexed L4Overrides before checking it for None,
  so the documented default (L4Overrides=None) raised a TypeError.  We now
  merge the defaults with any overrides first and derive the L4 cell count
  from the merged parameters; override precedence is unchanged.
  """
  self.bumpType = bumpType

  l4_defaults = {
      "columnCount": 128,
      "cellsPerColumn": 32,
  }

  # Effective L4 geometry = defaults overlaid with any caller overrides.
  merged = dict(l4_defaults)
  if L4Overrides is not None:
    merged.update(L4Overrides)
  l4_cell_count = merged["columnCount"] * merged["cellsPerColumn"]

  if bumpType == "square":
    self.L6aModules = [
        Superficial2DLocationModule(anchorInputSize=l4_cell_count,
                                    **config)
        for config in locationConfigs
    ]
  else:
    raise ValueError("Invalid bumpType", bumpType)

  # Build the final parameter dict in the original order so that an
  # explicit "basalInputSize" in L4Overrides still wins.
  l4_params = dict(l4_defaults)
  l4_params["basalInputSize"] = sum(module.numberOfCells()
                                    for module in self.L6aModules)
  if L4Overrides is not None:
    l4_params.update(L4Overrides)

  self.L4 = ApicalTiebreakPairMemory(**l4_params)
def __init__(self,
             minX, maxX, minY, maxY,
             bottomUpInputSize, bottomUpOnBits,
             seed
             ):
  """
  Set up scalar encoders for the external (x, y) signal, bookkeeping
  counters, and the temporal memory under test.
  """
  # The encoder range is stretched by 10x; forced=True skips the encoder's
  # parameter validation.
  self.xEncoder = ScalarEncoder(5, minX, 10*maxX, n=75, forced=True)
  self.yEncoder = ScalarEncoder(5, minY, 10*maxY, n=75, forced=True)
  # Squaring the width/on-bits suggests the 2D signal is encoded as an
  # outer product of the x and y encodings -- TODO confirm at the caller.
  self.externalSize = self.xEncoder.getWidth()**2
  self.externalOnBits = self.xEncoder.w**2

  self.bottomUpInputSize = bottomUpInputSize
  self.bottomUpOnBits = bottomUpOnBits
  self.seed = seed

  # Experiment statistics, updated during training/testing.
  self.trainingIterations = 0
  self.testIterations = 0
  self.maxPredictionError = 0
  self.totalPredictionError = 0
  self.numMissedPredictions = 0

  self.tm = TM(columnCount = self.bottomUpInputSize,
               basalInputSize = self.externalSize,
               cellsPerColumn=4,
               initialPermanence=0.4,
               connectedPermanence=0.5,
               # A cell needs every external on-bit to stay predictive.
               minThreshold= self.externalOnBits,
               sampleSize=40,
               permanenceIncrement=0.1,
               permanenceDecrement=0.00,
               activationThreshold=int(0.75*(self.externalOnBits+self.bottomUpOnBits)),
               basalPredictedSegmentDecrement=0.00,
               seed = self.seed
               )
def __init__(self, numLocations=25, numMinicolumns=15, numActiveMinicolumns=10,
             poolingThreshold=8, cellsPerColumn=8, segmentedProximal=True,
             segmentedPooling=True, minicolumnSDRs=None):
  """
  Build the pair layer (minicolumns + ApicalTiebreakPairMemory) and the
  pooling layer, with evenly-spread SDRs for operands and results.
  """
  self.numOperandCells = 100
  self.numActiveOperandCells = 4
  self.numResultCells = 100
  self.numActiveResultCells = 4
  self.numLocations = numLocations
  self.numActiveMinicolumns = numActiveMinicolumns

  # Disjoint, evenly-spread SDR pools for context operands, results, and
  # driving operands.
  self.contextOperandSDRs = createEvenlySpreadSDRs(
    numLocations, self.numOperandCells, self.numActiveOperandCells)
  self.resultSDRs = createEvenlySpreadSDRs(
    numLocations, self.numResultCells, self.numActiveResultCells)
  self.drivingOperandSDRs = createEvenlySpreadSDRs(
    numLocations, self.numOperandCells, self.numActiveOperandCells)

  if minicolumnSDRs is None:
    self.minicolumnSDRs = createEvenlySpreadSDRs(
      self.numLocations, numMinicolumns, numActiveMinicolumns)
  else:
    # Caller supplied the minicolumn SDR pool; take a shuffled copy.
    assert len(minicolumnSDRs) >= self.numLocations
    self.minicolumnSDRs = list(minicolumnSDRs)
    random.shuffle(self.minicolumnSDRs)

  self.minicolumnParams = {
    "cellCount": numMinicolumns,
    "inputSize": self.numOperandCells,
    "threshold": self.numActiveOperandCells,
  }
  if segmentedProximal:
    self.pairLayerProximalConnections = SegmentedForwardModel(
      **self.minicolumnParams)
  else:
    self.pairLayerProximalConnections = ForwardModel(**self.minicolumnParams)

  self.pairParams = {
    "columnCount": numMinicolumns,
    "initialPermanence": 1.0,
    "cellsPerColumn": cellsPerColumn,
    "basalInputSize": self.numOperandCells,
    "activationThreshold": self.numActiveOperandCells,
    "minThreshold": self.numActiveOperandCells,
  }
  self.pairLayer = ApicalTiebreakPairMemory(**self.pairParams)

  self.poolingParams = {
    "cellCount": self.numResultCells,
    "inputSize": self.pairLayer.numberOfCells(),
    "threshold": poolingThreshold,
  }
  if segmentedPooling:
    self.poolingLayer = SegmentedForwardModel(**self.poolingParams)
  else:
    self.poolingLayer = ForwardModel(**self.poolingParams)
class PoolOfPairsLocation1DExperiment(object):
  """
  There are a lot of ways this experiment could choose to associate
  "operands" with results -- e.g. we could just do it randomly.

  This particular experiment assumes there are an equal number of
  "operand1", "operand2", and "result" values.  It assigns each
  operand/result an index, and it relates these via:

    result = (operand1 + operand2) % numLocations

  Note that this experiment would be fundamentally no different if it used
  subtraction:

    result = (operand1 - operand2) % numLocations

  The resulting network would be identical, it's just our interpretation
  of the SDRs that would change.

  This experiment intentionally mimics a 1D space with wraparound, with
  operands/results representing 1D locations and offsets.  You can think
  of this as:

    location2 = location1 + offset
    offset = location2 - location1
  """

  def __init__(self, numLocations=25, numMinicolumns=15,
               numActiveMinicolumns=10, poolingThreshold=8, cellsPerColumn=8,
               segmentedProximal=True, segmentedPooling=True,
               minicolumnSDRs=None):
    # Build the pair layer (minicolumns + ApicalTiebreakPairMemory) and the
    # pooling layer, with evenly-spread SDRs for operands and results.
    self.numOperandCells = 100
    self.numActiveOperandCells = 4
    self.numResultCells = 100
    self.numActiveResultCells = 4
    self.numLocations = numLocations
    self.numActiveMinicolumns = numActiveMinicolumns

    self.contextOperandSDRs = createEvenlySpreadSDRs(
      numLocations, self.numOperandCells, self.numActiveOperandCells)
    self.resultSDRs = createEvenlySpreadSDRs(
      numLocations, self.numResultCells, self.numActiveResultCells)
    self.drivingOperandSDRs = createEvenlySpreadSDRs(
      numLocations, self.numOperandCells, self.numActiveOperandCells)

    if minicolumnSDRs is None:
      self.minicolumnSDRs = createEvenlySpreadSDRs(
        self.numLocations, numMinicolumns, numActiveMinicolumns)
    else:
      # Caller supplied the minicolumn SDR pool; take a shuffled copy.
      assert len(minicolumnSDRs) >= self.numLocations
      self.minicolumnSDRs = list(minicolumnSDRs)
      random.shuffle(self.minicolumnSDRs)

    self.minicolumnParams = {
      "cellCount": numMinicolumns,
      "inputSize": self.numOperandCells,
      "threshold": self.numActiveOperandCells,
    }
    if segmentedProximal:
      self.pairLayerProximalConnections = SegmentedForwardModel(
        **self.minicolumnParams)
    else:
      self.pairLayerProximalConnections = ForwardModel(**self.minicolumnParams)

    self.pairParams = {
      "columnCount": numMinicolumns,
      "initialPermanence": 1.0,
      "cellsPerColumn": cellsPerColumn,
      "basalInputSize": self.numOperandCells,
      "activationThreshold": self.numActiveOperandCells,
      "minThreshold": self.numActiveOperandCells,
    }
    self.pairLayer = ApicalTiebreakPairMemory(**self.pairParams)

    self.poolingParams = {
      "cellCount": self.numResultCells,
      "inputSize": self.pairLayer.numberOfCells(),
      "threshold": poolingThreshold,
    }
    if segmentedPooling:
      self.poolingLayer = SegmentedForwardModel(**self.poolingParams)
    else:
      self.poolingLayer = ForwardModel(**self.poolingParams)

  def train(self):
    """
    Train the pair layer and pooling layer.
    """
    for iDriving, cDriving in enumerate(self.drivingOperandSDRs):
      minicolumnSDR = self.minicolumnSDRs[iDriving]
      self.pairLayerProximalConnections.associate(minicolumnSDR, cDriving)

      # For every context, teach the pooling layer the pair-layer winner
      # cells for (driving, context) -> result = (context + driving) % n.
      for iContext, cContext in enumerate(self.contextOperandSDRs):
        iResult = (iContext + iDriving) % self.numLocations
        cResult = self.resultSDRs[iResult]
        self.pairLayer.compute(minicolumnSDR, basalInput=cContext)
        cPair = self.pairLayer.getWinnerCells()
        self.poolingLayer.associate(cResult, cPair)

  def trainWithSpecificPairSDRs(self, pairLayerContexts):
    """
    Train the pair layer and pooling layer, manually choosing which
    contexts each cell will encode (i.e. the pair layer's distal
    connections).

    @param pairLayerContexts (list of lists of lists of ints)
    iContext integers for each cell, grouped by minicolumn.  For example,
    [[[1, 3], [2,4]], [[1, 2]]] would specify that cell 0 connects to
    location 1 and location 3, while cell 1 connects to locations 2 and 4,
    and cell 2 (in the second minicolumn) connects to locations 1 and 2.
    """
    # Grow basal segments in the pair layer.
    for iMinicolumn, contextsByCell in enumerate(pairLayerContexts):
      for iCell, cellContexts in enumerate(contextsByCell):
        # Flat cell index within the pair layer.
        iCellAbsolute = iMinicolumn*self.pairLayer.getCellsPerColumn() + iCell
        for context in cellContexts:
          segments = self.pairLayer.basalConnections.createSegments(
            [iCellAbsolute])
          # Permanence 1.0: the synapses are immediately connected.
          self.pairLayer.basalConnections.growSynapses(
            segments, self.contextOperandSDRs[context], 1.0)

    # Associate the pair layer's minicolumn SDRs with offset cell SDRs,
    # and associate the pooling layer's location SDRs with a pool of pair
    # SDRs.
    for iDriving, cDriving in enumerate(self.drivingOperandSDRs):
      minicolumnSDR = self.minicolumnSDRs[iDriving]
      self.pairLayerProximalConnections.associate(minicolumnSDR, cDriving)

      for iContext, cContext in enumerate(self.contextOperandSDRs):
        iResult = (iContext + iDriving) % self.numLocations
        cResult = self.resultSDRs[iResult]
        # Pick, in each active minicolumn, the cell assigned this context.
        cPair = [
          iMinicolumn*self.pairLayer.getCellsPerColumn() + iCell
          for iMinicolumn in minicolumnSDR
          for iCell, cellContexts in enumerate(pairLayerContexts[iMinicolumn])
          if iContext in cellContexts]
        # Exactly one cell per active minicolumn must encode the context.
        assert len(cPair) == len(minicolumnSDR)
        self.poolingLayer.associate(cResult, cPair)

  def testInferenceOnUnions(self, unionSize, numTests=300):
    """
    Select a random driving operand and a random union of context
    operands.  Test how well outputs a union of results.  Perform the test
    multiple times with different random selections.
    """
    additionalSDRCounts = []

    for _ in xrange(numTests):
      iContexts = random.sample(xrange(self.numLocations), unionSize)
      iDriving = random.choice(xrange(self.numLocations))
      cDriving = self.drivingOperandSDRs[iDriving]

      # Union of the selected context SDRs.
      cContext = np.unique(np.concatenate(
        [self.contextOperandSDRs[iContext] for iContext in iContexts]))
      # Union of the corresponding expected result SDRs.
      cResultExpected = np.unique(np.concatenate(
        [self.resultSDRs[(iContext + iDriving) % self.numLocations]
         for iContext in iContexts]))

      self.pairLayerProximalConnections.infer(cDriving)
      minicolumnSDR = self.pairLayerProximalConnections.activeCells
      assert minicolumnSDR.size == self.numActiveMinicolumns

      self.pairLayer.compute(minicolumnSDR, basalInput=cContext, learn=False)
      self.poolingLayer.infer(self.pairLayer.getActiveCells())

      # Every expected result bit must be active; extra bits are counted.
      assert np.all(np.in1d(cResultExpected, self.poolingLayer.activeCells))

      additionalSDRCounts.append(
        np.setdiff1d(self.poolingLayer.activeCells,
                     cResultExpected).size / self.numActiveResultCells
      )

    return additionalSDRCounts
class SingleLayerLocation2DExperiment(object):
  """
  The experiment code organized into a class.
  """

  def __init__(self, diameter, objects, featureNames):
    # Random SDR pools for locations, transitions, and features, plus the
    # three-layer network (location, input, object).
    self.diameter = diameter
    self.objects = objects

    # A grid of location SDRs.
    self.locations = dict(
      ((i, j), np.array(sorted(random.sample(xrange(1000), 30)),
                        dtype="uint32"))
      for i in xrange(diameter)
      for j in xrange(diameter))

    # 8 transition SDRs -- one for each straight and diagonal direction.
    self.transitions = dict(
      ((i, j), np.array(sorted(random.sample(xrange(1000), 30)),
                        dtype="uint32"))
      for i in xrange(-1, 2)
      for j in xrange(-1, 2)
      if i != 0 or j != 0)

    self.features = dict(
      (k, np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
      for k in featureNames)

    self.locationLayer = SingleLayerLocationMemory(**{
      "cellCount": 1000,
      "deltaLocationInputSize": 1000,
      "featureLocationInputSize": 150 * 32,
      "sampleSize": 15,
      "activationThreshold": 10,
      "learningThreshold": 8,
    })

    self.inputLayer = ApicalTiebreakPairMemory(**{
      "columnCount": 150,
      "cellsPerColumn": 32,
      "basalInputSize": 1000,
      "apicalInputSize": 4096,
    })

    self.objectLayer = ColumnPooler(**{"inputWidth": 150 * 32})

    # Use these for classifying SDRs and for testing whether they're
    # correct.
    self.inputRepresentations = {}
    self.objectRepresentations = {}
    self.learnedObjectPlacements = {}

    self.monitors = {}
    self.nextMonitorToken = 1

  def addMonitor(self, monitor):
    """
    Subscribe to SingleLayer2DExperiment events.

    @param monitor (SingleLayer2DExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """
    token = self.nextMonitorToken
    self.nextMonitorToken += 1

    self.monitors[token] = monitor

    return token

  def removeMonitor(self, monitorToken):
    """
    Unsubscribe from LocationExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
    del self.monitors[monitorToken]

  def doTimestep(self, locationSDR, transitionSDR, featureSDR,
                 egocentricLocation, learn):
    """
    Run one timestep: location layer, then input layer, then object layer,
    notifying monitors after each compute.
    """
    for monitor in self.monitors.values():
      monitor.beforeTimestep(locationSDR, transitionSDR, featureSDR,
                             egocentricLocation, learn)

    params = {
      "newLocation": locationSDR,
      "deltaLocation": transitionSDR,
      "featureLocationInput": self.inputLayer.getActiveCells(),
      "featureLocationGrowthCandidates":
        self.inputLayer.getPredictedActiveCells(),
      "learn": learn,
    }
    self.locationLayer.compute(**params)
    for monitor in self.monitors.values():
      monitor.afterLocationCompute(**params)

    params = {
      "activeColumns": featureSDR,
      "basalInput": self.locationLayer.getActiveCells(),
      "apicalInput": self.objectLayer.getActiveCells(),
    }
    self.inputLayer.compute(**params)
    for monitor in self.monitors.values():
      monitor.afterInputCompute(**params)

    params = {
      "feedforwardInput": self.inputLayer.getActiveCells(),
      "feedforwardGrowthCandidates":
        self.inputLayer.getPredictedActiveCells(),
      "learn": learn,
    }
    self.objectLayer.compute(**params)
    for monitor in self.monitors.values():
      monitor.afterObjectCompute(**params)

  def learnTransitions(self):
    """
    Train the location layer to do path integration.  For every location,
    teach it each previous-location + motor command pair.
    """
    print "Learning transitions"
    for (i, j), locationSDR in self.locations.iteritems():
      print "i, j", (i, j)
      for (di, dj), transitionSDR in self.transitions.iteritems():
        i2 = i + di
        j2 = j + dj
        if (0 <= i2 < self.diameter and
            0 <= j2 < self.diameter):
          # Repeat each pairing 5 times so the synapses are learned.
          for _ in xrange(5):
            self.locationLayer.reset()
            self.locationLayer.compute(
              newLocation=self.locations[(i, j)])
            self.locationLayer.compute(
              deltaLocation=transitionSDR,
              newLocation=self.locations[(i2, j2)])

    self.locationLayer.reset()

  def learnObjects(self, objectPlacements):
    """
    Learn each provided object in egocentric space.  Touch every location
    on each object.

    This method doesn't try move the sensor along a path.  Instead it just
    leaps the sensor to each object location, resetting the location layer
    with each leap.

    This method simultaneously learns 4 sets of synapses:
    - location -> input
    - input -> location
    - input -> object
    - object -> input
    """
    for monitor in self.monitors.values():
      monitor.afterPlaceObjects(objectPlacements)

    for objectName, objectDict in self.objects.iteritems():
      self.reset()

      objectPlacement = objectPlacements[objectName]

      for locationName, featureName in objectDict.iteritems():
        egocentricLocation = (locationName[0] + objectPlacement[0],
                              locationName[1] + objectPlacement[1])

        locationSDR = self.locations[egocentricLocation]
        featureSDR = self.features[featureName]
        transitionSDR = np.empty(0)

        self.locationLayer.reset()
        self.inputLayer.reset()

        # Settle for 10 timesteps on each sensed location.
        for _ in xrange(10):
          self.doTimestep(locationSDR, transitionSDR, featureSDR,
                          egocentricLocation, learn=True)

        self.inputRepresentations[(featureName, egocentricLocation)] = (
          self.inputLayer.getActiveCells())

      self.objectRepresentations[objectName] = self.objectLayer.getActiveCells()
      self.learnedObjectPlacements[objectName] = objectPlacement

  def _selectTransition(self, allocentricLocation, objectDict, visitCounts):
    """
    Choose the transition that lands us in the location we've touched the
    least often.  Break ties randomly, i.e. choose the first candidate in
    a shuffled list.
    """
    # Only consider transitions that stay on the object.
    candidates = list(
      transition
      for transition in self.transitions.keys()
      if (allocentricLocation[0] + transition[0],
          allocentricLocation[1] + transition[1]) in objectDict)

    random.shuffle(candidates)

    selectedVisitCount = None
    selectedTransition = None
    selectedAllocentricLocation = None

    for transition in candidates:
      candidateLocation = (allocentricLocation[0] + transition[0],
                           allocentricLocation[1] + transition[1])

      if (selectedVisitCount is None or
          visitCounts[candidateLocation] < selectedVisitCount):
        selectedVisitCount = visitCounts[candidateLocation]
        selectedTransition = transition
        selectedAllocentricLocation = candidateLocation

    return selectedAllocentricLocation, selectedTransition

  def inferObject(self, objectPlacements, objectName, startPoint,
                  transitionSequence, settlingTime=2):
    """
    Move the sensor along the given transition sequence on one object,
    with no location input, letting the network settle at each touch.
    """
    for monitor in self.monitors.values():
      monitor.afterPlaceObjects(objectPlacements)

    objectDict = self.objects[objectName]

    self.reset()

    allocentricLocation = startPoint
    nextTransitionSDR = np.empty(0, dtype="uint32")

    transitionIterator = iter(transitionSequence)

    try:
      while True:
        featureName = objectDict[allocentricLocation]
        egocentricLocation = (allocentricLocation[0] +
                              objectPlacements[objectName][0],
                              allocentricLocation[1] +
                              objectPlacements[objectName][1])
        featureSDR = self.features[featureName]

        # One movement step, then settlingTime steps with no movement.
        steps = ([nextTransitionSDR] +
                 [np.empty(0)] * settlingTime)
        for transitionSDR in steps:
          self.doTimestep(np.empty(0), transitionSDR, featureSDR,
                          egocentricLocation, learn=False)

        transitionName = transitionIterator.next()
        allocentricLocation = (allocentricLocation[0] + transitionName[0],
                               allocentricLocation[1] + transitionName[1])
        nextTransitionSDR = self.transitions[transitionName]
    except StopIteration:
      pass

  def inferObjectsWithRandomMovements(self, objectPlacements, maxTouches=20,
                                      settlingTime=2):
    """
    Infer each object without any location input.
    """
    for monitor in self.monitors.values():
      monitor.afterPlaceObjects(objectPlacements)

    for objectName, objectDict in self.objects.iteritems():
      self.reset()

      visitCounts = defaultdict(int)

      learnedObjectPlacement = self.learnedObjectPlacements[objectName]

      allocentricLocation = random.choice(objectDict.keys())
      nextTransitionSDR = np.empty(0, dtype="uint32")

      # Traverse the object until it is inferred.
      success = False

      for _ in xrange(maxTouches):
        featureName = objectDict[allocentricLocation]
        egocentricLocation = (allocentricLocation[0] +
                              objectPlacements[objectName][0],
                              allocentricLocation[1] +
                              objectPlacements[objectName][1])
        featureSDR = self.features[featureName]

        # One movement step, then settlingTime steps with no movement.
        steps = ([nextTransitionSDR] +
                 [np.empty(0)] * settlingTime)
        for transitionSDR in steps:
          self.doTimestep(np.empty(0), transitionSDR, featureSDR,
                          egocentricLocation, learn=False)

        visitCounts[allocentricLocation] += 1

        # We should eventually infer the egocentric location where we
        # originally learned this location on the object.
        learnedEgocentricLocation = (
          allocentricLocation[0] + learnedObjectPlacement[0],
          allocentricLocation[1] + learnedObjectPlacement[1])

        if (set(self.objectLayer.getActiveCells()) == set(
              self.objectRepresentations[objectName]) and
            set(self.inputLayer.getActiveCells()) == set(
              self.inputRepresentations[(
                featureName, learnedEgocentricLocation)]) and
            set(self.locationLayer.getActiveCells()) == set(
              self.locations[learnedEgocentricLocation])):
          success = True
          break
        else:
          allocentricLocation, transitionName = self._selectTransition(
            allocentricLocation, objectDict, visitCounts)
          nextTransitionSDR = self.transitions[transitionName]

  def reset(self):
    # Clear all layer activity and notify monitors.
    self.locationLayer.reset()
    self.objectLayer.reset()
    self.inputLayer.reset()

    for monitor in self.monitors.values():
      monitor.afterReset()
class PIUNCorticalColumn(object):
  """
  A L4 + L6a network. Sensory input causes minicolumns in L4 to activate,
  which drives activity in L6a. Motor input causes L6a to perform path
  integration, updating its activity, which then depolarizes cells in L4.

  Whenever the sensor moves, call movementCompute. Whenever a sensory input
  arrives, call sensoryCompute.
  """

  def __init__(self, locationConfigs, L4Overrides=None, bumpType="gaussian"):
    """
    @param L4Overrides (dict)
    Custom parameters for L4

    @param locationConfigs (sequence of dicts)
    Parameters for the location modules

    @param bumpType (string)
    Selects the location module implementation: "gaussian", "gaussian2",
    or "square".
    """
    self.bumpType = bumpType

    # Every location module anchors onto the full set of L4 cells
    # (columnCount * cellsPerColumn).
    L4cellCount = 150*16
    if bumpType == "gaussian":
      self.L6aModules = [
        createRatModuleFromCellCount(
          anchorInputSize=L4cellCount,
          **config)
        for config in locationConfigs]
    elif bumpType == "gaussian2":
      self.L6aModules = [
        createRatModuleFromReadoutResolution(
          anchorInputSize=L4cellCount,
          **config)
        for config in locationConfigs]
    elif bumpType == "square":
      self.L6aModules = [
        Superficial2DLocationModule(
          anchorInputSize=L4cellCount,
          **config)
        for config in locationConfigs]
    else:
      raise ValueError("Invalid bumpType", bumpType)

    # L4's basal input is the flat concatenation of every location module's
    # cells (see getLocationRepresentation).
    L4Params = {
      "columnCount": 150,
      "cellsPerColumn": 16,
      "basalInputSize": sum(module.numberOfCells()
                            for module in self.L6aModules)
    }

    if L4Overrides is not None:
      L4Params.update(L4Overrides)
    self.L4 = ApicalTiebreakPairMemory(**L4Params)

  def movementCompute(self, displacement, noiseFactor = 0, moduleNoiseFactor = 0):
    """
    Path-integrate a movement in every location module.

    @param displacement (dict)
    The change in location. Example: {"top": 10, "left", 10}

    @return (dict)
    Data for logging/tracing.
    """
    if noiseFactor != 0:
      # Jitter the displacement with Gaussian noise before integrating.
      xdisp = np.random.normal(0, noiseFactor)
      ydisp = np.random.normal(0, noiseFactor)
    else:
      xdisp = 0
      ydisp = 0

    locationParams = {
      "displacement": [displacement["top"] + ydisp,
                       displacement["left"] + xdisp],
      "noiseFactor": moduleNoiseFactor
    }

    for module in self.L6aModules:
      module.movementCompute(**locationParams)

    return locationParams

  def sensoryCompute(self, activeMinicolumns, learn):
    """
    Present a sensation to L4, then anchor the location modules on the
    resulting L4 activity.

    @param activeMinicolumns (numpy array)
    List of indices of minicolumns to activate.

    @param learn (bool)
    If True, the two layers should learn this association.

    @return (tuple of dicts)
    Data for logging/tracing.
    """
    inputParams = {
      "activeColumns": activeMinicolumns,
      "basalInput": self.getLocationRepresentation(),
      "basalGrowthCandidates": self.getLearnableLocationRepresentation(),
      "learn": learn
    }
    self.L4.compute(**inputParams)

    locationParams = {
      "anchorInput": self.L4.getActiveCells(),
      "anchorGrowthCandidates": self.L4.getWinnerCells(),
      "learn": learn,
    }
    for module in self.L6aModules:
      module.sensoryCompute(**locationParams)

    return (inputParams, locationParams)

  def reset(self):
    """
    Clear all cell activity.
    """
    self.L4.reset()
    for module in self.L6aModules:
      module.reset()

  def activateRandomLocation(self):
    """
    Activate a random location in the location layer.
    """
    for module in self.L6aModules:
      module.activateRandomLocation()

  def getSensoryRepresentation(self):
    """
    Gets the active cells in the sensory layer.
    """
    return self.L4.getActiveCells()

  def getLocationRepresentation(self):
    """
    Get the full population representation of the location layer.
    """
    activeCells = np.array([], dtype="uint32")

    totalPrevCells = 0
    for module in self.L6aModules:
      # Offset each module's cell indices into one flat index space.
      activeCells = np.append(activeCells,
                              module.getActiveCells() + totalPrevCells)
      totalPrevCells += module.numberOfCells()

    return activeCells

  def getLearnableLocationRepresentation(self):
    """
    Get the cells in the location layer that should be associated with the
    sensory input layer representation. In some models, this is identical to
    the active cells. In others, it's a subset.
    """
    learnableCells = np.array([], dtype="uint32")

    totalPrevCells = 0
    for module in self.L6aModules:
      learnableCells = np.append(learnableCells,
                                 module.getLearnableCells() + totalPrevCells)
      totalPrevCells += module.numberOfCells()

    return learnableCells

  def getSensoryAssociatedLocationRepresentation(self):
    """
    Get the location cells in the location layer that were driven by the input
    layer (or, during learning, were associated with this input.)
    """
    cells = np.array([], dtype="uint32")

    totalPrevCells = 0
    for module in self.L6aModules:
      cells = np.append(cells,
                        module.sensoryAssociatedCells + totalPrevCells)
      totalPrevCells += module.numberOfCells()

    return cells
for c in range(len(x)): if c > 0 and c % 10 == 0: s += ' ' s += str(x[c]) s += ' ' return s # Step 1: create Temporal Pooler instance with appropriate parameters tm = TM( columnCount=10, basalInputSize=4, cellsPerColumn=4, initialPermanence=0.21, connectedPermanence=0.0, minThreshold=1, reducedBasalThreshold=1, permanenceIncrement=0.1, permanenceDecrement=0.0, activationThreshold=2, apicalInputSize=4, ) # Step 2: create input vectors to feed to the temporal memory. D = [[1, 0, 0, 0, 1, 0, 0, 0, 0, 1], [0, 1, 0, 1, 0, 0, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 1, 0, 0, 0]] x = [[1, 0, 1, 0], [0, 1, 0, 1], [1, 1, 0, 0]] Din0 = np.nonzero(D[0])[0] Din1 = np.nonzero(D[1])[0] Din2 = np.nonzero(D[2])[0]
class SingleLayerLocation2DExperiment(object):
  """
  The experiment code organized into a class.
  """

  def __init__(self, diameter, objects, featureNames):
    """
    @param diameter (int)
    Width/height of the square world, in grid cells.

    @param objects (dict)
    Maps object name -> dict of {allocentricLocation: featureName}.

    @param featureNames (sequence)
    Names of all features that can appear on objects.
    """
    self.diameter = diameter
    self.objects = objects

    # A grid of location SDRs.
    self.locations = dict(
      ((i, j), np.array(sorted(random.sample(xrange(1000), 30)),
                        dtype="uint32"))
      for i in xrange(diameter)
      for j in xrange(diameter))

    # 8 transition SDRs -- one for each straight and diagonal direction.
    self.transitions = dict(
      ((i, j), np.array(sorted(random.sample(xrange(1000), 30)),
                        dtype="uint32"))
      for i in xrange(-1, 2)
      for j in xrange(-1, 2)
      if i != 0 or j != 0)

    # One random 15-cell SDR (out of 150 minicolumns) per feature.
    self.features = dict(
      (k, np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
      for k in featureNames)

    self.locationLayer = SingleLayerLocationMemory(**{
      "cellCount": 1000,
      "deltaLocationInputSize": 1000,
      "featureLocationInputSize": 150*32,
      "sampleSize": 15,
      "activationThreshold": 10,
      "learningThreshold": 8,
    })

    self.inputLayer = ApicalTiebreakPairMemory(**{
      "columnCount": 150,
      "cellsPerColumn": 32,
      "basalInputSize": 1000,
      "apicalInputSize": 4096,
    })

    self.objectLayer = ColumnPooler(**{
      "inputWidth": 150 * 32
    })

    # Use these for classifying SDRs and for testing whether they're correct.
    self.inputRepresentations = {}
    self.objectRepresentations = {}
    self.learnedObjectPlacements = {}

    self.monitors = {}
    self.nextMonitorToken = 1

  def addMonitor(self, monitor):
    """
    Subscribe to SingleLayer2DExperiment events.

    @param monitor (SingleLayer2DExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """
    token = self.nextMonitorToken
    self.nextMonitorToken += 1

    self.monitors[token] = monitor

    return token

  def removeMonitor(self, monitorToken):
    """
    Unsubscribe from LocationExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
    del self.monitors[monitorToken]

  def doTimestep(self, locationSDR, transitionSDR, featureSDR,
                 egocentricLocation, learn):
    """
    Run one timestep: location layer, then input layer, then object layer,
    notifying monitors after each compute.
    """
    for monitor in self.monitors.values():
      monitor.beforeTimestep(locationSDR, transitionSDR, featureSDR,
                             egocentricLocation, learn)

    params = {
      "newLocation": locationSDR,
      "deltaLocation": transitionSDR,
      "featureLocationInput": self.inputLayer.getActiveCells(),
      "featureLocationGrowthCandidates":
        self.inputLayer.getPredictedActiveCells(),
      "learn": learn,
    }
    self.locationLayer.compute(**params)
    for monitor in self.monitors.values():
      monitor.afterLocationCompute(**params)

    params = {
      "activeColumns": featureSDR,
      "basalInput": self.locationLayer.getActiveCells(),
      "apicalInput": self.objectLayer.getActiveCells(),
    }
    self.inputLayer.compute(**params)
    for monitor in self.monitors.values():
      monitor.afterInputCompute(**params)

    params = {
      "feedforwardInput": self.inputLayer.getActiveCells(),
      "feedforwardGrowthCandidates":
        self.inputLayer.getPredictedActiveCells(),
      "learn": learn,
    }
    self.objectLayer.compute(**params)
    for monitor in self.monitors.values():
      monitor.afterObjectCompute(**params)

  def learnTransitions(self):
    """
    Train the location layer to do path integration. For every location, teach
    it each previous-location + motor command pair.
    """
    print "Learning transitions"
    for (i, j), locationSDR in self.locations.iteritems():
      print "i, j", (i, j)
      for (di, dj), transitionSDR in self.transitions.iteritems():
        i2 = i + di
        j2 = j + dj
        if (0 <= i2 < self.diameter and
            0 <= j2 < self.diameter):
          # Repeat each pairing 5 times so the synapses are learned.
          for _ in xrange(5):
            self.locationLayer.reset()
            self.locationLayer.compute(newLocation=self.locations[(i,j)])
            self.locationLayer.compute(deltaLocation=transitionSDR,
                                       newLocation=self.locations[(i2, j2)])

    self.locationLayer.reset()

  def learnObjects(self, objectPlacements):
    """
    Learn each provided object in egocentric space. Touch every location on
    each object.

    This method doesn't try move the sensor along a path. Instead it just leaps
    the sensor to each object location, resetting the location layer with each
    leap.

    This method simultaneously learns 4 sets of synapses:
    - location -> input
    - input -> location
    - input -> object
    - object -> input
    """
    for monitor in self.monitors.values():
      monitor.afterPlaceObjects(objectPlacements)

    for objectName, objectDict in self.objects.iteritems():
      self.reset()

      objectPlacement = objectPlacements[objectName]

      for locationName, featureName in objectDict.iteritems():
        egocentricLocation = (locationName[0] + objectPlacement[0],
                              locationName[1] + objectPlacement[1])

        locationSDR = self.locations[egocentricLocation]
        featureSDR = self.features[featureName]
        transitionSDR = np.empty(0)

        self.locationLayer.reset()
        self.inputLayer.reset()

        # Repeat the timestep so all 4 sets of synapses settle.
        for _ in xrange(10):
          self.doTimestep(locationSDR, transitionSDR, featureSDR,
                          egocentricLocation, learn=True)

        self.inputRepresentations[(featureName, egocentricLocation)] = (
          self.inputLayer.getActiveCells())

      self.objectRepresentations[objectName] = self.objectLayer.getActiveCells()
      self.learnedObjectPlacements[objectName] = objectPlacement

  def _selectTransition(self, allocentricLocation, objectDict, visitCounts):
    """
    Choose the transition that lands us in the location we've touched the least
    often. Break ties randomly, i.e. choose the first candidate in a shuffled
    list.
    """
    candidates = list(transition
                      for transition in self.transitions.keys()
                      if (allocentricLocation[0] + transition[0],
                          allocentricLocation[1] + transition[1]) in objectDict)
    random.shuffle(candidates)

    selectedVisitCount = None
    selectedTransition = None
    selectedAllocentricLocation = None

    for transition in candidates:
      candidateLocation = (allocentricLocation[0] + transition[0],
                           allocentricLocation[1] + transition[1])

      if (selectedVisitCount is None
          or visitCounts[candidateLocation] < selectedVisitCount):
        selectedVisitCount = visitCounts[candidateLocation]
        selectedTransition = transition
        selectedAllocentricLocation = candidateLocation

    return selectedAllocentricLocation, selectedTransition

  def inferObject(self, objectPlacements, objectName, startPoint,
                  transitionSequence, settlingTime=2):
    """
    Infer a single object by following a predetermined transition sequence
    from a fixed starting point, with no location input.
    """
    for monitor in self.monitors.values():
      monitor.afterPlaceObjects(objectPlacements)

    objectDict = self.objects[objectName]

    self.reset()

    allocentricLocation = startPoint
    nextTransitionSDR = np.empty(0, dtype="uint32")

    transitionIterator = iter(transitionSequence)

    try:
      while True:
        featureName = objectDict[allocentricLocation]
        egocentricLocation = (allocentricLocation[0] +
                              objectPlacements[objectName][0],
                              allocentricLocation[1] +
                              objectPlacements[objectName][1])
        featureSDR = self.features[featureName]

        # One timestep with motor input, then settlingTime settling timesteps.
        steps = ([nextTransitionSDR] +
                 [np.empty(0)]*settlingTime)
        for transitionSDR in steps:
          self.doTimestep(np.empty(0), transitionSDR, featureSDR,
                          egocentricLocation, learn=False)

        # StopIteration from .next() ends the traversal.
        transitionName = transitionIterator.next()
        allocentricLocation = (allocentricLocation[0] + transitionName[0],
                               allocentricLocation[1] + transitionName[1])
        nextTransitionSDR = self.transitions[transitionName]
    except StopIteration:
      pass

  def inferObjectsWithRandomMovements(self, objectPlacements, maxTouches=20,
                                      settlingTime=2):
    """
    Infer each object without any location input.
    """
    for monitor in self.monitors.values():
      monitor.afterPlaceObjects(objectPlacements)

    for objectName, objectDict in self.objects.iteritems():
      self.reset()

      visitCounts = defaultdict(int)
      learnedObjectPlacement = self.learnedObjectPlacements[objectName]

      allocentricLocation = random.choice(objectDict.keys())
      nextTransitionSDR = np.empty(0, dtype="uint32")

      # Traverse the object until it is inferred.
      success = False

      for _ in xrange(maxTouches):
        featureName = objectDict[allocentricLocation]
        egocentricLocation = (allocentricLocation[0] +
                              objectPlacements[objectName][0],
                              allocentricLocation[1] +
                              objectPlacements[objectName][1])
        featureSDR = self.features[featureName]

        steps = ([nextTransitionSDR] +
                 [np.empty(0)]*settlingTime)
        for transitionSDR in steps:
          self.doTimestep(np.empty(0), transitionSDR, featureSDR,
                          egocentricLocation, learn=False)

        visitCounts[allocentricLocation] += 1

        # We should eventually infer the egocentric location where we originally
        # learned this location on the object.
        learnedEgocentricLocation = (
          allocentricLocation[0] + learnedObjectPlacement[0],
          allocentricLocation[1] + learnedObjectPlacement[1])

        # Inference succeeds when all three layers match what was learned.
        if (set(self.objectLayer.getActiveCells()) ==
            set(self.objectRepresentations[objectName]) and
            set(self.inputLayer.getActiveCells()) ==
            set(self.inputRepresentations[(featureName,
                                           learnedEgocentricLocation)]) and
            set(self.locationLayer.getActiveCells()) ==
            set(self.locations[learnedEgocentricLocation])):
          success = True
          break
        else:
          allocentricLocation, transitionName = self._selectTransition(
            allocentricLocation, objectDict, visitCounts)
          nextTransitionSDR = self.transitions[transitionName]

  def reset(self):
    """
    Clear all cell activity and notify monitors.
    """
    self.locationLayer.reset()
    self.objectLayer.reset()
    self.inputLayer.reset()

    for monitor in self.monitors.values():
      monitor.afterReset()
class RelationalMemory(object):
  """
  Sensorimotor model pairing an L4 temporal memory with per-module L6
  connections, L5 "transform" cells, and a KNN classifier over the pooled
  L5 activity.
  """

  def __init__(self, l4N, l4W, numModules, moduleDimensions,
               maxActivePerModule, l6ActivationThreshold):
    """
    @param l4N (int) Number of L4 minicolumns.
    @param l4W (int) Number of active L4 minicolumns per sensation.
    @param numModules (int) Number of L6 modules.
    @param moduleDimensions (sequence) Cell-grid dimensions of each module.
    @param maxActivePerModule (int) History length kept per L6 module.
    @param l6ActivationThreshold (int) Min active connected synapses for a
           L6 segment to activate its cell.
    """
    self.numModules = numModules
    self.moduleDimensions = moduleDimensions
    self._cellsPerModule = np.prod(moduleDimensions)
    self.maxActivePerModule = maxActivePerModule
    self.l4N = l4N
    self.l4W = l4W
    self.l6ActivationThreshold = l6ActivationThreshold

    self.l4TM = TemporalMemory(
        columnCount=l4N,
        basalInputSize=numModules * self._cellsPerModule,
        cellsPerColumn=4,
        #activationThreshold=int(numModules / 2) + 1,
        #reducedBasalThreshold=int(numModules / 2) + 1,
        activationThreshold=1,
        reducedBasalThreshold=1,
        initialPermanence=1.0,
        connectedPermanence=0.5,
        minThreshold=1,
        sampleSize=numModules,
        permanenceIncrement=1.0,
        permanenceDecrement=0.0,
    )
    # One Connections instance per L6 module (stores L4 -> L6 segments).
    self.l6Connections = [Connections(numCells=self._cellsPerModule)
                          for _ in xrange(numModules)]

    #self.classifier = KNNClassifier(k=1, distanceMethod="rawOverlap")
    self.classifier = KNNClassifier(k=1, distanceMethod="norm")

    # Active state
    self.activeL6Cells = [[] for _ in xrange(numModules)]
    self.activeL5Cells = [[] for _ in xrange(numModules)]
    self.predictedL6Cells = [set([]) for _ in xrange(numModules)]

    # Debug state
    self.activeL6BeforeMotor = [[] for _ in xrange(numModules)]
    self.l6ToL4Map = collections.defaultdict(list)

  def reset(self):
    """
    Clear all active and predicted state (does not clear learned synapses).
    """
    self.activeL6Cells = [[] for _ in xrange(self.numModules)]
    self.activeL5Cells = [[] for _ in xrange(self.numModules)]
    self.predictedL6Cells = [set([]) for _ in xrange(self.numModules)]

  def trainFeatures(self, sensoryInputs):
    """
    Associate each sensory input with a random L6 cell per module, learning
    the bidirectional L6 <-> L4 mapping.
    """
    # Randomly assign bilateral connections and zero others
    for sense in sensoryInputs:
      # Choose L6 cells randomly
      activeL6Cells = [[np.random.randint(self._cellsPerModule)]
                       for _ in xrange(self.numModules)]
      l4BasalInput = getGlobalIndices(activeL6Cells, self._cellsPerModule)

      # Learn L6->L4 connections (repeated so the association is learned).
      self.l4TM.compute(activeColumns=sense, basalInput=l4BasalInput,
                        learn=True)
      self.l4TM.compute(activeColumns=sense, basalInput=l4BasalInput,
                        learn=True)
      self.l4TM.compute(activeColumns=sense, basalInput=l4BasalInput,
                        learn=True)
      self.l4TM.compute(activeColumns=sense, basalInput=l4BasalInput,
                        learn=True)
      activeL4Cells = self.l4TM.getActiveCells()
      # Debug: store the map
      for l6Cell in itertools.chain(*activeL6Cells):
        self.l6ToL4Map[l6Cell].extend(activeL4Cells)
      # Learn L4->L6 connections
      for l6Cells, connections in zip(activeL6Cells, self.l6Connections):
        # Assumes one cell active per L6 module when training features
        segment = connections.createSegment(l6Cells[0])
        for l4Cell in activeL4Cells:
          connections.createSynapse(segment, l4Cell, 1.0)

  def compute(self, ff, motor, objClass, outputFile):
    """Run one iteration of the online sensorimotor algorithm.

    This function has three stages:
    - The FEEDFORWARD pass drives

    Prerequisites: `trainFeatures` must have been run already

    :param ff: feedforward sensory input
    :param motor: the motor command for next move, in the form of delta
        coordinates
    :param objClass: the object class to train the classifier, or None
        if not learning
    """
    delta = motor

    # FEEDFORWARD
    # Determine active feature representation in l4, using lateral input
    # from l6 previous step feedback
    l4BasalInput = getGlobalIndices(self.predictedL6Cells,
                                    self._cellsPerModule)
    self.l4TM.compute(activeColumns=ff, basalInput=l4BasalInput, learn=False)
    predictedL4Cells = self.l4TM.getPredictedCells()
    activeL4Cells = self.l4TM.getActiveCells()

    # Drive L6 activation from l4
    for m, connections in enumerate(self.l6Connections):
      newCells = []
      activeConnectedPerSegment = connections.computeActivity(activeL4Cells,
                                                              0.5)[0]
      for flatIdx, activeConnected in enumerate(activeConnectedPerSegment):
        if activeConnected >= self.l6ActivationThreshold:
          cellIdx = connections.segmentForFlatIdx(flatIdx).cell
          newCells.append(cellIdx)

      #for cell in newCells:
      #  print connections.segmentsForCell(cell)
      #print newCells
      #assert len(newCells) <= 1

      # Most recent step's cells go at index 0; trim the history to
      # maxActivePerModule steps.
      self.activeL6Cells[m].insert(0, newCells)
      # TODO: This is the number of steps, not necessarily the number of cells
      lenBefore = len(self.activeL6Cells[m])
      del self.activeL6Cells[m][self.maxActivePerModule:]
      lenAfter = len(self.activeL6Cells[m])
      #assert lenBefore == lenAfter, "Debug assert to check that we aren't hitting limit on L6 activity. Can remove when we set max active low enough relative to object size (times number of train/test iterations)"

    self.activeL6BeforeMotor = [list(itertools.chain(*l6Module))
                                for l6Module in self.activeL6Cells]

    # Replace l5 activity with new transforms
    self.activeL5Cells = []
    for activeL6Module in self.activeL6Cells:
      transforms = set()
      # Pair the newest step's cells with every older cell in the history.
      for newCell in activeL6Module[0]:
        for prevCell in itertools.chain(*activeL6Module[1:]):
          if newCell == prevCell:
            continue
          # Transform from prev to new
          t1 = bind(prevCell, newCell, self.moduleDimensions)
          transforms.add(t1)
          # Transform from new to prev
          t2 = bind(newCell, prevCell, self.moduleDimensions)
          transforms.add(t2)
      self.activeL5Cells.append(list(transforms))

    # Pool into object representation
    globalL5ActiveCells = getGlobalIndices(self.activeL5Cells,
                                           self._cellsPerModule)
    denseL5 = np.zeros(self._cellsPerModule * self.numModules, dtype="bool")
    denseL5[globalL5ActiveCells] = 1
    self.prediction = self.classifier.infer(denseL5)
    if objClass is not None:
      self.classifier.learn(denseL5, objClass)
    #print globalL5ActiveCells

    # MOTOR
    # Update L6 based on motor command
    numActivePerModuleBefore = [sum([len(cells) for cells in active])
                                for active in self.activeL6Cells]

    # Path-integrate every cell in the history; the structure (module /
    # step / cell) is preserved.
    self.activeL6Cells = [
      [[pathIntegrate(c, self.moduleDimensions, delta)
        for c in steps]
       for steps in prevActiveCells]
      for prevActiveCells in self.activeL6Cells]

    numActivePerModuleAfter = [sum([len(cells) for cells in active])
                               for active in self.activeL6Cells]
    assert numActivePerModuleAfter == numActivePerModuleBefore

    # FEEDBACK
    # Get all transforms associated with object
    # TODO: Get transforms from object in addition to current activity
    predictiveTransforms = [l5Active for l5Active in self.activeL5Cells]

    # Get set of predicted l6 representations (including already active)
    # and store them for next step l4 compute
    self.predictedL6Cells = []
    for l6, l5 in itertools.izip(self.activeL6Cells, predictiveTransforms):
      predictedCells = []
      for activeL6Cell in set(itertools.chain(*l6)):
        for activeL5Cell in l5:
          predictedCell = unbind(activeL6Cell, activeL5Cell,
                                 self.moduleDimensions)
          predictedCells.append(predictedCell)
      self.predictedL6Cells.append(
        set(list(itertools.chain(*l6)) + predictedCells))

    # Log this step
    if outputFile:
      log = RelationalMemoryLog.new_message()
      log.ts = time.time()
      sensationProto = log.init("sensation", len(ff))
      for i in xrange(len(ff)):
        sensationProto[i] = int(ff[i])
      predictedL4Proto = log.init("predictedL4", len(predictedL4Cells))
      for i in xrange(len(predictedL4Cells)):
        predictedL4Proto[i] = int(predictedL4Cells[i])
      activeL4Proto = log.init("activeL4", len(activeL4Cells))
      for i in xrange(len(activeL4Cells)):
        activeL4Proto[i] = int(activeL4Cells[i])
      activeL6HistoryProto = log.init("activeL6History",
                                      len(self.activeL6Cells))
      for i in xrange(len(self.activeL6Cells)):
        activeL6ModuleProto = activeL6HistoryProto.init(
          i, len(self.activeL6Cells[i]))
        for j in xrange(len(self.activeL6Cells[i])):
          activeL6ModuleStepProto = activeL6ModuleProto.init(
            j, len(self.activeL6Cells[i][j]))
          for k in xrange(len(self.activeL6Cells[i][j])):
            activeL6ModuleStepProto[k] = int(self.activeL6Cells[i][j][k])
      activeL5Proto = log.init("activeL5", len(self.activeL5Cells))
      for i in xrange(len(self.activeL5Cells)):
        activeL5ModuleProto = activeL5Proto.init(i,
                                                 len(self.activeL5Cells[i]))
        for j in xrange(len(self.activeL5Cells[i])):
          activeL5ModuleProto[j] = int(self.activeL5Cells[i][j])

      classifierResults = [(i, distance)
                           for i, distance in enumerate(self.prediction[2])
                           if distance is not None]
      classifierResultsProto = log.init("classifierResults",
                                        len(classifierResults))
      for i in xrange(len(classifierResults)):
        classifierResultProto = classifierResultsProto[i]
        classifierResultProto.label = classifierResults[i][0]
        classifierResultProto.distance = float(classifierResults[i][1])

      motorDeltaProto = log.init("motorDelta", len(delta))
      for i in xrange(len(delta)):
        motorDeltaProto[i] = int(delta[i])
      predictedL6Proto = log.init("predictedL6", len(self.predictedL6Cells))
      for i in xrange(len(self.predictedL6Cells)):
        predictedL6ModuleProto = predictedL6Proto.init(
          i, len(self.predictedL6Cells[i]))
        for j, c in enumerate(self.predictedL6Cells[i]):
          predictedL6ModuleProto[j] = int(c)

      json.dump(log.to_dict(), outputFile)
      outputFile.write("\n")
class PIUNCorticalColumn(object):
  """
  A two-layer (L4 + L6a) cortical column network.

  Sensory input activates minicolumns in L4, which drives activity in the
  L6a location modules. Motor input makes the L6a modules path-integrate,
  and the updated location activity depolarizes L4 cells.

  Call movementCompute whenever the sensor moves, and sensoryCompute
  whenever a sensory input arrives.
  """

  def __init__(self, locationConfigs, L4Overrides=None, bumpType="gaussian"):
    """
    @param locationConfigs (sequence of dicts)
    Parameters for the location modules

    @param L4Overrides (dict)
    Custom parameters for L4

    @param bumpType (string)
    Which location-module implementation to build: "gaussian",
    "gaussian2", or "square"
    """
    self.bumpType = bumpType

    # Every location module anchors onto the full set of L4 cells.
    anchorSize = 150 * 16

    if bumpType == "gaussian":
      makeModule = createRatModuleFromCellCount
    elif bumpType == "gaussian2":
      makeModule = createRatModuleFromReadoutResolution
    elif bumpType == "square":
      makeModule = Superficial2DLocationModule
    else:
      raise ValueError("Invalid bumpType", bumpType)

    self.L6aModules = [makeModule(anchorInputSize=anchorSize, **config)
                       for config in locationConfigs]

    # L4's basal input is the flat concatenation of all location modules'
    # cells (see getLocationRepresentation).
    L4Params = {
      "columnCount": 150,
      "cellsPerColumn": 16,
      "basalInputSize": sum(m.numberOfCells() for m in self.L6aModules)
    }
    if L4Overrides is not None:
      L4Params.update(L4Overrides)

    self.L4 = ApicalTiebreakPairMemory(**L4Params)

  def movementCompute(self, displacement, noiseFactor=0, moduleNoiseFactor=0):
    """
    Path-integrate a movement in every location module.

    @param displacement (dict)
    The change in location. Example: {"top": 10, "left", 10}

    @return (dict)
    Data for logging/tracing.
    """
    xdisp = 0
    ydisp = 0
    if noiseFactor != 0:
      # Jitter the displacement with Gaussian noise before integrating.
      xdisp = np.random.normal(0, noiseFactor)
      ydisp = np.random.normal(0, noiseFactor)

    shift = [displacement["top"] + ydisp,
             displacement["left"] + xdisp]
    locationParams = {
      "displacement": shift,
      "noiseFactor": moduleNoiseFactor
    }

    for module in self.L6aModules:
      module.movementCompute(**locationParams)

    return locationParams

  def sensoryCompute(self, activeMinicolumns, learn):
    """
    Present a sensation to L4, then anchor the location modules on the
    resulting L4 activity.

    @param activeMinicolumns (numpy array)
    List of indices of minicolumns to activate.

    @param learn (bool)
    If True, the two layers should learn this association.

    @return (tuple of dicts)
    Data for logging/tracing.
    """
    inputParams = {
      "activeColumns": activeMinicolumns,
      "basalInput": self.getLocationRepresentation(),
      "basalGrowthCandidates": self.getLearnableLocationRepresentation(),
      "learn": learn
    }
    self.L4.compute(**inputParams)

    locationParams = {
      "anchorInput": self.L4.getActiveCells(),
      "anchorGrowthCandidates": self.L4.getWinnerCells(),
      "learn": learn,
    }
    for module in self.L6aModules:
      module.sensoryCompute(**locationParams)

    return (inputParams, locationParams)

  def reset(self):
    """
    Clear all cell activity.
    """
    self.L4.reset()
    for module in self.L6aModules:
      module.reset()

  def activateRandomLocation(self):
    """
    Activate a random location in the location layer.
    """
    for module in self.L6aModules:
      module.activateRandomLocation()

  def getSensoryRepresentation(self):
    """
    Gets the active cells in the sensory layer.
    """
    return self.L4.getActiveCells()

  def _flattenModuleCells(self, cellsPerModule):
    """
    Concatenate per-module cell index arrays into one flat uint32 index
    space, offsetting each module's indices by the total cell count of the
    modules before it.
    """
    flattened = np.array([], dtype="uint32")
    offset = 0
    for module, moduleCells in zip(self.L6aModules, cellsPerModule):
      flattened = np.append(flattened, moduleCells + offset)
      offset += module.numberOfCells()
    return flattened

  def getLocationRepresentation(self):
    """
    Get the full population representation of the location layer.
    """
    return self._flattenModuleCells(
      [module.getActiveCells() for module in self.L6aModules])

  def getLearnableLocationRepresentation(self):
    """
    Get the cells in the location layer that should be associated with the
    sensory input layer representation. In some models, this is identical
    to the active cells. In others, it's a subset.
    """
    return self._flattenModuleCells(
      [module.getLearnableCells() for module in self.L6aModules])

  def getSensoryAssociatedLocationRepresentation(self):
    """
    Get the location cells in the location layer that were driven by the
    input layer (or, during learning, were associated with this input.)
    """
    return self._flattenModuleCells(
      [module.sensoryAssociatedCells for module in self.L6aModules])
class Grid2DLocationExperiment(object):
  """
  The experiment code organized into a class.
  """

  def __init__(self, objects, objectPlacements, featureNames, locationConfigs,
               worldDimensions):
    """
    @param objects (dict) Maps object name -> sequence of feature dicts.
    @param objectPlacements (dict) Maps object name -> (top, left) offset.
    @param featureNames (sequence) Names of all possible features.
    @param locationConfigs (sequence of dicts) Parameters for the location
           modules.
    @param worldDimensions (tuple) Size of the world.
    """
    self.objects = objects
    self.objectPlacements = objectPlacements
    self.worldDimensions = worldDimensions

    # One random 15-cell SDR (out of 150 minicolumns) per feature.
    self.features = dict(
      (k, np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
      for k in featureNames)

    # Each location module anchors onto the full set of input layer cells.
    self.locationModules = [SuperficialLocationModule2D(anchorInputSize=150*32,
                                                        **config)
                            for config in locationConfigs]

    self.inputLayer = ApicalTiebreakPairMemory(**{
      "columnCount": 150,
      "cellsPerColumn": 32,
      "basalInputSize": 18 * sum(np.prod(config["cellDimensions"])
                                 for config in locationConfigs),
      "apicalInputSize": 4096
    })

    self.objectLayer = ColumnPooler(**{
      "inputWidth": 150 * 32
    })

    # Use these for classifying SDRs and for testing whether they're correct.
    self.locationRepresentations = {
      # Example:
      # (objectName, (top, left)): [0, 26, 54, 77, 101, ...]
    }
    self.inputRepresentations = {
      # Example:
      # (objectName, (top, left), featureName): [0, 26, 54, 77, 101, ...]
    }
    self.objectRepresentations = {
      # Example:
      # objectName: [14, 19, 54, 107, 201, ...]
    }

    self.locationInWorld = None

    self.maxSettlingTime = 10

    self.monitors = {}
    self.nextMonitorToken = 1

  def addMonitor(self, monitor):
    """
    Subscribe to Grid2DLocationExperimentMonitor events.

    @param monitor (Grid2DLocationExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """
    token = self.nextMonitorToken
    self.nextMonitorToken += 1

    self.monitors[token] = monitor

    return token

  def removeMonitor(self, monitorToken):
    """
    Unsubscribe from LocationExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
    del self.monitors[monitorToken]

  def getActiveLocationCells(self):
    """
    Concatenate every location module's active cells into one flat uint32
    index space, offsetting each module by the cell counts before it.
    """
    activeCells = np.array([], dtype="uint32")

    totalPrevCells = 0
    for i, module in enumerate(self.locationModules):
      activeCells = np.append(activeCells,
                              module.getActiveCells() + totalPrevCells)
      totalPrevCells += module.numberOfCells()

    return activeCells

  def move(self, objectName, locationOnObject):
    """
    Move the sensor to a location on an object, shifting each location
    module by the world-coordinate delta from the previous position.
    """
    objectPlacement = self.objectPlacements[objectName]
    locationInWorld = (objectPlacement[0] + locationOnObject[0],
                       objectPlacement[1] + locationOnObject[1])

    # On the very first move there is no previous location, so no shift.
    if self.locationInWorld is not None:
      deltaLocation = (locationInWorld[0] - self.locationInWorld[0],
                       locationInWorld[1] - self.locationInWorld[1])

      for monitor in self.monitors.values():
        monitor.beforeMove(deltaLocation)

      params = {
        "deltaLocation": deltaLocation
      }
      for module in self.locationModules:
        module.shift(**params)

      for monitor in self.monitors.values():
        monitor.afterLocationShift(**params)

    self.locationInWorld = locationInWorld
    for monitor in self.monitors.values():
      monitor.afterWorldLocationChanged(locationInWorld)

  def _senseInferenceMode(self, featureSDR):
    """
    Repeatedly compute all three layers with the same sensation until the
    activity stops changing (or maxSettlingTime is reached).
    """
    prevCellActivity = None
    for i in xrange(self.maxSettlingTime):
      inputParams = {
        "activeColumns": featureSDR,
        "basalInput": self.getActiveLocationCells(),
        "apicalInput": self.objectLayer.getActiveCells(),
        "learn": False
      }
      self.inputLayer.compute(**inputParams)

      objectParams = {
        "feedforwardInput": self.inputLayer.getActiveCells(),
        "feedforwardGrowthCandidates":
          self.inputLayer.getPredictedActiveCells(),
        "learn": False,
      }
      self.objectLayer.compute(**objectParams)

      locationParams = {
        "anchorInput": self.inputLayer.getActiveCells()
      }
      for module in self.locationModules:
        module.anchor(**locationParams)

      cellActivity = (set(self.objectLayer.getActiveCells()),
                      set(self.inputLayer.getActiveCells()),
                      set(self.getActiveLocationCells()))

      if cellActivity == prevCellActivity:
        # It settled. Don't even log this timestep.
        break
      else:
        prevCellActivity = cellActivity
        for monitor in self.monitors.values():
          if i > 0:
            monitor.markSensoryRepetition()

          monitor.afterInputCompute(**inputParams)
          monitor.afterObjectCompute(**objectParams)
          monitor.afterLocationAnchor(**locationParams)

  def _senseLearningMode(self, featureSDR):
    """
    One learning timestep: input layer and object layer learn, then the
    location modules learn from the input layer's winner cells.
    """
    inputParams = {
      "activeColumns": featureSDR,
      "basalInput": self.getActiveLocationCells(),
      "apicalInput": self.objectLayer.getActiveCells(),
      "learn": True
    }
    self.inputLayer.compute(**inputParams)

    objectParams = {
      "feedforwardInput": self.inputLayer.getActiveCells(),
      "feedforwardGrowthCandidates":
        self.inputLayer.getPredictedActiveCells(),
      "learn": True,
    }
    self.objectLayer.compute(**objectParams)

    locationParams = {
      "anchorInput": self.inputLayer.getWinnerCells()
    }
    for module in self.locationModules:
      module.learn(**locationParams)

    for monitor in self.monitors.values():
      monitor.afterInputCompute(**inputParams)
      monitor.afterObjectCompute(**objectParams)

  def sense(self, featureSDR, learn):
    """
    Present one sensation, dispatching to learning or inference mode.
    """
    for monitor in self.monitors.values():
      monitor.beforeSense(featureSDR)

    if learn:
      self._senseLearningMode(featureSDR)
    else:
      self._senseInferenceMode(featureSDR)

  def learnObjects(self):
    """
    Learn each provided object.

    This method simultaneously learns 4 sets of synapses:
    - location -> input
    - input -> location
    - input -> object
    - object -> input
    """
    for objectName, objectFeatures in self.objects.iteritems():
      self.reset()

      for module in self.locationModules:
        module.activateRandomLocation()

      for feature in objectFeatures:
        # Sense each feature at its center point.
        locationOnObject = (feature["top"] + feature["height"]/2,
                            feature["left"] + feature["width"]/2)
        self.move(objectName, locationOnObject)

        featureName = feature["name"]
        featureSDR = self.features[featureName]
        for _ in xrange(10):
          self.sense(featureSDR, learn=True)

        self.locationRepresentations[(objectName, locationOnObject)] = (
          self.getActiveLocationCells())
        self.inputRepresentations[
          (objectName, locationOnObject, featureName)] = (
            self.inputLayer.getActiveCells())

      self.objectRepresentations[objectName] = self.objectLayer.getActiveCells()

  def inferObjectsWithRandomMovements(self):
    """
    Infer each object without any location input.
    """
    for objectName, objectFeatures in self.objects.iteritems():
      self.reset()

      inferred = False

      prevTouchSequence = None

      for _ in xrange(4):
        # Choose a random touch sequence that doesn't start where the
        # previous one ended.
        while True:
          touchSequence = list(objectFeatures)
          random.shuffle(touchSequence)

          if prevTouchSequence is not None:
            if touchSequence[0] == prevTouchSequence[-1]:
              continue

          break

        for i, feature in enumerate(touchSequence):
          locationOnObject = (feature["top"] + feature["height"]/2,
                              feature["left"] + feature["width"]/2)
          self.move(objectName, locationOnObject)

          featureName = feature["name"]
          featureSDR = self.features[featureName]
          self.sense(featureSDR, learn=False)

          # Inference succeeds when all three layers match the learned
          # representations for this object/location/feature.
          inferred = (
            set(self.objectLayer.getActiveCells()) ==
            set(self.objectRepresentations[objectName]) and
            set(self.inputLayer.getActiveCells()) ==
            set(self.inputRepresentations[(objectName, locationOnObject,
                                           featureName)]) and
            set(self.getActiveLocationCells()) ==
            set(self.locationRepresentations[(objectName, locationOnObject)]))

          if inferred:
            break

        prevTouchSequence = touchSequence

        if inferred:
          break

  def reset(self):
    """
    Clear all cell activity and the tracked world location, then notify
    monitors.
    """
    for module in self.locationModules:
      module.reset()
    self.objectLayer.reset()
    self.inputLayer.reset()

    self.locationInWorld = None

    for monitor in self.monitors.values():
      monitor.afterReset()
class PIUNCorticalColumn(object):
  """
  A L4 + L6a network. Sensory input causes minicolumns in L4 to activate,
  which drives activity in L6a. Motor input causes L6a to perform path
  integration, updating its activity, which then depolarizes cells in L4.

  Whenever the sensor moves, call movementCompute. Whenever a sensory input
  arrives, call sensoryCompute.
  """

  def __init__(self, locationConfigs, L4Overrides=None):
    """
    @param L4Overrides (dict)
    Custom parameters for L4

    @param locationConfigs (sequence of dicts)
    Parameters for the location modules
    """
    # Total cells across all location modules; the L4 basal input space is
    # sized as (number of modules) * (total module cells).
    totalModuleCells = sum(np.prod(cfg["cellDimensions"])
                           for cfg in locationConfigs)

    l4Params = {
      "columnCount": 150,
      "cellsPerColumn": 16,
      "basalInputSize": len(locationConfigs) * totalModuleCells,
    }
    if L4Overrides is not None:
      l4Params.update(L4Overrides)

    self.L4 = ApicalTiebreakPairMemory(**l4Params)

    # Each location module anchors on the full L4 cell population.
    self.L6aModules = []
    for cfg in locationConfigs:
      self.L6aModules.append(
        Superficial2DLocationModule(
          anchorInputSize=self.L4.numberOfCells(),
          **cfg))


  def movementCompute(self, displacement, noiseFactor = 0, moduleNoiseFactor = 0):
    """
    @param displacement (dict)
    The change in location. Example: {"top": 10, "left": 10}

    @return (dict)
    Data for logging/tracing.
    """
    xdisp = 0
    ydisp = 0
    if noiseFactor != 0:
      # Jitter the displacement. x is drawn before y, preserving the RNG
      # consumption order.
      xdisp = np.random.normal(0, noiseFactor)
      ydisp = np.random.normal(0, noiseFactor)

    locationParams = {
      "displacement": [displacement["top"] + ydisp,
                       displacement["left"] + xdisp],
      "noiseFactor": moduleNoiseFactor,
    }

    for module in self.L6aModules:
      module.movementCompute(**locationParams)

    return locationParams


  def sensoryCompute(self, activeMinicolumns, learn):
    """
    @param activeMinicolumns (numpy array)
    List of indices of minicolumns to activate.

    @param learn (bool)
    If True, the two layers should learn this association.

    @return (tuple of dicts)
    Data for logging/tracing.
    """
    # L4 first: sensory activation depolarized by the current location.
    inputParams = dict(
      activeColumns=activeMinicolumns,
      basalInput=self.getLocationRepresentation(),
      learn=learn)
    self.L4.compute(**inputParams)

    # Then anchor the location modules on the resulting L4 activity.
    locationParams = dict(
      anchorInput=self.L4.getActiveCells(),
      anchorGrowthCandidates=self.L4.getWinnerCells(),
      learn=learn)
    for module in self.L6aModules:
      module.sensoryCompute(**locationParams)

    return (inputParams, locationParams)


  def reset(self):
    """
    Clear all cell activity.
    """
    self.L4.reset()
    for module in self.L6aModules:
      module.reset()


  def activateRandomLocation(self):
    """
    Activate a random location in the location layer.
    """
    for module in self.L6aModules:
      module.activateRandomLocation()


  def getSensoryRepresentation(self):
    """
    Gets the active cells in the sensory layer.
    """
    return self.L4.getActiveCells()


  def getLocationRepresentation(self):
    """
    Get the full population representation of the location layer.

    Module cell indices are offset so each module occupies its own disjoint
    range in the combined array.
    """
    cells = np.array([], dtype="uint32")

    offset = 0
    for module in self.L6aModules:
      cells = np.append(cells, module.getActiveCells() + offset)
      offset += module.numberOfCells()

    return cells
class LVF(object):
  """Class implementing Localization with Vision Features"""

  def __init__(self,
               minX,
               maxX,
               minY,
               maxY,
               bottomUpInputSize,
               bottomUpOnBits,
               ):
    # Position encoders: 5 active bits over 75, spanning [min, 10*max].
    self.xEncoder = ScalarEncoder(5, minX, 10 * maxX, n=75, forced=True)
    self.yEncoder = ScalarEncoder(5, minY, 10 * maxY, n=75, forced=True)

    # The external (location) input is the outer product of the x and y
    # encodings, hence the squared width and on-bit count.
    self.externalSize = self.xEncoder.getWidth()**2
    self.externalOnBits = self.xEncoder.w**2

    self.bottomUpInputSize = bottomUpInputSize
    self.bottomUpOnBits = bottomUpOnBits

    # Bookkeeping counters.
    self.trainingIterations = 0
    self.testIterations = 0
    self.maxPredictionError = 0
    self.totalPredictionError = 0
    self.numMissedPredictions = 0

    self.tm = TM(columnCount=self.bottomUpInputSize,
                 basalInputSize=self.externalSize,
                 cellsPerColumn=4,
                 initialPermanence=0.4,
                 connectedPermanence=0.5,
                 minThreshold=self.externalOnBits,
                 sampleSize=40,
                 permanenceIncrement=0.1,
                 permanenceDecrement=0.00,
                 # Require 75% of the combined external + bottom-up on bits.
                 activationThreshold=int(0.75 * (self.externalOnBits +
                                                 self.bottomUpOnBits)),
                 basalPredictedSegmentDecrement=0.00,
                 seed=42)


  def compute(self, x, y, bottomUpSDR, learn):
    # Encode the inputs appropriately and train the HTM
    externalSDR = self.encodePosition(x, y)

    if not learn:
      print >> sys.stderr, "Learn: ", learn
      return

    # During learning we provide the current pose angle as bottom up input
    self.trainTM(bottomUpSDR, externalSDR)
    self.trainingIterations += 1


  def encodePosition(self, x, y):
    """Return the SDR for x,y"""
    xBits = self.xEncoder.encode(x)
    yBits = self.yEncoder.encode(y)
    # Outer product joins the two 1-D encodings into a 2-D code; the SDR is
    # the flattened indices of its nonzero entries.
    joint = np.outer(xBits, yBits)
    return joint.flatten().nonzero()[0]


  def trainTM(self, bottomUp, externalInput):
    #print >> sys.stderr, "Bottom up: ", bottomUp
    #print >> sys.stderr, "ExternalInput: ",externalInput
    self.tm.compute(bottomUp, basalInput=externalInput, learn=True)
class PIUNCorticalColumn(object):
  """
  A L4 + L6a network. Sensory input causes minicolumns in L4 to activate,
  which drives activity in L6a. Motor input causes L6a to perform path
  integration, updating its activity, which then depolarizes cells in L4.

  Whenever the sensor moves, call movementCompute. Whenever a sensory input
  arrives, call sensoryCompute.
  """
  # NOTE(review): this class is a re-definition of the PIUNCorticalColumn
  # declared earlier in this file; at import time this second definition
  # silently replaces the first. Looks like a copy/paste or file-concatenation
  # artifact -- confirm and remove one of the two.

  def __init__(self, locationConfigs, L4Overrides=None):
    """
    @param L4Overrides (dict)
    Custom parameters for L4

    @param locationConfigs (sequence of dicts)
    Parameters for the location modules
    """
    L4Params = {
      "columnCount": 150,
      "cellsPerColumn": 16,
      # Basal input space: (number of modules) * (total module cells).
      "basalInputSize": (len(locationConfigs) *
                         sum(np.prod(config["cellDimensions"])
                             for config in locationConfigs))
    }

    if L4Overrides is not None:
      L4Params.update(L4Overrides)
    self.L4 = ApicalTiebreakPairMemory(**L4Params)

    # Each location module anchors on the full L4 cell population.
    self.L6aModules = [
      Superficial2DLocationModule(
        anchorInputSize=self.L4.numberOfCells(),
        **config)
      for config in locationConfigs
    ]

  def movementCompute(self, displacement, noiseFactor=0, moduleNoiseFactor=0):
    """
    @param displacement (dict)
    The change in location. Example: {"top": 10, "left": 10}

    @return (dict)
    Data for logging/tracing.
    """
    if noiseFactor != 0:
      # Optional Gaussian jitter on the displacement (x drawn before y).
      xdisp = np.random.normal(0, noiseFactor)
      ydisp = np.random.normal(0, noiseFactor)
    else:
      xdisp = 0
      ydisp = 0

    locationParams = {
      "displacement": [displacement["top"] + ydisp,
                       displacement["left"] + xdisp],
      "noiseFactor": moduleNoiseFactor
    }

    for module in self.L6aModules:
      module.movementCompute(**locationParams)

    return locationParams

  def sensoryCompute(self, activeMinicolumns, learn):
    """
    @param activeMinicolumns (numpy array)
    List of indices of minicolumns to activate.

    @param learn (bool)
    If True, the two layers should learn this association.

    @return (tuple of dicts)
    Data for logging/tracing.
    """
    # L4 first: sensory activation, depolarized by the current location.
    inputParams = {
      "activeColumns": activeMinicolumns,
      "basalInput": self.getLocationRepresentation(),
      "learn": learn
    }
    self.L4.compute(**inputParams)

    # Then the location modules anchor on the resulting L4 activity.
    locationParams = {
      "anchorInput": self.L4.getActiveCells(),
      "anchorGrowthCandidates": self.L4.getWinnerCells(),
      "learn": learn,
    }
    for module in self.L6aModules:
      module.sensoryCompute(**locationParams)

    return (inputParams, locationParams)

  def reset(self):
    """
    Clear all cell activity.
    """
    self.L4.reset()
    for module in self.L6aModules:
      module.reset()

  def activateRandomLocation(self):
    """
    Activate a random location in the location layer.
    """
    for module in self.L6aModules:
      module.activateRandomLocation()

  def getSensoryRepresentation(self):
    """
    Gets the active cells in the sensory layer.
    """
    return self.L4.getActiveCells()

  def getLocationRepresentation(self):
    """
    Get the full population representation of the location layer.

    Module cell indices are offset so each module occupies its own disjoint
    range in the combined array.
    """
    activeCells = np.array([], dtype="uint32")

    totalPrevCells = 0
    for module in self.L6aModules:
      activeCells = np.append(activeCells,
                              module.getActiveCells() + totalPrevCells)
      totalPrevCells += module.numberOfCells()

    return activeCells
def main():
  """
  Run the place-recognition experiment: load visual and odometry sequences,
  feed them through MCN ("simple HTM") and two TemporalMemory variants (with
  and without distal odometry input), then plot precision-recall curves for
  each against the ground-truth association matrix.
  """
  DIR = "./sim_data"

  # Odom Encoder
  xSDR = ScalarEncoder(w=21,minval=0,maxval=20,n=256)
  ySDR = ScalarEncoder(w=21,minval=0,maxval=20,n=256)
  xyWidth = xSDR.getWidth() + ySDR.getWidth()

  # Visual input
  D = np.loadtxt(DIR + '/seq_multi_loop_noise05_al5.txt', dtype='i', delimiter=',')
  numberImages = D[:,0].size
  nColumns = D[0,:].size
  #time.sleep(10)

  # Odom input
  odom = np.loadtxt(DIR + '/seq_multi_loop_noise05_al5_gt.txt', dtype='f', delimiter=',')
  x = odom[:,0]
  y = odom[:,1]

  # Encoder Odom input
  # NOTE(review): only the first odometry sample is encoded (range(1));
  # presumably this should be range(numberImages) -- all later rows of
  # odomSDR stay zero. Confirm against the original experiment.
  odomSDR = np.zeros((numberImages,xyWidth), dtype=int)
  for i in range(1):
    _xSDR = np.zeros(xSDR.getWidth(), dtype=int)
    xSDR.encodeIntoArray(x[i], _xSDR)
    _ySDR = np.zeros(ySDR.getWidth(), dtype=int)
    ySDR.encodeIntoArray(y[i], _ySDR)
    odomSDR[i,:] = np.concatenate([_xSDR, _ySDR])

  # Temporal memory that additionally receives odometry as distal (basal)
  # input.
  tm0 = TM(columnCount=nColumns,
           cellsPerColumn=4,
           initialPermanence=0.21,
           connectedPermanence=0.5,
           permanenceIncrement=0.1,
           permanenceDecrement=0.1,
           minThreshold=15,
           basalInputSize= 512,
           reducedBasalThreshold=1000,
           activationThreshold=1000,
           apicalInputSize=0,
           maxSynapsesPerSegment=-1,
           sampleSize=1,
           seed = 42
           )

  # Plain temporal memory over the visual input only.
  tm = TemporalMemory(
    # Must be the same dimensions as the SP
    columnDimensions=(2048,),
    # How many cells in each mini-column.
    cellsPerColumn=4,
    # A segment is active if it has >= activationThreshold connected synapses
    # that are active due to infActiveState
    activationThreshold=13,
    initialPermanence=0.21,
    connectedPermanence=0.5,
    # Minimum number of active synapses for a segment to be considered during
    # search for the best-matching segments.
    minThreshold=1,
    # The max number of synapses added to a segment during learning
    maxNewSynapseCount=3,
    #permanenceIncrement=0.01,
    #permanenceDecrement=0.01,
    predictedSegmentDecrement=0.0005,
    maxSegmentsPerCell=3,
    maxSynapsesPerSegment=3,
    seed=42
    )
  #time.sleep(10)

  # Simple HTM parameters
  params = Params()
  params.maxPredDepth = 0
  params.probAdditionalCon = 0.05 # probability for random connection
  params.nCellPerCol = 32 # number of cells per minicolumn
  params.nInConPerCol = int(round(np.count_nonzero(D) / D.shape[0]))
  #print params.nInConPerCol
  params.minColumnActivity = int(round(0.25*params.nInConPerCol))
  params.nColsPerPattern = 10   # minimum number of active minicolumns k_min
  params.kActiveColumn = 100    # maximum number of active minicolumns k_max
  params.kMin = 1

  # run HTM
  t = time.time()
  print ('Simple HTM')
  htm = MCN('htm',params)

  outputSDR = []
  max_index = []

  for i in range (min(numberImages,D.shape[0])):
    loop = 0
    #print('\n-------- ITERATION %d ---------' %i)
    # skip empty vectors
    if np.count_nonzero(D[i,:]) == 0:
      print('empty vector, skip\n')
      continue
    loop += 1
    #print D[i,:]
    htm.compute(D[i,:])
    max_index.append(max(htm.winnerCells))
    outputSDR.append(htm.winnerCells)

  elapsed = time.time() - t
  print("Elapsed time: %f seconds\n" %elapsed)

  # create output SDR matrix from HTM winner cell output
  M = np.zeros((len(outputSDR),max(max_index)+1), dtype=int)
  for i in range(len(outputSDR)):
    for j in range(len(outputSDR[i])):
      winner = outputSDR[i][j]
      M[i][winner] = 1

  # Temporal Pooler descriptors
  print 'Temporal Pooler descriptors'
  D1_tm=[]
  id_max1=[]
  t = time.time()
  for i in range(min(numberImages,D.shape[0])):
    D1_sp = np.nonzero(D[i,:])[0]
    tm.compute(D1_sp, learn=True)
    activeCells = tm.getWinnerCells()
    D1_tm.append(activeCells)
    id_max1.append(max(activeCells))
  elapsed = time.time() - t
  print( "Elapsed time: %f seconds\n" %elapsed)

  # create output SDR matrix from HTM winner cell output
  T = np.zeros((len(D1_tm),max(id_max1)+1), dtype=int)
  for i in range(len(D1_tm)):
    for j in range(len(D1_tm[i])):
      winner = D1_tm[i][j]
      T[i][winner] = 1

  # Temporal Pooler - Distal connections
  print 'Temporal Pooler - Distal connections'
  D2_tm=[]
  id_max2=[]
  t = time.time()
  for i in range(min(numberImages,D.shape[0])):
    D2_sp = np.nonzero(D[i,:])[0]
    basalInputs = np.nonzero(odomSDR[i,:])[0]
    tm0.compute(sorted(D2_sp),
                sorted(basalInputs),
                apicalInput=(),
                basalGrowthCandidates=None,
                apicalGrowthCandidates=None,
                learn=True)
    activeCells2 = tm0.getWinnerCells()
    D2_tm.append(activeCells2)
    id_max2.append(max(activeCells2))
  elapsed = time.time() - t
  print( "Elapsed time: %f seconds\n" %elapsed)

  # create output SDR matrix from HTM winner cell output
  T2 = np.zeros((len(D2_tm),max(id_max2)+1), dtype=int)
  for i in range(len(D2_tm)):
    for j in range(len(D2_tm[i])):
      winner = D2_tm[i][j]
      T2[i][winner] = 1

  # Create ground truth and show precision-recall curves
  # GT[i,j] is 1 iff rows i and j of the ground-truth file are identical
  # (upper triangle only, including the diagonal).
  GT_data = np.loadtxt(DIR + '/seq_multi_loop_noNoise_gt.txt', dtype='i', delimiter=',',skiprows=1)
  GT = np.zeros((numberImages,numberImages), dtype=int)
  for i in range(GT.shape[0]):
    for j in range(i,GT.shape[1]):
      GT[i,j] = (np.any(GT_data[i,:] != GT_data[j,:])==False)

  # Results
  print ('Results')
  fig, ax = plt.subplots()

  S0 = evaluateSimilarity(D)
  P, R = createPR(S0,GT)
  ax.plot(R, P, label='InputSDR: (avgP=%f)' %np.trapz(P,R))

  S1 = evaluateSimilarity(M)
  P, R = createPR(S1,GT)
  ax.plot(R, P, label='MCN (avgP=%f)' %np.trapz(P,R))

  S2 = evaluateSimilarity(T)
  P, R = createPR(S2,GT)
  ax.plot(R, P, label='HTM (avgP=%f)' %np.trapz(P,R))

  S3 = evaluateSimilarity(T2)
  P, R = createPR(S3,GT)
  ax.plot(R, P, label='HTM Distal (avgP=%f)' %np.trapz(P,R))

  ax.legend()
  ax.grid(True)
  plt.xlabel("Recall")
  plt.ylabel("Precision")
  plt.show()


# NOTE(review): stray triple-quote below -- it appears to open (or close) a
# large quoted-out region; verify it is balanced in the full file.
'''
def __init__(self, objects, objectPlacements, featureNames, locationConfigs, numCorticalColumns, worldDimensions, featureW=15, cellsPerColumn=32): self.objects = objects self.objectPlacements = objectPlacements self.numCorticalColumns = numCorticalColumns self.worldDimensions = worldDimensions self.locationConfigs = locationConfigs self.features = dict( ((iCol, k), np.array(sorted(random.sample(xrange(150), featureW)), dtype="uint32")) for k in featureNames for iCol in xrange(numCorticalColumns)) self.corticalColumns = [] for _ in xrange(numCorticalColumns): inputLayer = ApicalTiebreakPairMemory(**{ "columnCount": 150, "cellsPerColumn": cellsPerColumn, "initialPermanence": 1.0, "basalInputSize": sum( np.prod(config["cellDimensions"]) for config in locationConfigs), "apicalInputSize": 4096, "seed": random.randint(0,2048)}) objectLayer = ColumnPooler(**{ "inputWidth": 150 * cellsPerColumn, "initialProximalPermanence": 1.0, "initialProximalPermanence": 1.0, "lateralInputWidths": [4096] * (numCorticalColumns - 1), "seed": random.randint(0,2048)}) sensorToBodyModules = [SensorToBodyModule2D(**config) for config in locationConfigs] sensorToSpecificObjectModules = [ SensorToSpecificObjectModule(**{ "cellDimensions": config["cellDimensions"], "anchorInputSize": inputLayer.numberOfCells(), "initialPermanence": 1.0, "seed": random.randint(0,2048)}) for config in locationConfigs] self.corticalColumns.append( CorticalColumn(inputLayer, objectLayer, sensorToBodyModules, sensorToSpecificObjectModules)) self.bodyToSpecificObjectModules = [] for iModule, config in enumerate(locationConfigs): module = BodyToSpecificObjectModule2D(config["cellDimensions"]) pairedSensorModules = [c.sensorToSpecificObjectModules[iModule] for c in self.corticalColumns] module.formReciprocalSynapses(pairedSensorModules) self.bodyToSpecificObjectModules.append(module) self.maxSettlingTime = 10 self.monitors = {} self.nextMonitorToken = 1
class Grid2DLocationExperiment(object):
  """
  The experiment code organized into a class.

  Couples an input layer (ApicalTiebreakPairMemory), an object layer
  (ColumnPooler), and a set of 2D location modules; supports learning objects
  and then inferring them from randomly ordered touches.
  """

  def __init__(self, objects, objectPlacements, featureNames, locationConfigs,
               worldDimensions):
    self.objects = objects
    self.objectPlacements = objectPlacements
    self.worldDimensions = worldDimensions

    # One random 15-bit feature SDR (out of 150 minicolumns) per feature name.
    self.features = dict(
      (k, np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
      for k in featureNames)

    self.locationModules = [
      SuperficialLocationModule2D(anchorInputSize=150 * 32, **config)
      for config in locationConfigs
    ]

    self.inputLayer = ApicalTiebreakPairMemory(**{
      "columnCount": 150,
      "cellsPerColumn": 32,
      "basalInputSize": 18 * sum(
        np.prod(config["cellDimensions"])
        for config in locationConfigs),
      "apicalInputSize": 4096
    })

    self.objectLayer = ColumnPooler(**{
      "inputWidth": 150 * 32
    })

    # Use these for classifying SDRs and for testing whether they're correct.
    self.locationRepresentations = {
      # Example:
      # (objectName, (top, left)): [0, 26, 54, 77, 101, ...]
    }
    self.inputRepresentations = {
      # Example:
      # (objectName, (top, left), featureName): [0, 26, 54, 77, 101, ...]
    }
    self.objectRepresentations = {
      # Example:
      # objectName: [14, 19, 54, 107, 201, ...]
    }

    self.locationInWorld = None
    self.maxSettlingTime = 10
    self.monitors = {}
    self.nextMonitorToken = 1


  def addMonitor(self, monitor):
    """
    Subscribe to Grid2DLocationExperimentMonitor events.

    @param monitor (Grid2DLocationExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """
    token = self.nextMonitorToken
    self.nextMonitorToken += 1

    self.monitors[token] = monitor

    return token


  def removeMonitor(self, monitorToken):
    """
    Unsubscribe from LocationExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
    del self.monitors[monitorToken]


  def getActiveLocationCells(self):
    """
    Combined active-cell array of all location modules; each module's cell
    indices are offset into its own disjoint range.
    """
    activeCells = np.array([], dtype="uint32")

    totalPrevCells = 0
    for i, module in enumerate(self.locationModules):
      activeCells = np.append(activeCells,
                              module.getActiveCells() + totalPrevCells)
      totalPrevCells += module.numberOfCells()

    return activeCells


  def move(self, objectName, locationOnObject):
    """
    Move the sensor to a location on an object, shifting the location modules
    by the world-coordinate delta (skipped on the first move, when there is
    no previous location).
    """
    objectPlacement = self.objectPlacements[objectName]
    locationInWorld = (objectPlacement[0] + locationOnObject[0],
                       objectPlacement[1] + locationOnObject[1])

    if self.locationInWorld is not None:
      deltaLocation = (locationInWorld[0] - self.locationInWorld[0],
                       locationInWorld[1] - self.locationInWorld[1])

      for monitor in self.monitors.values():
        monitor.beforeMove(deltaLocation)

      params = {"deltaLocation": deltaLocation}
      for module in self.locationModules:
        module.shift(**params)

      for monitor in self.monitors.values():
        monitor.afterLocationShift(**params)

    self.locationInWorld = locationInWorld
    for monitor in self.monitors.values():
      monitor.afterWorldLocationChanged(locationInWorld)


  def _senseInferenceMode(self, featureSDR):
    """
    Sense without learning, iterating input -> object -> location until the
    network activity settles (or maxSettlingTime is reached).
    """
    prevCellActivity = None

    for i in xrange(self.maxSettlingTime):
      inputParams = {
        "activeColumns": featureSDR,
        "basalInput": self.getActiveLocationCells(),
        "apicalInput": self.objectLayer.getActiveCells(),
        "learn": False
      }
      self.inputLayer.compute(**inputParams)

      objectParams = {
        "feedforwardInput": self.inputLayer.getActiveCells(),
        "feedforwardGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
        "learn": False,
      }
      self.objectLayer.compute(**objectParams)

      locationParams = {
        "anchorInput": self.inputLayer.getActiveCells()
      }
      for module in self.locationModules:
        module.anchor(**locationParams)

      cellActivity = (set(self.objectLayer.getActiveCells()),
                      set(self.inputLayer.getActiveCells()),
                      set(self.getActiveLocationCells()))

      if cellActivity == prevCellActivity:
        # It settled. Don't even log this timestep.
        break
      else:
        prevCellActivity = cellActivity
        for monitor in self.monitors.values():
          if i > 0:
            monitor.markSensoryRepetition()

          monitor.afterInputCompute(**inputParams)
          monitor.afterObjectCompute(**objectParams)
          monitor.afterLocationAnchor(**locationParams)


  def _senseLearningMode(self, featureSDR):
    """
    Sense with learning enabled: one pass through input and object layers,
    then the location modules learn on the input layer's winner cells.
    """
    inputParams = {
      "activeColumns": featureSDR,
      "basalInput": self.getActiveLocationCells(),
      "apicalInput": self.objectLayer.getActiveCells(),
      "learn": True
    }
    self.inputLayer.compute(**inputParams)

    objectParams = {
      "feedforwardInput": self.inputLayer.getActiveCells(),
      "feedforwardGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
      "learn": True,
    }
    self.objectLayer.compute(**objectParams)

    locationParams = {
      "anchorInput": self.inputLayer.getWinnerCells()
    }
    for module in self.locationModules:
      module.learn(**locationParams)

    for monitor in self.monitors.values():
      monitor.afterInputCompute(**inputParams)
      monitor.afterObjectCompute(**objectParams)


  def sense(self, featureSDR, learn):
    """
    Process one sensation, dispatching to learning or inference mode.
    """
    for monitor in self.monitors.values():
      monitor.beforeSense(featureSDR)

    if learn:
      self._senseLearningMode(featureSDR)
    else:
      self._senseInferenceMode(featureSDR)


  def learnObjects(self):
    """
    Learn each provided object.

    This method simultaneously learns 4 sets of synapses:
    - location -> input
    - input -> location
    - input -> object
    - object -> input
    """
    for objectName, objectFeatures in self.objects.iteritems():
      self.reset()

      # Start from an arbitrary location representation; learning binds it
      # to the object's features.
      for module in self.locationModules:
        module.activateRandomLocation()

      for feature in objectFeatures:
        # Sense at the center of each feature.
        locationOnObject = (feature["top"] + feature["height"] / 2,
                            feature["left"] + feature["width"] / 2)

        self.move(objectName, locationOnObject)

        featureName = feature["name"]
        featureSDR = self.features[featureName]
        # Repeat the sensation so the layers settle on a stable association.
        for _ in xrange(10):
          self.sense(featureSDR, learn=True)

        # Record the learned representations for later verification.
        self.locationRepresentations[(
          objectName, locationOnObject)] = (self.getActiveLocationCells())
        self.inputRepresentations[(
          objectName, locationOnObject,
          featureName)] = (self.inputLayer.getActiveCells())

      self.objectRepresentations[
        objectName] = self.objectLayer.getActiveCells()


  def inferObjectsWithRandomMovements(self):
    """
    Infer each object without any location input.

    Touches each object's features in a random order (up to 4 passes) until
    all three layers match the representations stored during learning.
    """
    for objectName, objectFeatures in self.objects.iteritems():
      self.reset()

      inferred = False

      prevTouchSequence = None

      for _ in xrange(4):

        # Choose a random touch sequence that does not begin on the feature
        # the previous sequence ended on.
        while True:
          touchSequence = list(objectFeatures)
          random.shuffle(touchSequence)

          if prevTouchSequence is not None:
            if touchSequence[0] == prevTouchSequence[-1]:
              continue

          break

        for i, feature in enumerate(touchSequence):
          locationOnObject = (feature["top"] + feature["height"] / 2,
                              feature["left"] + feature["width"] / 2)

          self.move(objectName, locationOnObject)

          featureName = feature["name"]
          featureSDR = self.features[featureName]
          self.sense(featureSDR, learn=False)

          # Inference succeeds only when object, input, and location layers
          # all reproduce the learned representations simultaneously.
          inferred = (
            set(self.objectLayer.getActiveCells()) == set(
              self.objectRepresentations[objectName]) and

            set(self.inputLayer.getActiveCells()) == set(
              self.inputRepresentations[(objectName, locationOnObject,
                                         featureName)]) and

            set(self.getActiveLocationCells()) == set(
              self.locationRepresentations[(objectName, locationOnObject)]))

          if inferred:
            break

        prevTouchSequence = touchSequence

        if inferred:
          break


  def reset(self):
    # Clear all layer and module activity between objects.
    for module in self.locationModules:
      module.reset()

    self.objectLayer.reset()
    self.inputLayer.reset()

    self.locationInWorld = None

    for monitor in self.monitors.values():
      monitor.afterReset()
class RelationalMemory(object):
  """
  Sensorimotor network pairing an L4 TemporalMemory with a set of L6 location
  modules (plain Connections instances), an L5 "transform" population, a
  ColumnPooler object layer, and a KNN classifier over the pooled output.
  """

  def __init__(self, l4N, l4W, numModules, moduleDimensions,
               maxActivePerModule, l6ActivationThreshold):
    # Network geometry.
    self.numModules = numModules
    self.moduleDimensions = moduleDimensions
    self._cellsPerModule = np.prod(moduleDimensions)
    self.maxActivePerModule = maxActivePerModule
    self.l4N = l4N
    self.l4W = l4W
    self.l6ActivationThreshold = l6ActivationThreshold

    self.l4TM = TemporalMemory(
        columnCount=l4N,
        basalInputSize=numModules*self._cellsPerModule,
        cellsPerColumn=4,
        #activationThreshold=int(numModules / 2) + 1,
        #reducedBasalThreshold=int(numModules / 2) + 1,
        activationThreshold=1,
        reducedBasalThreshold=1,
        initialPermanence=1.0,
        connectedPermanence=0.5,
        minThreshold=1,
        sampleSize=numModules,
        permanenceIncrement=1.0,
        permanenceDecrement=0.0,
    )
    # One Connections instance per L6 module (L4 -> L6 synapses).
    self.l6Connections = [Connections(numCells=self._cellsPerModule)
                          for _ in xrange(numModules)]

    self.pooler = ColumnPooler(
      inputWidth=self.numModules*self._cellsPerModule,
    )

    self.classifier = KNNClassifier(k=1, distanceMethod="rawOverlap")
    #self.classifier = KNNClassifier(k=1, distanceMethod="norm")

    # Active state
    # activeL6Cells is a per-module history: a list of "steps", newest first,
    # each step being a list of cell indices.
    self.activeL6Cells = [[] for _ in xrange(numModules)]
    self.activeL5Cells = [[] for _ in xrange(numModules)]
    self.predictedL6Cells = [set([]) for _ in xrange(numModules)]

    # Debug state
    self.activeL6BeforeMotor = [[] for _ in xrange(numModules)]
    self.l6ToL4Map = collections.defaultdict(list)

  def reset(self):
    """Clear all active state and reset the L4 and pooler layers."""
    self.activeL6Cells = [[] for _ in xrange(self.numModules)]
    self.activeL5Cells = [[] for _ in xrange(self.numModules)]
    self.predictedL6Cells = [set([]) for _ in xrange(self.numModules)]
    self.l4TM.reset()
    self.pooler.reset()

  def trainFeatures(self, sensoryInputs):
    """
    Associate each sensory input with one random L6 cell per module,
    learning both L6 -> L4 (via the TM) and L4 -> L6 (via Connections).
    """
    # Randomly assign bilateral connections and zero others
    for sense in sensoryInputs:
      # Choose L6 cells randomly
      activeL6Cells = [[np.random.randint(self._cellsPerModule)]
                       for _ in xrange(self.numModules)]
      l4BasalInput = getGlobalIndices(activeL6Cells, self._cellsPerModule)

      # Learn L6->L4 connections
      # NOTE(review): the compute call is repeated four times with identical
      # arguments -- presumably to reinforce the learned association; confirm
      # whether fewer repetitions suffice.
      self.l4TM.compute(activeColumns=sense, basalInput=l4BasalInput,
                        learn=True)
      self.l4TM.compute(activeColumns=sense, basalInput=l4BasalInput,
                        learn=True)
      self.l4TM.compute(activeColumns=sense, basalInput=l4BasalInput,
                        learn=True)
      self.l4TM.compute(activeColumns=sense, basalInput=l4BasalInput,
                        learn=True)
      activeL4Cells = self.l4TM.getActiveCells()
      # Debug: store the map
      for l6Cell in itertools.chain(*activeL6Cells):
        self.l6ToL4Map[l6Cell].extend(activeL4Cells)
      # Learn L4->L6 connections
      for l6Cells, connections in zip(activeL6Cells, self.l6Connections):
        # Assumes one cell active per L6 module when training features
        segment = connections.createSegment(l6Cells[0])
        for l4Cell in activeL4Cells:
          connections.createSynapse(segment, l4Cell, 1.0)

  def compute(self, ff, motor, objClass, outputFile):
    """Run one iteration of the online sensorimotor algorithm.

    This function has four stages:
    - FEEDFORWARD: drive L4 from the sensory input (with the previous step's
      predicted L6 cells as basal input), then drive L6 from the resulting
      L4 activity; derive L5 transform cells from pairs of L6 cells and pool
      them into an object representation that feeds the classifier.
    - MOTOR: path-integrate every active L6 cell by the motor delta.
    - FEEDBACK: predict the next step's L6 cells by unbinding the active L5
      transforms against the active L6 cells.
    - LOGGING: optionally serialize the step to outputFile as JSON lines.

    Prerequisites: `trainFeatures` must have been run already

    :param ff: feedforward sensory input
    :param motor: the motor command for next move, in the form of delta
           coordinates
    :param objClass: the object class to train the classifier, or None if
           not learning
    :param outputFile: open file handle for JSON-lines logging, or a falsy
           value to skip logging
    """
    delta = motor

    # FEEDFORWARD
    # Determine active feature representation in l4, using lateral input
    # from l6 previous step feedback
    l4BasalInput = getGlobalIndices(self.predictedL6Cells,
                                    self._cellsPerModule)
    self.l4TM.compute(activeColumns=ff, basalInput=l4BasalInput, learn=False)
    predictedL4Cells = self.l4TM.getPredictedCells()
    activeL4Cells = self.l4TM.getActiveCells()

    # Drive L6 activation from l4
    for m, connections in enumerate(self.l6Connections):
      newCells = []
      activeConnectedPerSegment = connections.computeActivity(activeL4Cells,
                                                              0.5)[0]
      for flatIdx, activeConnected in enumerate(activeConnectedPerSegment):
        if activeConnected >= self.l6ActivationThreshold:
          cellIdx = connections.segmentForFlatIdx(flatIdx).cell
          newCells.append(cellIdx)

      #for cell in newCells:
      #  print connections.segmentsForCell(cell)
      #print newCells
      #assert len(newCells) <= 1

      # Prepend the new step and trim the history to maxActivePerModule steps.
      self.activeL6Cells[m].insert(0, newCells)
      # TODO: This is the number of steps, not necessarily the number of cells
      lenBefore = len(self.activeL6Cells[m])
      del self.activeL6Cells[m][self.maxActivePerModule:]
      lenAfter = len(self.activeL6Cells[m])
      #assert lenBefore == lenAfter, "Debug assert to check that we aren't hitting limit on L6 activity. Can remove when we set max active low enough relative to object size (times number of train/test iterations)"

    # Snapshot of L6 activity before path integration, for debugging.
    self.activeL6BeforeMotor = [list(itertools.chain(*l6Module))
                                for l6Module in self.activeL6Cells]

    # Replace l5 activity with new transforms
    # Each L5 cell encodes a (prev, new) pair of L6 cells, in both directions.
    self.activeL5Cells = []
    for activeL6Module in self.activeL6Cells:
      transforms = set()
      for newCell in activeL6Module[0]:
        for prevCell in itertools.chain(*activeL6Module[1:]):
          if newCell == prevCell:
            continue
          # Transform from prev to new
          t1 = bind(prevCell, newCell, self.moduleDimensions)
          transforms.add(t1)
          # Transform from new to prev
          t2 = bind(newCell, prevCell, self.moduleDimensions)
          transforms.add(t2)
      self.activeL5Cells.append(list(transforms))

    # Pool into object representation
    classifierLearn = True if objClass is not None else False
    globalL5ActiveCells = sorted(getGlobalIndices(self.activeL5Cells,
                                                  self._cellsPerModule))
    self.pooler.compute(feedforwardInput=globalL5ActiveCells,
                        learn=classifierLearn,
                        predictedInput=globalL5ActiveCells)

    # Classifier
    # Dense binary vector over the pooler's cells.
    classifierInput = np.zeros((self.pooler.numberOfCells(),),
                               dtype=np.uint32)
    classifierInput[self.pooler.getActiveCells()] = 1
    #print classifierInput.nonzero()
    #print self.pooler.getActiveCells()
    #print
    self.prediction = self.classifier.infer(classifierInput)
    if objClass is not None:
      self.classifier.learn(classifierInput, objClass)

    # MOTOR
    # Update L6 based on motor command
    numActivePerModuleBefore = [sum([len(cells) for cells in active])
                                for active in self.activeL6Cells]

    # Path-integrate every recorded L6 cell by the motor delta, preserving
    # the per-step history structure.
    self.activeL6Cells = [
        [[pathIntegrate(c, self.moduleDimensions, delta)
          for c in steps]
         for steps in prevActiveCells]
        for prevActiveCells in self.activeL6Cells]

    numActivePerModuleAfter = [sum([len(cells) for cells in active])
                               for active in self.activeL6Cells]
    # Path integration must not create or drop cells.
    assert numActivePerModuleAfter == numActivePerModuleBefore

    # FEEDBACK
    # Get all transforms associated with object
    # TODO: Get transforms from object in addition to current activity
    predictiveTransforms = [l5Active for l5Active in self.activeL5Cells]

    # Get set of predicted l6 representations (including already active)
    # and store them for next step l4 compute
    self.predictedL6Cells = []
    for l6, l5 in itertools.izip(self.activeL6Cells, predictiveTransforms):
      predictedCells = []
      for activeL6Cell in set(itertools.chain(*l6)):
        for activeL5Cell in l5:
          predictedCell = unbind(activeL6Cell, activeL5Cell,
                                 self.moduleDimensions)
          predictedCells.append(predictedCell)
      self.predictedL6Cells.append(set(
        list(itertools.chain(*l6)) + predictedCells))

    # Log this step
    if outputFile:
      log = RelationalMemoryLog.new_message()
      log.ts = time.time()
      sensationProto = log.init("sensation", len(ff))
      for i in xrange(len(ff)):
        sensationProto[i] = int(ff[i])
      predictedL4Proto = log.init("predictedL4", len(predictedL4Cells))
      for i in xrange(len(predictedL4Cells)):
        predictedL4Proto[i] = int(predictedL4Cells[i])
      activeL4Proto = log.init("activeL4", len(activeL4Cells))
      for i in xrange(len(activeL4Cells)):
        activeL4Proto[i] = int(activeL4Cells[i])
      activeL6HistoryProto = log.init("activeL6History",
                                      len(self.activeL6Cells))
      for i in xrange(len(self.activeL6Cells)):
        activeL6ModuleProto = activeL6HistoryProto.init(
          i, len(self.activeL6Cells[i]))
        for j in xrange(len(self.activeL6Cells[i])):
          activeL6ModuleStepProto = activeL6ModuleProto.init(
            j, len(self.activeL6Cells[i][j]))
          for k in xrange(len(self.activeL6Cells[i][j])):
            activeL6ModuleStepProto[k] = int(self.activeL6Cells[i][j][k])
      activeL5Proto = log.init("activeL5", len(self.activeL5Cells))
      for i in xrange(len(self.activeL5Cells)):
        activeL5ModuleProto = activeL5Proto.init(i,
                                                 len(self.activeL5Cells[i]))
        for j in xrange(len(self.activeL5Cells[i])):
          activeL5ModuleProto[j] = int(self.activeL5Cells[i][j])

      # Only classifier entries with a defined distance are logged.
      classifierResults = [
        (i, distance)
        for i, distance in enumerate(self.prediction[2])
        if distance is not None]
      classifierResultsProto = log.init("classifierResults",
                                        len(classifierResults))
      for i in xrange(len(classifierResults)):
        classifierResultProto = classifierResultsProto[i]
        classifierResultProto.label = classifierResults[i][0]
        classifierResultProto.distance = float(classifierResults[i][1])
      motorDeltaProto = log.init("motorDelta", len(delta))
      for i in xrange(len(delta)):
        motorDeltaProto[i] = int(delta[i])
      predictedL6Proto = log.init("predictedL6", len(self.predictedL6Cells))
      for i in xrange(len(self.predictedL6Cells)):
        predictedL6ModuleProto = predictedL6Proto.init(
          i, len(self.predictedL6Cells[i]))
        for j, c in enumerate(self.predictedL6Cells[i]):
          predictedL6ModuleProto[j] = int(c)

      json.dump(log.to_dict(), outputFile)
      outputFile.write("\n")