class ApicalTiebreakTM_ApicalTiebreakTests(ApicalTiebreakTestBase,
                                           unittest.TestCase):
  """
  Run the "apical tiebreak" tests on the ApicalTiebreakTemporalMemory.
  """

  def constructTM(self, columnCount, basalInputSize, apicalInputSize,
                  cellsPerColumn, initialPermanence, connectedPermanence,
                  minThreshold, sampleSize, permanenceIncrement,
                  permanenceDecrement, predictedSegmentDecrement,
                  activationThreshold, seed):
    """
    Create the ApicalTiebreakPairMemory under test. The test base's
    "predictedSegmentDecrement" maps onto the basal decrement; the apical
    decrement is disabled (0.0).
    """
    self.tm = ApicalTiebreakPairMemory(
      columnCount=columnCount,
      cellsPerColumn=cellsPerColumn,
      initialPermanence=initialPermanence,
      connectedPermanence=connectedPermanence,
      minThreshold=minThreshold,
      sampleSize=sampleSize,
      permanenceIncrement=permanenceIncrement,
      permanenceDecrement=permanenceDecrement,
      basalPredictedSegmentDecrement=predictedSegmentDecrement,
      apicalPredictedSegmentDecrement=0.0,
      activationThreshold=activationThreshold,
      seed=seed,
      basalInputSize=basalInputSize,
      apicalInputSize=apicalInputSize)


  def compute(self, activeColumns, basalInput, apicalInput, learn):
    """
    Run one timestep, reusing each input as its own set of growth candidates.
    """
    def asSortedUint32(values):
      # The memory implementation takes sorted uint32 index arrays.
      return np.array(sorted(values), dtype="uint32")

    columns = asSortedUint32(activeColumns)
    basal = asSortedUint32(basalInput)
    apical = asSortedUint32(apicalInput)

    self.tm.compute(columns,
                    basalInput=basal,
                    basalGrowthCandidates=basal,
                    apicalInput=apical,
                    apicalGrowthCandidates=apical,
                    learn=learn)


  def getActiveCells(self):
    """Active cells from the most recent compute."""
    return self.tm.getActiveCells()


  def getPredictedCells(self):
    """Predicted cells from the most recent compute."""
    return self.tm.getPredictedCells()
# --- Example 2 (original scrape separator: "Esempio n. 2" / vote count "0") ---
class ApicalTiebreakTM_ApicalTiebreakTests(ApicalTiebreakTestBase,
                                           unittest.TestCase):
    """
    Run the "apical tiebreak" tests on the ApicalTiebreakTemporalMemory.
    """

    def constructTM(self, columnCount, basalInputSize, apicalInputSize,
                    cellsPerColumn, initialPermanence, connectedPermanence,
                    minThreshold, sampleSize, permanenceIncrement,
                    permanenceDecrement, predictedSegmentDecrement,
                    activationThreshold, seed):
        """
        Instantiate the ApicalTiebreakPairMemory under test. The base class's
        single predictedSegmentDecrement is applied basally; the apical
        decrement is set to 0.0.
        """
        memoryArgs = dict(
            columnCount=columnCount,
            cellsPerColumn=cellsPerColumn,
            initialPermanence=initialPermanence,
            connectedPermanence=connectedPermanence,
            minThreshold=minThreshold,
            sampleSize=sampleSize,
            permanenceIncrement=permanenceIncrement,
            permanenceDecrement=permanenceDecrement,
            basalPredictedSegmentDecrement=predictedSegmentDecrement,
            apicalPredictedSegmentDecrement=0.0,
            activationThreshold=activationThreshold,
            seed=seed,
            basalInputSize=basalInputSize,
            apicalInputSize=apicalInputSize)

        self.tm = ApicalTiebreakPairMemory(**memoryArgs)

    def compute(self, activeColumns, basalInput, apicalInput, learn):
        """
        Run one timestep; each input doubles as its own growth candidates.
        """
        # Normalize every input to a sorted uint32 array.
        activeColumns, basalInput, apicalInput = (
            np.array(sorted(group), dtype="uint32")
            for group in (activeColumns, basalInput, apicalInput))

        self.tm.compute(activeColumns,
                        basalInput=basalInput,
                        basalGrowthCandidates=basalInput,
                        apicalInput=apicalInput,
                        apicalGrowthCandidates=apicalInput,
                        learn=learn)

    def getActiveCells(self):
        """Return the active cells of the last timestep."""
        return self.tm.getActiveCells()

    def getPredictedCells(self):
        """Return the predicted cells of the last timestep."""
        return self.tm.getPredictedCells()
# --- Example 3 (original scrape separator: "Esempio n. 3" / vote count "0") ---
class PoolOfPairsLocation1DExperiment(object):
  """
  There are a lot of ways this experiment could choose to associate "operands"
  with results -- e.g. we could just do it randomly. This particular experiment
  assumes there are an equal number of "operand1", "operand2", and "result"
  values. It assigns each operand/result an index, and it relates these via:

    result = (operand1 + operand2) % numLocations

  Note that this experiment would be fundamentally no different if it used
  subtraction:

    result = (operand1 - operand2) % numLocations

  The resulting network would be identical, it's just our interpretation of the
  SDRs that would change.

  This experiment intentionally mimics a 1D space with wraparound, with
  operands/results representing 1D locations and offsets. You can think of this
  as:

    location2 = location1 + offset
    offset = location2 - location1
  """

  def __init__(self,
               numLocations=25,
               numMinicolumns=15,
               numActiveMinicolumns=10,
               poolingThreshold=8,
               cellsPerColumn=8,
               segmentedProximal=True,
               segmentedPooling=True,
               minicolumnSDRs=None):
    """
    Build the network: proximal connections that convert a driving operand SDR
    into a minicolumn SDR, a pair layer that represents
    (driving operand, context operand) pairs, and a pooling layer that maps
    pools of pair-layer cells onto result SDRs.

    @param numLocations (int)
    Number of distinct operand/result indices.

    @param minicolumnSDRs (sequence or None)
    Optional pre-built minicolumn SDRs; must contain at least numLocations
    entries. When None, SDRs are generated with createEvenlySpreadSDRs.
    """
    # Fixed population sizes for the operand and result cell layers.
    self.numOperandCells = 100
    self.numActiveOperandCells = 4
    self.numResultCells = 100
    self.numActiveResultCells = 4
    self.numLocations = numLocations
    self.numActiveMinicolumns = numActiveMinicolumns

    # One SDR per location index for each role: context operand, result,
    # driving operand.
    self.contextOperandSDRs = createEvenlySpreadSDRs(
      numLocations, self.numOperandCells, self.numActiveOperandCells)
    self.resultSDRs = createEvenlySpreadSDRs(
      numLocations, self.numResultCells, self.numActiveResultCells)
    self.drivingOperandSDRs = createEvenlySpreadSDRs(
      numLocations, self.numOperandCells, self.numActiveOperandCells)

    if minicolumnSDRs is None:
      self.minicolumnSDRs = createEvenlySpreadSDRs(
        self.numLocations, numMinicolumns, numActiveMinicolumns)
    else:
      assert len(minicolumnSDRs) >= self.numLocations
      # Copy before shuffling so the caller's list isn't reordered in place.
      self.minicolumnSDRs = list(minicolumnSDRs)
      random.shuffle(self.minicolumnSDRs)

    self.minicolumnParams = {
      "cellCount": numMinicolumns,
      "inputSize": self.numOperandCells,
      "threshold": self.numActiveOperandCells,
    }
    if segmentedProximal:
      self.pairLayerProximalConnections = SegmentedForwardModel(
        **self.minicolumnParams)
    else:
      self.pairLayerProximalConnections = ForwardModel(**self.minicolumnParams)

    self.pairParams = {
      "columnCount": numMinicolumns,
      "initialPermanence": 1.0,
      "cellsPerColumn": cellsPerColumn,
      "basalInputSize": self.numOperandCells,
      "activationThreshold": self.numActiveOperandCells,
      "minThreshold": self.numActiveOperandCells,
    }
    self.pairLayer = ApicalTiebreakPairMemory(**self.pairParams)

    # The pooling layer reads the pair layer's full cell population.
    self.poolingParams = {
      "cellCount": self.numResultCells,
      "inputSize": self.pairLayer.numberOfCells(),
      "threshold": poolingThreshold,
    }
    if segmentedPooling:
      self.poolingLayer = SegmentedForwardModel(**self.poolingParams)
    else:
      self.poolingLayer = ForwardModel(**self.poolingParams)


  def train(self):
    """
    Train the pair layer and pooling layer.
    """
    for iDriving, cDriving in enumerate(self.drivingOperandSDRs):
      # Each driving operand gets a dedicated minicolumn SDR.
      minicolumnSDR = self.minicolumnSDRs[iDriving]
      self.pairLayerProximalConnections.associate(minicolumnSDR, cDriving)
      for iContext, cContext in enumerate(self.contextOperandSDRs):
        # result = (operand1 + operand2) % numLocations, per class docstring.
        iResult = (iContext + iDriving) % self.numLocations
        cResult = self.resultSDRs[iResult]
        self.pairLayer.compute(minicolumnSDR, basalInput=cContext)
        cPair = self.pairLayer.getWinnerCells()
        self.poolingLayer.associate(cResult, cPair)


  def trainWithSpecificPairSDRs(self, pairLayerContexts):
    """
    Train the pair layer and pooling layer, manually choosing which contexts
    each cell will encode (i.e. the pair layer's distal connections).

    @param pairLayerContexts (list of lists of lists of ints)
    iContext integers for each cell, grouped by minicolumn. For example,
      [[[1, 3], [2,4]],
       [[1, 2]]]
    would specify that cell 0 connects to location 1 and location 3, while cell
    1 connects to locations 2 and 4, and cell 2 (in the second minicolumn)
    connects to locations 1 and 2.
    """
    # Grow basal segments in the pair layer.
    for iMinicolumn, contextsByCell in enumerate(pairLayerContexts):
      for iCell, cellContexts in enumerate(contextsByCell):
        iCellAbsolute = iMinicolumn*self.pairLayer.getCellsPerColumn() + iCell
        # One new segment per context, fully connected (permanence 1.0) to
        # that context's operand SDR.
        for context in cellContexts:
          segments = self.pairLayer.basalConnections.createSegments(
            [iCellAbsolute])
          self.pairLayer.basalConnections.growSynapses(
            segments, self.contextOperandSDRs[context], 1.0)

    # Associate the pair layer's minicolumn SDRs with offset cell SDRs,
    # and associate the pooling layer's location SDRs with a pool of pair SDRs.
    for iDriving, cDriving in enumerate(self.drivingOperandSDRs):
      minicolumnSDR = self.minicolumnSDRs[iDriving]
      self.pairLayerProximalConnections.associate(minicolumnSDR, cDriving)

      for iContext, cContext in enumerate(self.contextOperandSDRs):
        iResult = (iContext + iDriving) % self.numLocations
        cResult = self.resultSDRs[iResult]
        # Pick, in each active minicolumn, the cells that were assigned this
        # context above.
        cPair = [
          iMinicolumn*self.pairLayer.getCellsPerColumn() + iCell
          for iMinicolumn in minicolumnSDR
          for iCell, cellContexts in enumerate(pairLayerContexts[iMinicolumn])
          if iContext in cellContexts]
        # Exactly one cell per active minicolumn must encode this context.
        assert len(cPair) == len(minicolumnSDR)

        self.poolingLayer.associate(cResult, cPair)


  def testInferenceOnUnions(self, unionSize, numTests=300):
    """
    Select a random driving operand and a random union of context operands.
    Test how well the network outputs a union of results.

    Perform the test multiple times with different random selections.
    """
    additionalSDRCounts = []

    for _ in xrange(numTests):
      iContexts = random.sample(xrange(self.numLocations), unionSize)
      iDriving = random.choice(xrange(self.numLocations))
      cDriving = self.drivingOperandSDRs[iDriving]

      # Union of the selected context SDRs, and the union of results they
      # should produce with this driving operand.
      cContext = np.unique(np.concatenate(
        [self.contextOperandSDRs[iContext]
         for iContext in iContexts]))
      cResultExpected = np.unique(np.concatenate(
        [self.resultSDRs[(iContext + iDriving) % self.numLocations]
         for iContext in iContexts]))

      self.pairLayerProximalConnections.infer(cDriving)
      minicolumnSDR = self.pairLayerProximalConnections.activeCells
      assert minicolumnSDR.size == self.numActiveMinicolumns

      self.pairLayer.compute(minicolumnSDR, basalInput=cContext, learn=False)
      self.poolingLayer.infer(self.pairLayer.getActiveCells())

      # Every expected result cell must be active; extra activity is measured
      # below rather than treated as failure.
      assert np.all(np.in1d(cResultExpected, self.poolingLayer.activeCells))

      additionalSDRCounts.append(
        # Python 2 integer division: whole extra result-SDRs' worth of
        # unexpected active cells.
        np.setdiff1d(self.poolingLayer.activeCells,
                     cResultExpected).size / self.numActiveResultCells
      )

    return additionalSDRCounts
class PIUNCorticalColumn(object):
  """
  A L4 + L6a network. Sensory input causes minicolumns in L4 to activate,
  which drives activity in L6a. Motor input causes L6a to perform path
  integration, updating its activity, which then depolarizes cells in L4.

  Whenever the sensor moves, call movementCompute. Whenever a sensory input
  arrives, call sensoryCompute.
  """

  def __init__(self, locationConfigs, L4Overrides=None, bumpType="gaussian"):
    """
    @param locationConfigs (sequence of dicts)
    Parameters for the location modules

    @param L4Overrides (dict)
    Custom parameters for L4

    @param bumpType (string)
    One of "gaussian", "gaussian2", or "square"; selects which location
    module implementation to instantiate.

    @raises ValueError
    If bumpType is not one of the recognized values.
    """
    self.bumpType = bumpType

    # L4 geometry is fixed below: 150 minicolumns * 16 cells per column. The
    # location modules anchor onto individual L4 cells.
    L4cellCount = 150*16

    # All three variants take the same arguments, so only the factory differs.
    if bumpType == "gaussian":
      moduleFactory = createRatModuleFromCellCount
    elif bumpType == "gaussian2":
      moduleFactory = createRatModuleFromReadoutResolution
    elif bumpType == "square":
      moduleFactory = Superficial2DLocationModule
    else:
      raise ValueError("Invalid bumpType", bumpType)

    self.L6aModules = [
      moduleFactory(
        anchorInputSize=L4cellCount,
        **config)
      for config in locationConfigs]

    L4Params = {
      "columnCount": 150,
      "cellsPerColumn": 16,
      # L4's basal input is the concatenation of every module's cells.
      "basalInputSize": sum(module.numberOfCells()
                            for module in self.L6aModules)
    }

    if L4Overrides is not None:
      L4Params.update(L4Overrides)
    self.L4 = ApicalTiebreakPairMemory(**L4Params)


  def movementCompute(self, displacement, noiseFactor = 0, moduleNoiseFactor = 0):
    """
    Path-integrate a displacement in every location module.

    @param displacement (dict)
    The change in location. Example: {"top": 10, "left": 10}

    @param noiseFactor (float)
    If nonzero, standard deviation of Gaussian noise added to the
    displacement.

    @param moduleNoiseFactor (float)
    Noise parameter forwarded to each module's movementCompute.

    @return (dict)
    Data for logging/tracing.
    """

    if noiseFactor != 0:
      xdisp = np.random.normal(0, noiseFactor)
      ydisp = np.random.normal(0, noiseFactor)
    else:
      xdisp = 0
      ydisp = 0

    locationParams = {
      "displacement": [displacement["top"] + ydisp,
                       displacement["left"] + xdisp],
      "noiseFactor": moduleNoiseFactor
    }

    for module in self.L6aModules:
      module.movementCompute(**locationParams)

    return locationParams


  def sensoryCompute(self, activeMinicolumns, learn):
    """
    @param activeMinicolumns (numpy array)
    List of indices of minicolumns to activate.

    @param learn (bool)
    If True, the two layers should learn this association.

    @return (tuple of dicts)
    Data for logging/tracing.
    """
    # L4 computes first, using the location layer as basal context.
    inputParams = {
      "activeColumns": activeMinicolumns,
      "basalInput": self.getLocationRepresentation(),
      "basalGrowthCandidates": self.getLearnableLocationRepresentation(),
      "learn": learn
    }
    self.L4.compute(**inputParams)

    # Then each location module anchors on the resulting L4 activity.
    locationParams = {
      "anchorInput": self.L4.getActiveCells(),
      "anchorGrowthCandidates": self.L4.getWinnerCells(),
      "learn": learn,
    }
    for module in self.L6aModules:
      module.sensoryCompute(**locationParams)

    return (inputParams, locationParams)


  def reset(self):
    """
    Clear all cell activity.
    """
    self.L4.reset()
    for module in self.L6aModules:
      module.reset()


  def activateRandomLocation(self):
    """
    Activate a random location in the location layer.
    """
    for module in self.L6aModules:
      module.activateRandomLocation()


  def getSensoryRepresentation(self):
    """
    Gets the active cells in the sensory layer.
    """
    return self.L4.getActiveCells()


  def _gatherModuleCells(self, getCellsForModule):
    """
    Concatenate per-module cell indices into one array, offsetting each
    module's indices so all modules share a single global index space.

    @param getCellsForModule (callable)
    Maps a module to a numpy array of cell indices within that module.

    @return (numpy array)
    """
    # Collect the pieces and concatenate once: repeated np.append re-copies
    # the accumulated array every iteration (quadratic). Seeding the list with
    # an empty uint32 array preserves the dtype promotion of the previous
    # np.append-based implementation.
    pieces = [np.array([], dtype="uint32")]
    totalPrevCells = 0
    for module in self.L6aModules:
      pieces.append(getCellsForModule(module) + totalPrevCells)
      totalPrevCells += module.numberOfCells()

    return np.concatenate(pieces)


  def getLocationRepresentation(self):
    """
    Get the full population representation of the location layer.
    """
    return self._gatherModuleCells(lambda module: module.getActiveCells())


  def getLearnableLocationRepresentation(self):
    """
    Get the cells in the location layer that should be associated with the
    sensory input layer representation. In some models, this is identical to the
    active cells. In others, it's a subset.
    """
    return self._gatherModuleCells(lambda module: module.getLearnableCells())


  def getSensoryAssociatedLocationRepresentation(self):
    """
    Get the location cells in the location layer that were driven by the input
    layer (or, during learning, were associated with this input.)
    """
    return self._gatherModuleCells(
      lambda module: module.sensoryAssociatedCells)
# --- Example 5 (original scrape separator: "Esempio n. 5" / vote count "0") ---
class SingleLayerLocation2DExperiment(object):
  """
  The experiment code organized into a class.
  """

  def __init__(self, diameter, objects, featureNames):
    """
    @param diameter (int)
    Width/height of the square grid of egocentric locations.

    @param objects (dict)
    Maps each object name to a dict of {allocentricLocation: featureName}.

    @param featureNames (sequence)
    Names of the features to create SDRs for.
    """
    self.diameter = diameter

    self.objects = objects

    # A grid of location SDRs.
    self.locations = dict(
      ((i, j), np.array(sorted(random.sample(xrange(1000), 30)), dtype="uint32"))
      for i in xrange(diameter)
      for j in xrange(diameter))

    # 8 transition SDRs -- one for each straight and diagonal direction.
    self.transitions = dict(
      ((i, j), np.array(sorted(random.sample(xrange(1000), 30)), dtype="uint32"))
      for i in xrange(-1, 2)
      for j in xrange(-1, 2)
      if i != 0 or j != 0)

    # One random SDR per feature name.
    self.features = dict(
      (k, np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
      for k in featureNames)

    self.locationLayer = SingleLayerLocationMemory(**{
      "cellCount": 1000,
      "deltaLocationInputSize": 1000,
      "featureLocationInputSize": 150*32,
      "sampleSize": 15,
      "activationThreshold": 10,
      "learningThreshold": 8,
    })

    self.inputLayer = ApicalTiebreakPairMemory(**{
      "columnCount": 150,
      "cellsPerColumn": 32,
      "basalInputSize": 1000,
      "apicalInputSize": 4096,
    })

    self.objectLayer = ColumnPooler(**{
      "inputWidth": 150 * 32
    })

    # Use these for classifying SDRs and for testing whether they're correct.
    self.inputRepresentations = {}
    self.objectRepresentations = {}
    self.learnedObjectPlacements = {}

    # monitor token -> monitor; tokens are handed out by addMonitor.
    self.monitors = {}
    self.nextMonitorToken = 1


  def addMonitor(self, monitor):
    """
    Subscribe to SingleLayer2DExperiment events.

    @param monitor (SingleLayer2DExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """

    token = self.nextMonitorToken
    self.nextMonitorToken += 1

    self.monitors[token] = monitor

    return token


  def removeMonitor(self, monitorToken):
    """
    Unsubscribe from SingleLayer2DExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
    del self.monitors[monitorToken]


  def doTimestep(self, locationSDR, transitionSDR, featureSDR,
                 egocentricLocation, learn):
    """
    Run one timestep.
    """

    for monitor in self.monitors.values():
      monitor.beforeTimestep(locationSDR, transitionSDR, featureSDR,
                             egocentricLocation, learn)

    # Location layer: path-integrate / settle, reading the input layer's
    # previous activity as feature-location input.
    params = {
      "newLocation": locationSDR,
      "deltaLocation": transitionSDR,
      "featureLocationInput": self.inputLayer.getActiveCells(),
      "featureLocationGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
      "learn": learn,
    }
    self.locationLayer.compute(**params)
    for monitor in self.monitors.values():
      monitor.afterLocationCompute(**params)

    # Input layer: sensed feature with location (basal) and object (apical)
    # context.
    params = {
      "activeColumns": featureSDR,
      "basalInput": self.locationLayer.getActiveCells(),
      "apicalInput": self.objectLayer.getActiveCells(),
    }
    self.inputLayer.compute(**params)
    for monitor in self.monitors.values():
      monitor.afterInputCompute(**params)

    # Object layer: pool over the input layer's activity.
    params = {
      "feedforwardInput": self.inputLayer.getActiveCells(),
      "feedforwardGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
      "learn": learn,
    }
    self.objectLayer.compute(**params)
    for monitor in self.monitors.values():
      monitor.afterObjectCompute(**params)


  def learnTransitions(self):
    """
    Train the location layer to do path integration. For every location, teach
    it each previous-location + motor command pair.
    """

    print "Learning transitions"
    for (i, j), locationSDR in self.locations.iteritems():
      print "i, j", (i, j)
      for (di, dj), transitionSDR in self.transitions.iteritems():
        i2 = i + di
        j2 = j + dj
        # Only teach transitions that stay inside the grid.
        if (0 <= i2 < self.diameter and
            0 <= j2 < self.diameter):
          # Repeat each transition 5 times.
          for _ in xrange(5):
            self.locationLayer.reset()
            self.locationLayer.compute(newLocation=self.locations[(i,j)])
            self.locationLayer.compute(deltaLocation=transitionSDR,
                                       newLocation=self.locations[(i2, j2)])

    self.locationLayer.reset()


  def learnObjects(self, objectPlacements):
    """
    Learn each provided object in egocentric space. Touch every location on each
    object.

    This method doesn't try move the sensor along a path. Instead it just leaps
    the sensor to each object location, resetting the location layer with each
    leap.

    This method simultaneously learns 4 sets of synapses:
    - location -> input
    - input -> location
    - input -> object
    - object -> input
    """
    for monitor in self.monitors.values():
      monitor.afterPlaceObjects(objectPlacements)

    for objectName, objectDict in self.objects.iteritems():
      self.reset()

      objectPlacement = objectPlacements[objectName]

      for locationName, featureName in objectDict.iteritems():
        egocentricLocation = (locationName[0] + objectPlacement[0],
                              locationName[1] + objectPlacement[1])

        locationSDR = self.locations[egocentricLocation]
        featureSDR = self.features[featureName]
        # No movement while learning; the location is supplied directly.
        transitionSDR = np.empty(0)

        self.locationLayer.reset()
        self.inputLayer.reset()

        # Repeat the timestep 10 times so the layers settle on this
        # association.
        for _ in xrange(10):
          self.doTimestep(locationSDR, transitionSDR, featureSDR,
                          egocentricLocation, learn=True)

        self.inputRepresentations[(featureName, egocentricLocation)] = (
          self.inputLayer.getActiveCells())

      self.objectRepresentations[objectName] = self.objectLayer.getActiveCells()
      self.learnedObjectPlacements[objectName] = objectPlacement


  def _selectTransition(self, allocentricLocation, objectDict, visitCounts):
    """
    Choose the transition that lands us in the location we've touched the least
    often. Break ties randomly, i.e. choose the first candidate in a shuffled
    list.
    """

    # Only consider transitions that land on a location the object occupies.
    candidates = list(transition
                      for transition in self.transitions.keys()
                      if (allocentricLocation[0] + transition[0],
                          allocentricLocation[1] + transition[1]) in objectDict)
    random.shuffle(candidates)

    selectedVisitCount = None
    selectedTransition = None
    selectedAllocentricLocation = None

    for transition in candidates:
      candidateLocation = (allocentricLocation[0] + transition[0],
                           allocentricLocation[1] + transition[1])

      if (selectedVisitCount is None or
          visitCounts[candidateLocation] < selectedVisitCount):
        selectedVisitCount = visitCounts[candidateLocation]
        selectedTransition = transition
        selectedAllocentricLocation = candidateLocation

    return selectedAllocentricLocation, selectedTransition


  def inferObject(self, objectPlacements, objectName, startPoint,
                  transitionSequence, settlingTime=2):
    """
    Infer an object by following a predetermined sequence of transitions,
    starting at startPoint. Each touch runs one movement timestep followed by
    `settlingTime` timesteps with no movement, all with learn=False. The walk
    ends when transitionSequence is exhausted.
    """
    for monitor in self.monitors.values():
      monitor.afterPlaceObjects(objectPlacements)

    objectDict = self.objects[objectName]

    self.reset()

    allocentricLocation = startPoint
    nextTransitionSDR = np.empty(0, dtype="uint32")

    transitionIterator = iter(transitionSequence)

    try:
      while True:
        featureName = objectDict[allocentricLocation]
        egocentricLocation = (allocentricLocation[0] +
                              objectPlacements[objectName][0],
                              allocentricLocation[1] +
                              objectPlacements[objectName][1])
        featureSDR = self.features[featureName]

        # One movement step, then settlingTime motionless steps.
        steps = ([nextTransitionSDR] +
                 [np.empty(0)]*settlingTime)
        for transitionSDR in steps:
          self.doTimestep(np.empty(0), transitionSDR, featureSDR,
                          egocentricLocation, learn=False)

        transitionName = transitionIterator.next()
        allocentricLocation = (allocentricLocation[0] + transitionName[0],
                               allocentricLocation[1] + transitionName[1])
        nextTransitionSDR = self.transitions[transitionName]
    except StopIteration:
      pass


  def inferObjectsWithRandomMovements(self, objectPlacements, maxTouches=20,
                                      settlingTime=2):
    """
    Infer each object without any location input.
    """

    for monitor in self.monitors.values():
      monitor.afterPlaceObjects(objectPlacements)

    for objectName, objectDict in self.objects.iteritems():
      self.reset()

      visitCounts = defaultdict(int)

      learnedObjectPlacement = self.learnedObjectPlacements[objectName]

      allocentricLocation = random.choice(objectDict.keys())
      nextTransitionSDR = np.empty(0, dtype="uint32")

      # Traverse the object until it is inferred.
      # NOTE(review): `success` is set but never returned or stored -- callers
      # cannot observe whether inference succeeded.
      success = False

      for _ in xrange(maxTouches):
        featureName = objectDict[allocentricLocation]
        egocentricLocation = (allocentricLocation[0] +
                              objectPlacements[objectName][0],
                              allocentricLocation[1] +
                              objectPlacements[objectName][1])
        featureSDR = self.features[featureName]

        # One movement step, then settlingTime motionless steps.
        steps = ([nextTransitionSDR] +
                 [np.empty(0)]*settlingTime)
        for transitionSDR in steps:
          self.doTimestep(np.empty(0), transitionSDR, featureSDR,
                          egocentricLocation, learn=False)

        visitCounts[allocentricLocation] += 1

        # We should eventually infer the egocentric location where we originally
        # learned this location on the object.
        learnedEgocentricLocation = (
          allocentricLocation[0] + learnedObjectPlacement[0],
          allocentricLocation[1] + learnedObjectPlacement[1])

        # Inference succeeds when all three layers match the representations
        # stored during learning.
        if (set(self.objectLayer.getActiveCells()) ==
            set(self.objectRepresentations[objectName]) and

            set(self.inputLayer.getActiveCells()) ==
            set(self.inputRepresentations[(featureName,
                                           learnedEgocentricLocation)]) and

            set(self.locationLayer.getActiveCells()) ==
            set(self.locations[learnedEgocentricLocation])):
          success = True
          break
        else:
          allocentricLocation, transitionName = self._selectTransition(
            allocentricLocation, objectDict, visitCounts)
          nextTransitionSDR = self.transitions[transitionName]


  def reset(self):
    """Reset all three layers and notify monitors."""
    self.locationLayer.reset()
    self.objectLayer.reset()
    self.inputLayer.reset()

    for monitor in self.monitors.values():
      monitor.afterReset()
class PIUNCorticalColumn(object):
    """
    A L4 + L6a network. Sensory input causes minicolumns in L4 to activate,
    which drives activity in L6a. Motor input causes L6a to perform path
    integration, updating its activity, which then depolarizes cells in L4.

    Whenever the sensor moves, call movementCompute. Whenever a sensory input
    arrives, call sensoryCompute.
    """

    def __init__(self, locationConfigs, L4Overrides=None, bumpType="gaussian"):
        """
        @param locationConfigs (sequence of dicts)
        Parameters for the location modules

        @param L4Overrides (dict)
        Custom parameters for L4

        @param bumpType (string)
        One of "gaussian", "gaussian2", or "square"; selects which location
        module implementation to instantiate.

        @raises ValueError
        If bumpType is not one of the recognized values.
        """
        self.bumpType = bumpType

        # L4 geometry is fixed below: 150 minicolumns * 16 cells per column.
        # The location modules anchor onto individual L4 cells.
        L4cellCount = 150 * 16

        # All three variants take the same arguments; only the factory
        # differs.
        if bumpType == "gaussian":
            moduleFactory = createRatModuleFromCellCount
        elif bumpType == "gaussian2":
            moduleFactory = createRatModuleFromReadoutResolution
        elif bumpType == "square":
            moduleFactory = Superficial2DLocationModule
        else:
            raise ValueError("Invalid bumpType", bumpType)

        self.L6aModules = [
            moduleFactory(anchorInputSize=L4cellCount, **config)
            for config in locationConfigs
        ]

        L4Params = {
            "columnCount": 150,
            "cellsPerColumn": 16,
            # L4's basal input is the concatenation of every module's cells.
            "basalInputSize":
            sum(module.numberOfCells() for module in self.L6aModules)
        }

        if L4Overrides is not None:
            L4Params.update(L4Overrides)
        self.L4 = ApicalTiebreakPairMemory(**L4Params)

    def movementCompute(self,
                        displacement,
                        noiseFactor=0,
                        moduleNoiseFactor=0):
        """
        Path-integrate a displacement in every location module.

        @param displacement (dict)
        The change in location. Example: {"top": 10, "left": 10}

        @param noiseFactor (float)
        If nonzero, standard deviation of Gaussian noise added to the
        displacement.

        @param moduleNoiseFactor (float)
        Noise parameter forwarded to each module's movementCompute.

        @return (dict)
        Data for logging/tracing.
        """

        if noiseFactor != 0:
            xdisp = np.random.normal(0, noiseFactor)
            ydisp = np.random.normal(0, noiseFactor)
        else:
            xdisp = 0
            ydisp = 0

        locationParams = {
            "displacement":
            [displacement["top"] + ydisp, displacement["left"] + xdisp],
            "noiseFactor":
            moduleNoiseFactor
        }

        for module in self.L6aModules:
            module.movementCompute(**locationParams)

        return locationParams

    def sensoryCompute(self, activeMinicolumns, learn):
        """
        @param activeMinicolumns (numpy array)
        List of indices of minicolumns to activate.

        @param learn (bool)
        If True, the two layers should learn this association.

        @return (tuple of dicts)
        Data for logging/tracing.
        """
        # L4 computes first, using the location layer as basal context.
        inputParams = {
            "activeColumns": activeMinicolumns,
            "basalInput": self.getLocationRepresentation(),
            "basalGrowthCandidates": self.getLearnableLocationRepresentation(),
            "learn": learn
        }
        self.L4.compute(**inputParams)

        # Then each location module anchors on the resulting L4 activity.
        locationParams = {
            "anchorInput": self.L4.getActiveCells(),
            "anchorGrowthCandidates": self.L4.getWinnerCells(),
            "learn": learn,
        }
        for module in self.L6aModules:
            module.sensoryCompute(**locationParams)

        return (inputParams, locationParams)

    def reset(self):
        """
        Clear all cell activity.
        """
        self.L4.reset()
        for module in self.L6aModules:
            module.reset()

    def activateRandomLocation(self):
        """
        Activate a random location in the location layer.
        """
        for module in self.L6aModules:
            module.activateRandomLocation()

    def getSensoryRepresentation(self):
        """
        Gets the active cells in the sensory layer.
        """
        return self.L4.getActiveCells()

    def _gatherModuleCells(self, getCellsForModule):
        """
        Concatenate per-module cell indices into one array, offsetting each
        module's indices so all modules share a single global index space.

        @param getCellsForModule (callable)
        Maps a module to a numpy array of cell indices within that module.

        @return (numpy array)
        """
        # Collect the pieces and concatenate once: repeated np.append
        # re-copies the accumulated array each iteration (quadratic). Seeding
        # the list with an empty uint32 array preserves the dtype promotion
        # of the previous np.append-based implementation.
        pieces = [np.array([], dtype="uint32")]
        totalPrevCells = 0
        for module in self.L6aModules:
            pieces.append(getCellsForModule(module) + totalPrevCells)
            totalPrevCells += module.numberOfCells()

        return np.concatenate(pieces)

    def getLocationRepresentation(self):
        """
        Get the full population representation of the location layer.
        """
        return self._gatherModuleCells(lambda module: module.getActiveCells())

    def getLearnableLocationRepresentation(self):
        """
        Get the cells in the location layer that should be associated with the
        sensory input layer representation. In some models, this is identical
        to the active cells. In others, it's a subset.
        """
        return self._gatherModuleCells(
            lambda module: module.getLearnableCells())

    def getSensoryAssociatedLocationRepresentation(self):
        """
        Get the location cells in the location layer that were driven by the
        input layer (or, during learning, were associated with this input.)
        """
        return self._gatherModuleCells(
            lambda module: module.sensoryAssociatedCells)
# --- Example 7 (original scrape separator: "Esempio n. 7" / vote count "0") ---
class Grid2DLocationExperiment(object):
    """
  The experiment code organized into a class.
  """
    def __init__(self, objects, objectPlacements, featureNames,
                 locationConfigs, worldDimensions):
        # @param objects (dict) objectName -> iterable of feature dicts; each
        #     feature dict supplies "top", "left", "width", "height", "name"
        #     (see learnObjects / inferObjectsWithRandomMovements).
        # @param objectPlacements (dict) objectName -> (top, left) offset of
        #     the object within the world.
        # @param featureNames (iterable) keys for the random feature SDRs.
        # @param locationConfigs (sequence of dicts) per-module parameters for
        #     SuperficialLocationModule2D.
        # @param worldDimensions stored but not read by this class's code.

        self.objects = objects
        self.objectPlacements = objectPlacements
        self.worldDimensions = worldDimensions

        # One fixed, random 15-of-150 SDR per feature name.
        self.features = dict(
            (k,
             np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
            for k in featureNames)

        # One location module per config; each anchors on the input layer's
        # cell population (150 columns * 32 cells per column).
        self.locationModules = [
            SuperficialLocationModule2D(anchorInputSize=150 * 32, **config)
            for config in locationConfigs
        ]

        # L4-like sensory input layer: basal input comes from the concatenated
        # location modules, apical input from the object layer.
        # NOTE(review): the 18 multiplier in basalInputSize presumably encodes
        # the number of location modules -- confirm against len(locationConfigs).
        self.inputLayer = ApicalTiebreakPairMemory(
            **{
                "columnCount":
                150,
                "cellsPerColumn":
                32,
                "basalInputSize":
                18 * sum(
                    np.prod(config["cellDimensions"])
                    for config in locationConfigs),
                "apicalInputSize":
                4096
            })

        # L2-like object layer pooling over the input layer's cells.
        self.objectLayer = ColumnPooler(**{"inputWidth": 150 * 32})

        # Use these for classifying SDRs and for testing whether they're correct.
        self.locationRepresentations = {
            # Example:
            # (objectName, (top, left)): [0, 26, 54, 77, 101, ...]
        }
        self.inputRepresentations = {
            # Example:
            # (objectName, (top, left), featureName): [0, 26, 54, 77, 101, ...]
        }
        self.objectRepresentations = {
            # Example:
            # objectName: [14, 19, 54, 107, 201, ...]
        }

        # Sensor location in world coordinates; None after reset, so the next
        # move() skips path integration (no previous location to diff against).
        self.locationInWorld = None

        # Upper bound on inference settling iterations per sensation.
        self.maxSettlingTime = 10

        # Monitor registry: opaque token -> monitor object.
        self.monitors = {}
        self.nextMonitorToken = 1

    def addMonitor(self, monitor):
        """
    Subscribe to Grid2DLocationExperimentMonitor events.

    @param monitor (Grid2DLocationExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """

        token = self.nextMonitorToken
        self.nextMonitorToken += 1

        self.monitors[token] = monitor

        return token

    def removeMonitor(self, monitorToken):
        """
    Unsubscribe from LocationExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
        del self.monitors[monitorToken]

    def getActiveLocationCells(self):
        """
    Concatenate every module's active cells into one flat population SDR,
    offsetting each module's indices by the cells of the preceding modules.
    """
        activeCells = np.array([], dtype="uint32")

        totalPrevCells = 0
        for i, module in enumerate(self.locationModules):
            activeCells = np.append(activeCells,
                                    module.getActiveCells() + totalPrevCells)
            totalPrevCells += module.numberOfCells()

        return activeCells

    def move(self, objectName, locationOnObject):
        """
    Move the sensor to a location on an object. If a previous location is
    known, shift the location modules by the world-coordinate delta and
    notify monitors before and after the shift.
    """
        objectPlacement = self.objectPlacements[objectName]
        locationInWorld = (objectPlacement[0] + locationOnObject[0],
                           objectPlacement[1] + locationOnObject[1])

        if self.locationInWorld is not None:
            deltaLocation = (locationInWorld[0] - self.locationInWorld[0],
                             locationInWorld[1] - self.locationInWorld[1])

            for monitor in self.monitors.values():
                monitor.beforeMove(deltaLocation)

            params = {"deltaLocation": deltaLocation}
            for module in self.locationModules:
                module.shift(**params)

            for monitor in self.monitors.values():
                monitor.afterLocationShift(**params)

        self.locationInWorld = locationInWorld
        for monitor in self.monitors.values():
            monitor.afterWorldLocationChanged(locationInWorld)

    def _senseInferenceMode(self, featureSDR):
        """
    Process one sensation without learning, repeating the input/object/
    location loop until the network settles (activity stops changing) or
    maxSettlingTime iterations have run.
    """
        prevCellActivity = None
        for i in xrange(self.maxSettlingTime):
            inputParams = {
                "activeColumns": featureSDR,
                "basalInput": self.getActiveLocationCells(),
                "apicalInput": self.objectLayer.getActiveCells(),
                "learn": False
            }
            self.inputLayer.compute(**inputParams)

            objectParams = {
                "feedforwardInput":
                self.inputLayer.getActiveCells(),
                "feedforwardGrowthCandidates":
                self.inputLayer.getPredictedActiveCells(),
                "learn":
                False,
            }
            self.objectLayer.compute(**objectParams)

            locationParams = {"anchorInput": self.inputLayer.getActiveCells()}
            for module in self.locationModules:
                module.anchor(**locationParams)

            # Snapshot all three layers to detect a fixed point.
            cellActivity = (set(self.objectLayer.getActiveCells()),
                            set(self.inputLayer.getActiveCells()),
                            set(self.getActiveLocationCells()))

            if cellActivity == prevCellActivity:
                # It settled. Don't even log this timestep.
                break
            else:
                prevCellActivity = cellActivity
                for monitor in self.monitors.values():
                    if i > 0:
                        monitor.markSensoryRepetition()

                    monitor.afterInputCompute(**inputParams)
                    monitor.afterObjectCompute(**objectParams)
                    monitor.afterLocationAnchor(**locationParams)

    def _senseLearningMode(self, featureSDR):
        """
    Process one sensation with learning enabled. The location modules learn
    from the input layer's winner cells (not all active cells).
    """
        inputParams = {
            "activeColumns": featureSDR,
            "basalInput": self.getActiveLocationCells(),
            "apicalInput": self.objectLayer.getActiveCells(),
            "learn": True
        }
        self.inputLayer.compute(**inputParams)

        objectParams = {
            "feedforwardInput":
            self.inputLayer.getActiveCells(),
            "feedforwardGrowthCandidates":
            self.inputLayer.getPredictedActiveCells(),
            "learn":
            True,
        }
        self.objectLayer.compute(**objectParams)

        locationParams = {"anchorInput": self.inputLayer.getWinnerCells()}
        for module in self.locationModules:
            module.learn(**locationParams)

        for monitor in self.monitors.values():
            monitor.afterInputCompute(**inputParams)
            monitor.afterObjectCompute(**objectParams)

    def sense(self, featureSDR, learn):
        """
    Feed one feature SDR through the network, in learning or inference mode.
    """
        for monitor in self.monitors.values():
            monitor.beforeSense(featureSDR)

        if learn:
            self._senseLearningMode(featureSDR)
        else:
            self._senseInferenceMode(featureSDR)

    def learnObjects(self):
        """
    Learn each provided object.

    This method simultaneously learns 4 sets of synapses:
    - location -> input
    - input -> location
    - input -> object
    - object -> input
    """
        for objectName, objectFeatures in self.objects.iteritems():
            self.reset()

            for module in self.locationModules:
                module.activateRandomLocation()

            for feature in objectFeatures:
                # Sense at the feature's center. NOTE(review): under Python 2
                # this is integer division for int dimensions -- confirm intent.
                locationOnObject = (feature["top"] + feature["height"] / 2,
                                    feature["left"] + feature["width"] / 2)
                self.move(objectName, locationOnObject)

                featureName = feature["name"]
                featureSDR = self.features[featureName]
                # Repeat the sensation to strengthen the learned associations.
                for _ in xrange(10):
                    self.sense(featureSDR, learn=True)

                # Record the settled representations for later verification.
                self.locationRepresentations[(
                    objectName,
                    locationOnObject)] = (self.getActiveLocationCells())
                self.inputRepresentations[(
                    objectName, locationOnObject,
                    featureName)] = (self.inputLayer.getActiveCells())

            self.objectRepresentations[
                objectName] = self.objectLayer.getActiveCells()

    def inferObjectsWithRandomMovements(self):
        """
    Infer each object without any location input.
    """
        for objectName, objectFeatures in self.objects.iteritems():
            self.reset()

            inferred = False
            prevTouchSequence = None

            # Up to 4 passes over the object's features, in random order.
            for _ in xrange(4):

                # Reshuffle until the first touch differs from the previous
                # pass's last touch (avoid sensing the same feature twice).
                while True:
                    touchSequence = list(objectFeatures)
                    random.shuffle(touchSequence)

                    if prevTouchSequence is not None:
                        if touchSequence[0] == prevTouchSequence[-1]:
                            continue

                    break

                for i, feature in enumerate(touchSequence):
                    locationOnObject = (feature["top"] + feature["height"] / 2,
                                        feature["left"] + feature["width"] / 2)
                    self.move(objectName, locationOnObject)

                    featureName = feature["name"]
                    featureSDR = self.features[featureName]
                    self.sense(featureSDR, learn=False)

                    # Inference succeeds when all three layers match the
                    # representations recorded during learning.
                    inferred = (
                        set(self.objectLayer.getActiveCells()) == set(
                            self.objectRepresentations[objectName])
                        and set(self.inputLayer.getActiveCells()) == set(
                            self.inputRepresentations[(objectName,
                                                       locationOnObject,
                                                       featureName)])
                        and set(self.getActiveLocationCells()) == set(
                            self.locationRepresentations[(objectName,
                                                          locationOnObject)]))

                    if inferred:
                        break

                prevTouchSequence = touchSequence

                if inferred:
                    break

    def reset(self):
        """
    Clear all layer and module activity, forget the world location, and
    notify monitors.
    """
        for module in self.locationModules:
            module.reset()
        self.objectLayer.reset()
        self.inputLayer.reset()

        self.locationInWorld = None

        for monitor in self.monitors.values():
            monitor.afterReset()
# Esempio n. 8
class PIUNCorticalColumn(object):
    """
  A L4 + L6a network. Sensory input causes minicolumns in L4 to activate,
  which drives activity in L6a. Motor input causes L6a to perform path
  integration, updating its activity, which then depolarizes cells in L4.

  Whenever the sensor moves, call movementCompute. Whenever a sensory input
  arrives, call sensoryCompute.

  NOTE(review): an identically-named class is defined again later in this
  file; at import time the later definition shadows this one.
  """
    def __init__(self, locationConfigs, L4Overrides=None):
        """
    @param L4Overrides (dict)
    Custom parameters for L4

    @param locationConfigs (sequence of dicts)
    Parameters for the location modules
    """
        # NOTE(review): basalInputSize multiplies the module count by the
        # total cell count across all configs -- verify this matches the
        # expected basal input width.
        L4Params = {
            "columnCount":
            150,
            "cellsPerColumn":
            16,
            "basalInputSize": (len(locationConfigs) * sum(
                np.prod(config["cellDimensions"])
                for config in locationConfigs))
        }
        if L4Overrides is not None:
            L4Params.update(L4Overrides)

        self.L4 = ApicalTiebreakPairMemory(**L4Params)

        # One location module per config; each anchors on L4's cell activity.
        self.L6aModules = [
            Superficial2DLocationModule(
                anchorInputSize=self.L4.numberOfCells(), **config)
            for config in locationConfigs
        ]

    def movementCompute(self,
                        displacement,
                        noiseFactor=0,
                        moduleNoiseFactor=0):
        """
    @param displacement (dict)
    The change in location. Example: {"top": 10, "left", 10}

    @param noiseFactor (float)
    Standard deviation of Gaussian noise added to the displacement itself.

    @param moduleNoiseFactor (float)
    Noise level forwarded to each location module's movementCompute.

    @return (dict)
    Data for logging/tracing.
    """

        # Optionally perturb the displacement with Gaussian noise.
        if noiseFactor != 0:
            xdisp = np.random.normal(0, noiseFactor)
            ydisp = np.random.normal(0, noiseFactor)
        else:
            xdisp = 0
            ydisp = 0

        locationParams = {
            "displacement":
            [displacement["top"] + ydisp, displacement["left"] + xdisp],
            "noiseFactor":
            moduleNoiseFactor
        }

        for module in self.L6aModules:
            module.movementCompute(**locationParams)

        return locationParams

    def sensoryCompute(self, activeMinicolumns, learn):
        """
    @param activeMinicolumns (numpy array)
    List of indices of minicolumns to activate.

    @param learn (bool)
    If True, the two layers should learn this association.

    @return (tuple of dicts)
    Data for logging/tracing.
    """
        inputParams = {
            "activeColumns": activeMinicolumns,
            "basalInput": self.getLocationRepresentation(),
            "learn": learn
        }
        self.L4.compute(**inputParams)

        # Location modules anchor on L4's active cells; growth candidates are
        # restricted to the winner cells.
        locationParams = {
            "anchorInput": self.L4.getActiveCells(),
            "anchorGrowthCandidates": self.L4.getWinnerCells(),
            "learn": learn,
        }
        for module in self.L6aModules:
            module.sensoryCompute(**locationParams)

        return (inputParams, locationParams)

    def reset(self):
        """
    Clear all cell activity.
    """
        self.L4.reset()
        for module in self.L6aModules:
            module.reset()

    def activateRandomLocation(self):
        """
    Activate a random location in the location layer.
    """
        for module in self.L6aModules:
            module.activateRandomLocation()

    def getSensoryRepresentation(self):
        """
    Gets the active cells in the sensory layer.
    """
        return self.L4.getActiveCells()

    def getLocationRepresentation(self):
        """
    Get the full population representation of the location layer.

    Each module's cell indices are offset by the cells of preceding modules.
    """
        activeCells = np.array([], dtype="uint32")

        totalPrevCells = 0
        for module in self.L6aModules:
            activeCells = np.append(activeCells,
                                    module.getActiveCells() + totalPrevCells)
            totalPrevCells += module.numberOfCells()

        return activeCells
class PIUNCorticalColumn(object):
  """
  A L4 + L6a network. Sensory input causes minicolumns in L4 to activate,
  which drives activity in L6a. Motor input causes L6a to perform path
  integration, updating its activity, which then depolarizes cells in L4.

  Whenever the sensor moves, call movementCompute. Whenever a sensory input
  arrives, call sensoryCompute.
  """

  def __init__(self, locationConfigs, L4Overrides=None):
    """
    @param L4Overrides (dict)
    Custom parameters for L4

    @param locationConfigs (sequence of dicts)
    Parameters for the location modules
    """
    totalModuleCells = sum(np.prod(cfg["cellDimensions"])
                           for cfg in locationConfigs)
    params = {
      "columnCount": 150,
      "cellsPerColumn": 16,
      "basalInputSize": len(locationConfigs) * totalModuleCells,
    }
    if L4Overrides is not None:
      params.update(L4Overrides)

    self.L4 = ApicalTiebreakPairMemory(**params)

    # One location module per config; each anchors on L4's cell population.
    self.L6aModules = [
      Superficial2DLocationModule(anchorInputSize=self.L4.numberOfCells(),
                                  **cfg)
      for cfg in locationConfigs]


  def movementCompute(self, displacement, noiseFactor=0, moduleNoiseFactor=0):
    """
    @param displacement (dict)
    The change in location. Example: {"top": 10, "left", 10}

    @return (dict)
    Data for logging/tracing.
    """
    # Optionally add Gaussian noise to the displacement itself.
    if noiseFactor != 0:
      xNoise = np.random.normal(0, noiseFactor)
      yNoise = np.random.normal(0, noiseFactor)
    else:
      xNoise = 0
      yNoise = 0

    shiftParams = {
      "displacement": [displacement["top"] + yNoise,
                       displacement["left"] + xNoise],
      "noiseFactor": moduleNoiseFactor
    }

    for locationModule in self.L6aModules:
      locationModule.movementCompute(**shiftParams)

    return shiftParams


  def sensoryCompute(self, activeMinicolumns, learn):
    """
    @param activeMinicolumns (numpy array)
    List of indices of minicolumns to activate.

    @param learn (bool)
    If True, the two layers should learn this association.

    @return (tuple of dicts)
    Data for logging/tracing.
    """
    ffParams = {
      "activeColumns": activeMinicolumns,
      "basalInput": self.getLocationRepresentation(),
      "learn": learn
    }
    self.L4.compute(**ffParams)

    # Anchor the location modules on L4's activity; only winner cells are
    # offered as growth candidates.
    anchorParams = {
      "anchorInput": self.L4.getActiveCells(),
      "anchorGrowthCandidates": self.L4.getWinnerCells(),
      "learn": learn,
    }
    for locationModule in self.L6aModules:
      locationModule.sensoryCompute(**anchorParams)

    return (ffParams, anchorParams)


  def reset(self):
    """
    Clear all cell activity.
    """
    self.L4.reset()
    for locationModule in self.L6aModules:
      locationModule.reset()


  def activateRandomLocation(self):
    """
    Activate a random location in the location layer.
    """
    for locationModule in self.L6aModules:
      locationModule.activateRandomLocation()


  def getSensoryRepresentation(self):
    """
    Gets the active cells in the sensory layer.
    """
    return self.L4.getActiveCells()


  def getLocationRepresentation(self):
    """
    Get the full population representation of the location layer.
    """
    cellOffset = 0
    pieces = [np.array([], dtype="uint32")]
    for locationModule in self.L6aModules:
      pieces.append(locationModule.getActiveCells() + cellOffset)
      cellOffset += locationModule.numberOfCells()
    return np.concatenate([np.ravel(p) for p in pieces])
class RelationalMemory(object):
    """
    Sensorimotor relational-memory model.

    An L4 TemporalMemory receives basal context from per-module L6 cell
    populations. L5 holds "transforms" between successive L6 states (computed
    with bind/unbind over the module's cell space), and the pooled L5 activity
    is classified with a KNN classifier to recognize objects.
    """
    def __init__(self, l4N, l4W, numModules, moduleDimensions,
                 maxActivePerModule, l6ActivationThreshold):
        # @param l4N (int) number of L4 minicolumns.
        # @param l4W (int) stored but not read in this class's visible code.
        # @param numModules (int) number of L6 modules.
        # @param moduleDimensions (sequence) cell grid shape of each module.
        # @param maxActivePerModule (int) cap on retained L6 activity history.
        # @param l6ActivationThreshold (int) synapse count needed to activate
        #     an L6 cell from L4 input.
        self.numModules = numModules
        self.moduleDimensions = moduleDimensions
        self._cellsPerModule = np.prod(moduleDimensions)
        self.maxActivePerModule = maxActivePerModule
        self.l4N = l4N
        self.l4W = l4W
        self.l6ActivationThreshold = l6ActivationThreshold

        self.l4TM = TemporalMemory(
            columnCount=l4N,
            basalInputSize=numModules * self._cellsPerModule,
            cellsPerColumn=4,
            #activationThreshold=int(numModules / 2) + 1,
            #reducedBasalThreshold=int(numModules / 2) + 1,
            activationThreshold=1,
            reducedBasalThreshold=1,
            initialPermanence=1.0,
            connectedPermanence=0.5,
            minThreshold=1,
            sampleSize=numModules,
            permanenceIncrement=1.0,
            permanenceDecrement=0.0,
        )
        # One L4->L6 connection matrix per module.
        self.l6Connections = [
            Connections(numCells=self._cellsPerModule)
            for _ in xrange(numModules)
        ]

        #self.classifier = KNNClassifier(k=1, distanceMethod="rawOverlap")
        self.classifier = KNNClassifier(k=1, distanceMethod="norm")

        # Active state
        # activeL6Cells[m] is a history: list of per-timestep cell lists,
        # newest first, truncated to maxActivePerModule entries.
        self.activeL6Cells = [[] for _ in xrange(numModules)]
        self.activeL5Cells = [[] for _ in xrange(numModules)]
        self.predictedL6Cells = [set([]) for _ in xrange(numModules)]

        # Debug state
        self.activeL6BeforeMotor = [[] for _ in xrange(numModules)]
        self.l6ToL4Map = collections.defaultdict(list)

    def reset(self):
        """Clear all per-object active/predicted state (learned synapses persist)."""
        self.activeL6Cells = [[] for _ in xrange(self.numModules)]
        self.activeL5Cells = [[] for _ in xrange(self.numModules)]
        self.predictedL6Cells = [set([]) for _ in xrange(self.numModules)]

    def trainFeatures(self, sensoryInputs):
        """
        Associate each sensory input with one random L6 cell per module,
        learning both the L6->L4 (basal) and L4->L6 synapses.
        """
        # Randomly assign bilateral connections and zero others
        for sense in sensoryInputs:
            # Choose L6 cells randomly
            activeL6Cells = [[np.random.randint(self._cellsPerModule)]
                             for _ in xrange(self.numModules)]
            l4BasalInput = getGlobalIndices(activeL6Cells,
                                            self._cellsPerModule)

            # Learn L6->L4 connections
            # NOTE(review): compute is repeated 4x, presumably to strengthen
            # the association -- confirm the repetition count is intentional.
            self.l4TM.compute(activeColumns=sense,
                              basalInput=l4BasalInput,
                              learn=True)
            self.l4TM.compute(activeColumns=sense,
                              basalInput=l4BasalInput,
                              learn=True)
            self.l4TM.compute(activeColumns=sense,
                              basalInput=l4BasalInput,
                              learn=True)
            self.l4TM.compute(activeColumns=sense,
                              basalInput=l4BasalInput,
                              learn=True)
            activeL4Cells = self.l4TM.getActiveCells()
            # Debug: store the map
            for l6Cell in itertools.chain(*activeL6Cells):
                self.l6ToL4Map[l6Cell].extend(activeL4Cells)
            # Learn L4->L6 connections
            for l6Cells, connections in zip(activeL6Cells, self.l6Connections):
                # Assumes one cell active per L6 module when training features
                segment = connections.createSegment(l6Cells[0])
                for l4Cell in activeL4Cells:
                    connections.createSynapse(segment, l4Cell, 1.0)

    def compute(self, ff, motor, objClass, outputFile):
        """Run one iteration of the online sensorimotor algorithm.

    This function has three stages:

    - The FEEDFORWARD pass drives L4 from the sensory input (with L6
      predictions as basal context), then drives L6 from L4, derives L5
      transforms between successive L6 states, and pools/classifies them.
    - The MOTOR pass path-integrates all stored L6 activity by `motor`.
    - The FEEDBACK pass predicts the next L6 representations by unbinding
      the L5 transforms against current L6 activity.

    Prerequisites: `trainFeatures` must have been run already

    :param ff: feedforward sensory input
    :param motor: the motor command for next move, in the form of delta
        coordinates
    :param objClass: the object class to train the classifier, or None
        if not learning
    :param outputFile: file-like object for JSON trace logging, or falsy
        to skip logging
    """
        delta = motor

        # FEEDFORWARD

        # Determine active feature representation in l4, using lateral input
        # from l6 previous step feedback
        l4BasalInput = getGlobalIndices(self.predictedL6Cells,
                                        self._cellsPerModule)
        self.l4TM.compute(activeColumns=ff,
                          basalInput=l4BasalInput,
                          learn=False)
        predictedL4Cells = self.l4TM.getPredictedCells()
        activeL4Cells = self.l4TM.getActiveCells()

        # Drive L6 activation from l4
        for m, connections in enumerate(self.l6Connections):
            newCells = []
            activeConnectedPerSegment = connections.computeActivity(
                activeL4Cells, 0.5)[0]
            for flatIdx, activeConnected in enumerate(
                    activeConnectedPerSegment):
                if activeConnected >= self.l6ActivationThreshold:
                    cellIdx = connections.segmentForFlatIdx(flatIdx).cell
                    newCells.append(cellIdx)

            #for cell in newCells:
            #  print connections.segmentsForCell(cell)
            #print newCells
            #assert len(newCells) <= 1

            # Push the newest step onto the history, bounded in length.
            self.activeL6Cells[m].insert(0, newCells)
            # TODO: This is the number of steps, not necessarily the number of cells
            lenBefore = len(self.activeL6Cells[m])
            del self.activeL6Cells[m][self.maxActivePerModule:]
            lenAfter = len(self.activeL6Cells[m])
            #assert lenBefore == lenAfter, "Debug assert to check that we aren't hitting limit on L6 activity. Can remove when we set max active low enough relative to object size (times number of train/test iterations)"

        self.activeL6BeforeMotor = [
            list(itertools.chain(*l6Module)) for l6Module in self.activeL6Cells
        ]

        # Replace l5 activity with new transforms
        # Each transform pairs the newest L6 cells with all older ones, in
        # both directions (prev->new and new->prev).
        self.activeL5Cells = []
        for activeL6Module in self.activeL6Cells:
            transforms = set()
            for newCell in activeL6Module[0]:
                for prevCell in itertools.chain(*activeL6Module[1:]):
                    if newCell == prevCell:
                        continue
                    # Transform from prev to new
                    t1 = bind(prevCell, newCell, self.moduleDimensions)
                    transforms.add(t1)
                    # Transform from new to prev
                    t2 = bind(newCell, prevCell, self.moduleDimensions)
                    transforms.add(t2)
            self.activeL5Cells.append(list(transforms))

        # Pool into object representation
        globalL5ActiveCells = getGlobalIndices(self.activeL5Cells,
                                               self._cellsPerModule)
        denseL5 = np.zeros(self._cellsPerModule * self.numModules,
                           dtype="bool")
        denseL5[globalL5ActiveCells] = 1
        self.prediction = self.classifier.infer(denseL5)
        if objClass is not None:
            self.classifier.learn(denseL5, objClass)

        #print globalL5ActiveCells

        # MOTOR

        # Update L6 based on motor command
        numActivePerModuleBefore = [
            sum([len(cells) for cells in active])
            for active in self.activeL6Cells
        ]

        # Path-integrate every stored step of L6 activity by delta.
        self.activeL6Cells = [[[
            pathIntegrate(c, self.moduleDimensions, delta) for c in steps
        ] for steps in prevActiveCells]
                              for prevActiveCells in self.activeL6Cells]

        numActivePerModuleAfter = [
            sum([len(cells) for cells in active])
            for active in self.activeL6Cells
        ]
        # Path integration must not create or drop cells.
        assert numActivePerModuleAfter == numActivePerModuleBefore

        # FEEDBACK

        # Get all transforms associated with object
        # TODO: Get transforms from object in addition to current activity
        predictiveTransforms = [l5Active for l5Active in self.activeL5Cells]

        # Get set of predicted l6 representations (including already active)
        # and store them for next step l4 compute
        self.predictedL6Cells = []
        for l6, l5 in itertools.izip(self.activeL6Cells, predictiveTransforms):
            predictedCells = []
            for activeL6Cell in set(itertools.chain(*l6)):
                for activeL5Cell in l5:
                    predictedCell = unbind(activeL6Cell, activeL5Cell,
                                           self.moduleDimensions)
                    predictedCells.append(predictedCell)
            self.predictedL6Cells.append(
                set(list(itertools.chain(*l6)) + predictedCells))

        # Log this step
        # Serializes the full layer state via a capnp message, then dumps it
        # as one JSON line.
        if outputFile:
            log = RelationalMemoryLog.new_message()
            log.ts = time.time()
            sensationProto = log.init("sensation", len(ff))
            for i in xrange(len(ff)):
                sensationProto[i] = int(ff[i])
            predictedL4Proto = log.init("predictedL4", len(predictedL4Cells))
            for i in xrange(len(predictedL4Cells)):
                predictedL4Proto[i] = int(predictedL4Cells[i])
            activeL4Proto = log.init("activeL4", len(activeL4Cells))
            for i in xrange(len(activeL4Cells)):
                activeL4Proto[i] = int(activeL4Cells[i])
            activeL6HistoryProto = log.init("activeL6History",
                                            len(self.activeL6Cells))
            for i in xrange(len(self.activeL6Cells)):
                activeL6ModuleProto = activeL6HistoryProto.init(
                    i, len(self.activeL6Cells[i]))
                for j in xrange(len(self.activeL6Cells[i])):
                    activeL6ModuleStepProto = activeL6ModuleProto.init(
                        j, len(self.activeL6Cells[i][j]))
                    for k in xrange(len(self.activeL6Cells[i][j])):
                        activeL6ModuleStepProto[k] = int(
                            self.activeL6Cells[i][j][k])
            activeL5Proto = log.init("activeL5", len(self.activeL5Cells))
            for i in xrange(len(self.activeL5Cells)):
                activeL5ModuleProto = activeL5Proto.init(
                    i, len(self.activeL5Cells[i]))
                for j in xrange(len(self.activeL5Cells[i])):
                    activeL5ModuleProto[j] = int(self.activeL5Cells[i][j])

            classifierResults = [
                (i, distance) for i, distance in enumerate(self.prediction[2])
                if distance is not None
            ]
            classifierResultsProto = log.init("classifierResults",
                                              len(classifierResults))
            for i in xrange(len(classifierResults)):
                classifierResultProto = classifierResultsProto[i]
                classifierResultProto.label = classifierResults[i][0]
                classifierResultProto.distance = float(classifierResults[i][1])

            motorDeltaProto = log.init("motorDelta", len(delta))
            for i in xrange(len(delta)):
                motorDeltaProto[i] = int(delta[i])
            predictedL6Proto = log.init("predictedL6",
                                        len(self.predictedL6Cells))
            for i in xrange(len(self.predictedL6Cells)):
                predictedL6ModuleProto = predictedL6Proto.init(
                    i, len(self.predictedL6Cells[i]))
                for j, c in enumerate(self.predictedL6Cells[i]):
                    predictedL6ModuleProto[j] = int(c)

            json.dump(log.to_dict(), outputFile)
            outputFile.write("\n")
# Esempio n. 11
class SingleLayerLocation2DExperiment(object):
    """
  The experiment code organized into a class.

  Couples three layers on a diameter x diameter grid of egocentric
  locations:

  - a location layer (SingleLayerLocationMemory) that does path integration
    over "delta location" transition SDRs,
  - an input layer (ApicalTiebreakPairMemory) that pairs features with
    locations, and
  - an object layer (ColumnPooler) that pools input-layer activity into a
    stable object representation.
  """
    def __init__(self, diameter, objects, featureNames):
        # diameter: width/height of the square grid of egocentric locations.
        # objects: dict of objectName -> {(row, col): featureName}.
        # featureNames: names for which random feature SDRs are generated.
        self.diameter = diameter

        self.objects = objects

        # A grid of location SDRs.
        self.locations = dict(
            ((i, j),
             np.array(sorted(random.sample(xrange(1000), 30)), dtype="uint32"))
            for i in xrange(diameter) for j in xrange(diameter))

        # 8 transition SDRs -- one for each straight and diagonal direction.
        self.transitions = dict(
            ((i, j),
             np.array(sorted(random.sample(xrange(1000), 30)), dtype="uint32"))
            for i in xrange(-1, 2) for j in xrange(-1, 2) if i != 0 or j != 0)

        # One random 15-of-150 feature SDR per feature name.
        self.features = dict(
            (k,
             np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
            for k in featureNames)

        self.locationLayer = SingleLayerLocationMemory(
            **{
                "cellCount": 1000,
                "deltaLocationInputSize": 1000,
                "featureLocationInputSize": 150 * 32,
                "sampleSize": 15,
                "activationThreshold": 10,
                "learningThreshold": 8,
            })

        # 150 columns x 32 cells. Basal input comes from the 1000-cell
        # location layer; apical input comes from the object layer (see
        # doTimestep below).
        self.inputLayer = ApicalTiebreakPairMemory(
            **{
                "columnCount": 150,
                "cellsPerColumn": 32,
                "basalInputSize": 1000,
                "apicalInputSize": 4096,
            })

        self.objectLayer = ColumnPooler(**{"inputWidth": 150 * 32})

        # Use these for classifying SDRs and for testing whether they're correct.
        self.inputRepresentations = {}
        self.objectRepresentations = {}
        self.learnedObjectPlacements = {}

        # Registered monitors, keyed by an opaque integer token.
        self.monitors = {}
        self.nextMonitorToken = 1

    def addMonitor(self, monitor):
        """
    Subscribe to SingleLayer2DExperiment events.

    @param monitor (SingleLayer2DExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """

        token = self.nextMonitorToken
        self.nextMonitorToken += 1

        self.monitors[token] = monitor

        return token

    def removeMonitor(self, monitorToken):
        """
    Unsubscribe from LocationExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
        del self.monitors[monitorToken]

    def doTimestep(self, locationSDR, transitionSDR, featureSDR,
                   egocentricLocation, learn):
        """
    Run one timestep.

    Computes the three layers in order -- location, then input, then
    object -- notifying all monitors after each compute.
    """

        for monitor in self.monitors.values():
            monitor.beforeTimestep(locationSDR, transitionSDR, featureSDR,
                                   egocentricLocation, learn)

        # Location layer: path-integrate the transition and/or anchor on the
        # provided location SDR, using input-layer activity as feature input.
        params = {
            "newLocation":
            locationSDR,
            "deltaLocation":
            transitionSDR,
            "featureLocationInput":
            self.inputLayer.getActiveCells(),
            "featureLocationGrowthCandidates":
            self.inputLayer.getPredictedActiveCells(),
            "learn":
            learn,
        }
        self.locationLayer.compute(**params)
        for monitor in self.monitors.values():
            monitor.afterLocationCompute(**params)

        # Input layer: sensed feature columns with basal (location) and
        # apical (object) context.
        # NOTE(review): unlike the other two computes, "learn" is not passed
        # here, so the input layer uses its default -- confirm intended.
        params = {
            "activeColumns": featureSDR,
            "basalInput": self.locationLayer.getActiveCells(),
            "apicalInput": self.objectLayer.getActiveCells(),
        }
        self.inputLayer.compute(**params)
        for monitor in self.monitors.values():
            monitor.afterInputCompute(**params)

        # Object layer: pool over input-layer activity.
        params = {
            "feedforwardInput":
            self.inputLayer.getActiveCells(),
            "feedforwardGrowthCandidates":
            self.inputLayer.getPredictedActiveCells(),
            "learn":
            learn,
        }
        self.objectLayer.compute(**params)
        for monitor in self.monitors.values():
            monitor.afterObjectCompute(**params)

    def learnTransitions(self):
        """
    Train the location layer to do path integration. For every location, teach
    it each previous-location + motor command pair.
    """

        print "Learning transitions"
        for (i, j), locationSDR in self.locations.iteritems():
            print "i, j", (i, j)
            for (di, dj), transitionSDR in self.transitions.iteritems():
                i2 = i + di
                j2 = j + dj
                if (0 <= i2 < self.diameter and 0 <= j2 < self.diameter):
                    # Repeat each pairing several times to reinforce learning.
                    for _ in xrange(5):
                        self.locationLayer.reset()
                        self.locationLayer.compute(
                            newLocation=self.locations[(i, j)])
                        self.locationLayer.compute(
                            deltaLocation=transitionSDR,
                            newLocation=self.locations[(i2, j2)])

        self.locationLayer.reset()

    def learnObjects(self, objectPlacements):
        """
    Learn each provided object in egocentric space. Touch every location on each
    object.

    This method doesn't try move the sensor along a path. Instead it just leaps
    the sensor to each object location, resetting the location layer with each
    leap.

    This method simultaneously learns 4 sets of synapses:
    - location -> input
    - input -> location
    - input -> object
    - object -> input
    """
        for monitor in self.monitors.values():
            monitor.afterPlaceObjects(objectPlacements)

        for objectName, objectDict in self.objects.iteritems():
            self.reset()

            objectPlacement = objectPlacements[objectName]

            for locationName, featureName in objectDict.iteritems():
                # Object-relative location + placement = egocentric location.
                egocentricLocation = (locationName[0] + objectPlacement[0],
                                      locationName[1] + objectPlacement[1])

                locationSDR = self.locations[egocentricLocation]
                featureSDR = self.features[featureName]
                # No movement while learning: empty transition SDR.
                transitionSDR = np.empty(0)

                self.locationLayer.reset()
                self.inputLayer.reset()

                # Repeat the timestep so activity settles before recording
                # the representation.
                for _ in xrange(10):
                    self.doTimestep(locationSDR,
                                    transitionSDR,
                                    featureSDR,
                                    egocentricLocation,
                                    learn=True)

                self.inputRepresentations[(
                    featureName,
                    egocentricLocation)] = (self.inputLayer.getActiveCells())

            self.objectRepresentations[
                objectName] = self.objectLayer.getActiveCells()
            self.learnedObjectPlacements[objectName] = objectPlacement

    def _selectTransition(self, allocentricLocation, objectDict, visitCounts):
        """
    Choose the transition that lands us in the location we've touched the least
    often. Break ties randomly, i.e. choose the first candidate in a shuffled
    list.
    """

        candidates = list(
            transition for transition in self.transitions.keys()
            if (allocentricLocation[0] + transition[0],
                allocentricLocation[1] + transition[1]) in objectDict)
        random.shuffle(candidates)

        selectedVisitCount = None
        selectedTransition = None
        selectedAllocentricLocation = None

        for transition in candidates:
            candidateLocation = (allocentricLocation[0] + transition[0],
                                 allocentricLocation[1] + transition[1])

            if (selectedVisitCount is None
                    or visitCounts[candidateLocation] < selectedVisitCount):
                selectedVisitCount = visitCounts[candidateLocation]
                selectedTransition = transition
                selectedAllocentricLocation = candidateLocation

        return selectedAllocentricLocation, selectedTransition

    def inferObject(self,
                    objectPlacements,
                    objectName,
                    startPoint,
                    transitionSequence,
                    settlingTime=2):
        """
    Infer an object while following a fixed movement sequence, with no
    location input (the location SDR passed to each timestep is empty).
    Each move is followed by `settlingTime` extra timesteps with an empty
    transition so the network can settle.
    """
        for monitor in self.monitors.values():
            monitor.afterPlaceObjects(objectPlacements)

        objectDict = self.objects[objectName]

        self.reset()

        allocentricLocation = startPoint
        nextTransitionSDR = np.empty(0, dtype="uint32")

        transitionIterator = iter(transitionSequence)

        try:
            while True:
                featureName = objectDict[allocentricLocation]
                egocentricLocation = (allocentricLocation[0] +
                                      objectPlacements[objectName][0],
                                      allocentricLocation[1] +
                                      objectPlacements[objectName][1])
                featureSDR = self.features[featureName]

                # One movement step, then settling steps with no movement.
                steps = ([nextTransitionSDR] + [np.empty(0)] * settlingTime)
                for transitionSDR in steps:
                    self.doTimestep(np.empty(0),
                                    transitionSDR,
                                    featureSDR,
                                    egocentricLocation,
                                    learn=False)

                transitionName = transitionIterator.next()
                allocentricLocation = (allocentricLocation[0] +
                                       transitionName[0],
                                       allocentricLocation[1] +
                                       transitionName[1])
                nextTransitionSDR = self.transitions[transitionName]
        except StopIteration:
            # End of the transition sequence.
            pass

    def inferObjectsWithRandomMovements(self,
                                        objectPlacements,
                                        maxTouches=20,
                                        settlingTime=2):
        """
    Infer each object without any location input.
    """

        for monitor in self.monitors.values():
            monitor.afterPlaceObjects(objectPlacements)

        for objectName, objectDict in self.objects.iteritems():
            self.reset()

            visitCounts = defaultdict(int)

            learnedObjectPlacement = self.learnedObjectPlacements[objectName]

            allocentricLocation = random.choice(objectDict.keys())
            nextTransitionSDR = np.empty(0, dtype="uint32")

            # Traverse the object until it is inferred.
            # NOTE(review): `success` is set but never read or returned --
            # consider returning it per object.
            success = False

            for _ in xrange(maxTouches):
                featureName = objectDict[allocentricLocation]
                egocentricLocation = (allocentricLocation[0] +
                                      objectPlacements[objectName][0],
                                      allocentricLocation[1] +
                                      objectPlacements[objectName][1])
                featureSDR = self.features[featureName]

                # One movement step, then settling steps with no movement.
                steps = ([nextTransitionSDR] + [np.empty(0)] * settlingTime)
                for transitionSDR in steps:
                    self.doTimestep(np.empty(0),
                                    transitionSDR,
                                    featureSDR,
                                    egocentricLocation,
                                    learn=False)

                visitCounts[allocentricLocation] += 1

                # We should eventually infer the egocentric location where we originally
                # learned this location on the object.
                learnedEgocentricLocation = (allocentricLocation[0] +
                                             learnedObjectPlacement[0],
                                             allocentricLocation[1] +
                                             learnedObjectPlacement[1])

                # Inference succeeds when all three layers match the
                # representations recorded during learning.
                if (set(self.objectLayer.getActiveCells()) == set(
                        self.objectRepresentations[objectName])
                        and set(self.inputLayer.getActiveCells()) == set(
                            self.inputRepresentations[(
                                featureName, learnedEgocentricLocation)])
                        and set(self.locationLayer.getActiveCells()) == set(
                            self.locations[learnedEgocentricLocation])):
                    success = True
                    break
                else:
                    allocentricLocation, transitionName = self._selectTransition(
                        allocentricLocation, objectDict, visitCounts)
                    nextTransitionSDR = self.transitions[transitionName]

    def reset(self):
        """
    Clear all layer state and notify monitors.
    """
        self.locationLayer.reset()
        self.objectLayer.reset()
        self.inputLayer.reset()

        for monitor in self.monitors.values():
            monitor.afterReset()
class RelationalMemory(object):
  """
  Sensorimotor memory that relates sensed features through "transform"
  binding between L6 cells.

  Layer roles as used here:
  - L4: a TemporalMemory pairing feedforward features with L6 activity.
  - L6: one Connections instance per module; active cells are
    path-integrated by motor commands.
  - L5: transient transform cells produced by binding pairs of L6 cells.
  - Pooling/classification: a ColumnPooler over the L5 cells, with a
    KNNClassifier on the pooled representation.
  """

  def __init__(self, l4N, l4W, numModules, moduleDimensions,
               maxActivePerModule, l6ActivationThreshold):
    # NOTE(review): l4W is stored but never used in this class --
    # confirm whether external code reads it.
    self.numModules = numModules
    self.moduleDimensions = moduleDimensions
    # Total cells per L6 module, derived from its dimensions.
    self._cellsPerModule = np.prod(moduleDimensions)
    self.maxActivePerModule = maxActivePerModule
    self.l4N = l4N
    self.l4W = l4W
    self.l6ActivationThreshold = l6ActivationThreshold

    # L4: permanences jump straight to 1.0 and never decay, so one
    # presentation is enough to learn a pairing.
    self.l4TM = TemporalMemory(
        columnCount=l4N,
        basalInputSize=numModules*self._cellsPerModule,
        cellsPerColumn=4,
        #activationThreshold=int(numModules / 2) + 1,
        #reducedBasalThreshold=int(numModules / 2) + 1,
        activationThreshold=1,
        reducedBasalThreshold=1,
        initialPermanence=1.0,
        connectedPermanence=0.5,
        minThreshold=1,
        sampleSize=numModules,
        permanenceIncrement=1.0,
        permanenceDecrement=0.0,
    )
    # One Connections instance per L6 module; segments are grown in
    # trainFeatures().
    self.l6Connections = [Connections(numCells=self._cellsPerModule)
                          for _ in xrange(numModules)]

    self.pooler = ColumnPooler(
      inputWidth=self.numModules*self._cellsPerModule,
    )

    self.classifier = KNNClassifier(k=1, distanceMethod="rawOverlap")
    #self.classifier = KNNClassifier(k=1, distanceMethod="norm")

    # Active state
    # activeL6Cells[module] is a history: newest step's cell list first.
    self.activeL6Cells = [[] for _ in xrange(numModules)]
    self.activeL5Cells = [[] for _ in xrange(numModules)]
    self.predictedL6Cells = [set([]) for _ in xrange(numModules)]

    # Debug state
    self.activeL6BeforeMotor = [[] for _ in xrange(numModules)]
    self.l6ToL4Map = collections.defaultdict(list)

  def reset(self):
    """Clear all per-sequence state in every layer."""
    self.activeL6Cells = [[] for _ in xrange(self.numModules)]
    self.activeL5Cells = [[] for _ in xrange(self.numModules)]
    self.predictedL6Cells = [set([]) for _ in xrange(self.numModules)]
    self.l4TM.reset()
    self.pooler.reset()

  def trainFeatures(self, sensoryInputs):
    """Learn bidirectional L4 <-> L6 connections for each sensory input."""
    # Randomly assign bilateral connections and zero others
    for sense in sensoryInputs:
      # Choose L6 cells randomly
      activeL6Cells = [[np.random.randint(self._cellsPerModule)]
                       for _ in xrange(self.numModules)]
      l4BasalInput = getGlobalIndices(activeL6Cells, self._cellsPerModule)

      # Learn L6->L4 connections
      # Repeated computes with learn=True reinforce the same pairing.
      self.l4TM.compute(activeColumns=sense, basalInput=l4BasalInput, learn=True)
      self.l4TM.compute(activeColumns=sense, basalInput=l4BasalInput, learn=True)
      self.l4TM.compute(activeColumns=sense, basalInput=l4BasalInput, learn=True)
      self.l4TM.compute(activeColumns=sense, basalInput=l4BasalInput, learn=True)
      activeL4Cells = self.l4TM.getActiveCells()
      # Debug: store the map
      for l6Cell in itertools.chain(*activeL6Cells):
        self.l6ToL4Map[l6Cell].extend(activeL4Cells)
      # Learn L4->L6 connections
      for l6Cells, connections in zip(activeL6Cells, self.l6Connections):
        # Assumes one cell active per L6 module when training features
        segment = connections.createSegment(l6Cells[0])
        for l4Cell in activeL4Cells:
          connections.createSynapse(segment, l4Cell, 1.0)

  def compute(self, ff, motor, objClass, outputFile):
    """Run one iteration of the online sensorimotor algorithm.

    This function has three stages:

    - FEEDFORWARD: drive L4 with the sensed feature (biased by last step's
      predicted L6 cells), drive L6 from L4, derive L5 transform cells by
      binding new L6 cells against recent ones, pool into the object
      representation, and classify it.
    - MOTOR: path-integrate every stored L6 cell by the motor delta.
    - FEEDBACK: predict next-step L6 cells by unbinding the L5 transforms
      from current L6 activity; stored for the next L4 compute.

    Prerequisites: `trainFeatures` must have been run already

    :param ff: feedforward sensory input
    :param motor: the motor command for next move, in the form of delta
        coordinates
    :param objClass: the object class to train the classifier, or None
        if not learning
    :param outputFile: file-like object that receives one JSON log line
        per call, or a falsy value to disable logging
    """
    delta = motor

    # FEEDFORWARD

    # Determine active feature representation in l4, using lateral input
    # from l6 previous step feedback
    l4BasalInput = getGlobalIndices(self.predictedL6Cells, self._cellsPerModule)
    self.l4TM.compute(activeColumns=ff, basalInput=l4BasalInput,
                      learn=False)
    predictedL4Cells = self.l4TM.getPredictedCells()
    activeL4Cells = self.l4TM.getActiveCells()

    # Drive L6 activation from l4
    for m, connections in enumerate(self.l6Connections):
      newCells = []
      # [0]: active connected synapse counts per segment (0.5 matches the
      # connected permanence used for the synapses created above).
      activeConnectedPerSegment = connections.computeActivity(activeL4Cells, 0.5)[0]
      for flatIdx, activeConnected in enumerate(activeConnectedPerSegment):
        if activeConnected >= self.l6ActivationThreshold:
          cellIdx = connections.segmentForFlatIdx(flatIdx).cell
          newCells.append(cellIdx)

      #for cell in newCells:
      #  print connections.segmentsForCell(cell)
      #print newCells
      #assert len(newCells) <= 1

      # Prepend the newest step and trim the history.
      self.activeL6Cells[m].insert(0, newCells)
      # TODO: This is the number of steps, not necessarily the number of cells
      lenBefore = len(self.activeL6Cells[m])
      del self.activeL6Cells[m][self.maxActivePerModule:]
      lenAfter = len(self.activeL6Cells[m])
      #assert lenBefore == lenAfter, "Debug assert to check that we aren't hitting limit on L6 activity. Can remove when we set max active low enough relative to object size (times number of train/test iterations)"

    # Flattened snapshot for debugging, taken before path integration.
    self.activeL6BeforeMotor = [list(itertools.chain(*l6Module))
                                for l6Module in self.activeL6Cells]

    # Replace l5 activity with new transforms
    self.activeL5Cells = []
    for activeL6Module in self.activeL6Cells:
      transforms = set()
      for newCell in activeL6Module[0]:
        for prevCell in itertools.chain(*activeL6Module[1:]):
          if newCell == prevCell:
            continue
          # Transform from prev to new
          t1 = bind(prevCell, newCell, self.moduleDimensions)
          transforms.add(t1)
          # Transform from new to prev
          t2 = bind(newCell, prevCell, self.moduleDimensions)
          transforms.add(t2)
      self.activeL5Cells.append(list(transforms))


    # Pool into object representation
    classifierLearn = True if objClass is not None else False
    globalL5ActiveCells = sorted(getGlobalIndices(self.activeL5Cells, self._cellsPerModule))
    self.pooler.compute(feedforwardInput=globalL5ActiveCells,
                        learn=classifierLearn,
                        predictedInput=globalL5ActiveCells)

    # Classifier
    # Dense binary vector over the pooler's cells for the KNN classifier.
    classifierInput = np.zeros((self.pooler.numberOfCells(),), dtype=np.uint32)
    classifierInput[self.pooler.getActiveCells()] = 1
    #print classifierInput.nonzero()
    #print self.pooler.getActiveCells()
    #print
    self.prediction = self.classifier.infer(classifierInput)
    if objClass is not None:
      self.classifier.learn(classifierInput, objClass)

    # MOTOR

    # Update L6 based on motor command
    numActivePerModuleBefore = [sum([len(cells) for cells in active]) for active in self.activeL6Cells]

    # Path-integrate every stored L6 cell (all history steps) by delta.
    self.activeL6Cells = [
        [[pathIntegrate(c, self.moduleDimensions, delta)
          for c in steps]
         for steps in prevActiveCells]
        for prevActiveCells in self.activeL6Cells]

    # Path integration must be a bijection: cell counts are unchanged.
    numActivePerModuleAfter = [sum([len(cells) for cells in active]) for active in self.activeL6Cells]
    assert numActivePerModuleAfter == numActivePerModuleBefore

    # FEEDBACK

    # Get all transforms associated with object
    # TODO: Get transforms from object in addition to current activity
    predictiveTransforms = [l5Active for l5Active in self.activeL5Cells]

    # Get set of predicted l6 representations (including already active)
    # and store them for next step l4 compute
    self.predictedL6Cells = []
    for l6, l5 in itertools.izip(self.activeL6Cells, predictiveTransforms):
      predictedCells = []
      for activeL6Cell in set(itertools.chain(*l6)):
        for activeL5Cell in l5:
          predictedCell = unbind(activeL6Cell, activeL5Cell, self.moduleDimensions)
          predictedCells.append(predictedCell)
      self.predictedL6Cells.append(set(
          list(itertools.chain(*l6)) + predictedCells))

    # Log this step
    if outputFile:
      log = RelationalMemoryLog.new_message()
      log.ts = time.time()
      sensationProto = log.init("sensation", len(ff))
      for i in xrange(len(ff)):
        sensationProto[i] = int(ff[i])
      predictedL4Proto = log.init("predictedL4", len(predictedL4Cells))
      for i in xrange(len(predictedL4Cells)):
        predictedL4Proto[i] = int(predictedL4Cells[i])
      activeL4Proto = log.init("activeL4", len(activeL4Cells))
      for i in xrange(len(activeL4Cells)):
        activeL4Proto[i] = int(activeL4Cells[i])
      activeL6HistoryProto = log.init("activeL6History", len(self.activeL6Cells))
      for i in xrange(len(self.activeL6Cells)):
        activeL6ModuleProto = activeL6HistoryProto.init(i, len(self.activeL6Cells[i]))
        for j in xrange(len(self.activeL6Cells[i])):
          activeL6ModuleStepProto = activeL6ModuleProto.init(j, len(self.activeL6Cells[i][j]))
          for k in xrange(len(self.activeL6Cells[i][j])):
            activeL6ModuleStepProto[k] = int(self.activeL6Cells[i][j][k])
      activeL5Proto = log.init("activeL5", len(self.activeL5Cells))
      for i in xrange(len(self.activeL5Cells)):
        activeL5ModuleProto = activeL5Proto.init(i, len(self.activeL5Cells[i]))
        for j in xrange(len(self.activeL5Cells[i])):
          activeL5ModuleProto[j] = int(self.activeL5Cells[i][j])

      # Only classifier categories with a real distance are logged.
      classifierResults = [(i, distance)
                           for i, distance in enumerate(self.prediction[2])
                           if distance is not None]
      classifierResultsProto = log.init("classifierResults",
                                        len(classifierResults))
      for i in xrange(len(classifierResults)):
        classifierResultProto = classifierResultsProto[i]
        classifierResultProto.label = classifierResults[i][0]
        classifierResultProto.distance = float(classifierResults[i][1])

      motorDeltaProto = log.init("motorDelta", len(delta))
      for i in xrange(len(delta)):
        motorDeltaProto[i] = int(delta[i])
      predictedL6Proto = log.init("predictedL6", len(self.predictedL6Cells))
      for i in xrange(len(self.predictedL6Cells)):
        predictedL6ModuleProto = predictedL6Proto.init(i, len(self.predictedL6Cells[i]))
        for j, c in enumerate(self.predictedL6Cells[i]):
          predictedL6ModuleProto[j] = int(c)

      # One JSON object per line, appended to the output file.
      json.dump(log.to_dict(), outputFile)
      outputFile.write("\n")
class Grid2DLocationExperiment(object):
  """
  The experiment code organized into a class.

  Uses multiple SuperficialLocationModule2D location modules (rather than a
  single location layer), paired with an ApicalTiebreakPairMemory input
  layer and a ColumnPooler object layer.
  """

  def __init__(self, objects, objectPlacements, featureNames, locationConfigs,
               worldDimensions):
    # objects: objectName -> list of feature dicts
    #   (with "top", "left", "height", "width", "name" keys -- see learnObjects).
    # objectPlacements: objectName -> (top, left) offset in the world.
    # locationConfigs: one kwargs dict per location module.

    self.objects = objects
    self.objectPlacements = objectPlacements
    self.worldDimensions = worldDimensions

    # One random 15-of-150 feature SDR per feature name.
    self.features = dict(
      (k, np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
      for k in featureNames)

    # Each module anchors on input-layer activity (150 columns x 32 cells).
    self.locationModules = [SuperficialLocationModule2D(anchorInputSize=150*32,
                                                        **config)
                            for config in locationConfigs]

    # NOTE(review): the 18x factor in basalInputSize presumably accounts for
    # the location modules' full representation space -- confirm against
    # SuperficialLocationModule2D.
    self.inputLayer = ApicalTiebreakPairMemory(**{
      "columnCount": 150,
      "cellsPerColumn": 32,
      "basalInputSize": 18 * sum(np.prod(config["cellDimensions"])
                                 for config in locationConfigs),
      "apicalInputSize": 4096
    })

    self.objectLayer = ColumnPooler(**{
      "inputWidth": 150 * 32
    })

    # Use these for classifying SDRs and for testing whether they're correct.
    self.locationRepresentations = {
      # Example:
      # (objectName, (top, left)): [0, 26, 54, 77, 101, ...]
    }
    self.inputRepresentations = {
      # Example:
      # (objectName, (top, left), featureName): [0, 26, 54, 77, 101, ...]
    }
    self.objectRepresentations = {
      # Example:
      # objectName: [14, 19, 54, 107, 201, ...]
    }

    # Current sensor location in world coordinates; None until first move.
    self.locationInWorld = None

    # Max settling iterations per sensation during inference.
    self.maxSettlingTime = 10

    # Registered monitors, keyed by an opaque integer token.
    self.monitors = {}
    self.nextMonitorToken = 1


  def addMonitor(self, monitor):
    """
    Subscribe to Grid2DLocationExperimentMonitor events.

    @param monitor (Grid2DLocationExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """

    token = self.nextMonitorToken
    self.nextMonitorToken += 1

    self.monitors[token] = monitor

    return token


  def removeMonitor(self, monitorToken):
    """
    Unsubscribe from LocationExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
    del self.monitors[monitorToken]


  def getActiveLocationCells(self):
    """
    Concatenate every module's active cells into one global index space,
    offsetting each module's cells by the total cells of preceding modules.
    """
    activeCells = np.array([], dtype="uint32")

    totalPrevCells = 0
    for i, module in enumerate(self.locationModules):
      activeCells = np.append(activeCells,
                              module.getActiveCells() + totalPrevCells)
      totalPrevCells += module.numberOfCells()

    return activeCells


  def move(self, objectName, locationOnObject):
    """
    Move the sensor to a location on an object, shifting every location
    module by the delta from the previous world location (if any).
    """
    objectPlacement = self.objectPlacements[objectName]
    locationInWorld = (objectPlacement[0] + locationOnObject[0],
                       objectPlacement[1] + locationOnObject[1])

    # No shift on the very first move -- there's no previous location.
    if self.locationInWorld is not None:
      deltaLocation = (locationInWorld[0] - self.locationInWorld[0],
                       locationInWorld[1] - self.locationInWorld[1])

      for monitor in self.monitors.values():
        monitor.beforeMove(deltaLocation)

      params = {
        "deltaLocation": deltaLocation
      }
      for module in self.locationModules:
        module.shift(**params)

      for monitor in self.monitors.values():
        monitor.afterLocationShift(**params)

    self.locationInWorld = locationInWorld
    for monitor in self.monitors.values():
      monitor.afterWorldLocationChanged(locationInWorld)


  def _senseInferenceMode(self, featureSDR):
    """
    Repeat the input/object/location computes (learn=False) until activity
    stops changing or maxSettlingTime is reached.
    """
    prevCellActivity = None
    for i in xrange(self.maxSettlingTime):
      inputParams = {
        "activeColumns": featureSDR,
        "basalInput": self.getActiveLocationCells(),
        "apicalInput": self.objectLayer.getActiveCells(),
        "learn": False
      }
      self.inputLayer.compute(**inputParams)

      objectParams = {
        "feedforwardInput": self.inputLayer.getActiveCells(),
        "feedforwardGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
        "learn": False,
      }
      self.objectLayer.compute(**objectParams)

      # Location modules re-anchor on the settled input-layer activity.
      locationParams = {
        "anchorInput": self.inputLayer.getActiveCells()
      }
      for module in self.locationModules:
        module.anchor(**locationParams)

      cellActivity = (set(self.objectLayer.getActiveCells()),
                      set(self.inputLayer.getActiveCells()),
                      set(self.getActiveLocationCells()))

      if cellActivity == prevCellActivity:
        # It settled. Don't even log this timestep.
        break
      else:
        prevCellActivity = cellActivity
        for monitor in self.monitors.values():
          if i > 0:
            monitor.markSensoryRepetition()

          monitor.afterInputCompute(**inputParams)
          monitor.afterObjectCompute(**objectParams)
          monitor.afterLocationAnchor(**locationParams)


  def _senseLearningMode(self, featureSDR):
    """
    Run one learning timestep: input and object layers learn, and location
    modules learn to anchor on the input layer's winner cells.
    """
    inputParams = {
      "activeColumns": featureSDR,
      "basalInput": self.getActiveLocationCells(),
      "apicalInput": self.objectLayer.getActiveCells(),
      "learn": True
    }
    self.inputLayer.compute(**inputParams)

    objectParams = {
      "feedforwardInput": self.inputLayer.getActiveCells(),
      "feedforwardGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
      "learn": True,
    }
    self.objectLayer.compute(**objectParams)

    # Winner cells (not all active cells) are used for location learning.
    locationParams = {
      "anchorInput": self.inputLayer.getWinnerCells()
    }
    for module in self.locationModules:
      module.learn(**locationParams)

    for monitor in self.monitors.values():
      monitor.afterInputCompute(**inputParams)
      monitor.afterObjectCompute(**objectParams)


  def sense(self, featureSDR, learn):
    """
    Process one sensation, dispatching to learning or inference mode.
    """
    for monitor in self.monitors.values():
      monitor.beforeSense(featureSDR)

    if learn:
      self._senseLearningMode(featureSDR)
    else:
      self._senseInferenceMode(featureSDR)


  def learnObjects(self):
    """
    Learn each provided object.

    This method simultaneously learns 4 sets of synapses:
    - location -> input
    - input -> location
    - input -> object
    - object -> input
    """
    for objectName, objectFeatures in self.objects.iteritems():
      self.reset()

      for module in self.locationModules:
        module.activateRandomLocation()

      for feature in objectFeatures:
        # Sense at the feature's center (integer division in Python 2).
        locationOnObject = (feature["top"] + feature["height"]/2,
                            feature["left"] + feature["width"]/2)
        self.move(objectName, locationOnObject)

        featureName = feature["name"]
        featureSDR = self.features[featureName]
        # Repeat the sensation so activity settles before recording it.
        for _ in xrange(10):
          self.sense(featureSDR, learn=True)

        self.locationRepresentations[(objectName, locationOnObject)] = (
          self.getActiveLocationCells())
        self.inputRepresentations[(objectName, locationOnObject, featureName)] = (
          self.inputLayer.getActiveCells())

      self.objectRepresentations[objectName] = self.objectLayer.getActiveCells()


  def inferObjectsWithRandomMovements(self):
    """
    Infer each object without any location input.
    """
    for objectName, objectFeatures in self.objects.iteritems():
      self.reset()

      inferred = False
      prevTouchSequence = None

      # Up to 4 passes over the object's features, in shuffled order.
      for _ in xrange(4):

        # Shuffle, but don't start where the previous pass ended.
        while True:
          touchSequence = list(objectFeatures)
          random.shuffle(touchSequence)

          if prevTouchSequence is not None:
            if touchSequence[0] == prevTouchSequence[-1]:
              continue

          break

        for i, feature in enumerate(touchSequence):
          locationOnObject = (feature["top"] + feature["height"]/2,
                              feature["left"] + feature["width"]/2)
          self.move(objectName, locationOnObject)

          featureName = feature["name"]
          featureSDR = self.features[featureName]
          self.sense(featureSDR, learn=False)

          # Inference succeeds when all three layers match the
          # representations recorded during learning.
          inferred = (
            set(self.objectLayer.getActiveCells()) ==
            set(self.objectRepresentations[objectName]) and

            set(self.inputLayer.getActiveCells()) ==
            set(self.inputRepresentations[(objectName,
                                           locationOnObject,
                                           featureName)]) and

            set(self.getActiveLocationCells()) ==
            set(self.locationRepresentations[(objectName, locationOnObject)]))

          if inferred:
            break

        prevTouchSequence = touchSequence

        if inferred:
          break


  def reset(self):
    """
    Clear all layer state, forget the current world location, and notify
    monitors.
    """
    for module in self.locationModules:
      module.reset()
    self.objectLayer.reset()
    self.inputLayer.reset()

    self.locationInWorld = None

    for monitor in self.monitors.values():
      monitor.afterReset()