Example #1
0
class PoolOfPairsLocation1DExperiment(object):
  """
  There are a lot of ways this experiment could choose to associate "operands"
  with results -- e.g. we could just do it randomly. This particular experiment
  assumes there are an equal number of "operand1", "operand2", and "result"
  values. It assigns each operand/result an index, and it relates these via:

    result = (operand1 + operand2) % numLocations

  Note that this experiment would be fundamentally no different if it used
  subtraction:

    result = (operand1 - operand2) % numLocations

  The resulting network would be identical, it's just our interpretation of the
  SDRs that would change.

  This experiment intentionally mimics a 1D space with wraparound, with
  operands/results representing 1D locations and offsets. You can think of this
  as:

    location2 = location1 + offset
    offset = location2 - location1
  """

  def __init__(self,
               numLocations=25,
               numMinicolumns=15,
               numActiveMinicolumns=10,
               poolingThreshold=8,
               cellsPerColumn=8,
               segmentedProximal=True,
               segmentedPooling=True,
               minicolumnSDRs=None):
    """
    @param numLocations (int)
    Number of distinct operand/result values (1D locations with wraparound).

    @param numMinicolumns (int)
    Number of minicolumns in the pair layer.

    @param numActiveMinicolumns (int)
    Number of active minicolumns in each minicolumn SDR.

    @param poolingThreshold (int)
    Number of active synapses required for a pooling-layer cell to activate.

    @param cellsPerColumn (int)
    Number of cells per minicolumn in the pair layer.

    @param segmentedProximal (bool)
    Whether the pair layer's proximal connections use dendrite segments.

    @param segmentedPooling (bool)
    Whether the pooling layer uses dendrite segments.

    @param minicolumnSDRs (sequence or None)
    Optional pre-built minicolumn SDRs, at least one per location. If None,
    evenly spread SDRs are generated.
    """

    # Fixed sizes of the operand ("location"/"offset") and result cell
    # populations.
    self.numOperandCells = 100
    self.numActiveOperandCells = 4
    self.numResultCells = 100
    self.numActiveResultCells = 4
    self.numLocations = numLocations
    self.numActiveMinicolumns = numActiveMinicolumns

    # One SDR per location index for each role: context operand (arrives
    # basally), result (the pooling layer's output), and driving operand
    # (arrives proximally).
    self.contextOperandSDRs = createEvenlySpreadSDRs(
      numLocations, self.numOperandCells, self.numActiveOperandCells)
    self.resultSDRs = createEvenlySpreadSDRs(
      numLocations, self.numResultCells, self.numActiveResultCells)
    self.drivingOperandSDRs = createEvenlySpreadSDRs(
      numLocations, self.numOperandCells, self.numActiveOperandCells)

    if minicolumnSDRs is None:
      self.minicolumnSDRs = createEvenlySpreadSDRs(
        self.numLocations, numMinicolumns, numActiveMinicolumns)
    else:
      # Caller-provided SDRs: shuffle so their assignment to locations is
      # random.
      assert len(minicolumnSDRs) >= self.numLocations
      self.minicolumnSDRs = list(minicolumnSDRs)
      random.shuffle(self.minicolumnSDRs)

    # Proximal connections: driving operand SDR -> minicolumn SDR.
    self.minicolumnParams = {
      "cellCount": numMinicolumns,
      "inputSize": self.numOperandCells,
      "threshold": self.numActiveOperandCells,
    }
    if segmentedProximal:
      self.pairLayerProximalConnections = SegmentedForwardModel(
        **self.minicolumnParams)
    else:
      self.pairLayerProximalConnections = ForwardModel(**self.minicolumnParams)

    # Pair layer: minicolumns are driven proximally by the driving operand
    # while the context operand arrives basally, so an active cell encodes a
    # specific (driving, context) pair.
    self.pairParams = {
      "columnCount": numMinicolumns,
      "initialPermanence": 1.0,
      "cellsPerColumn": cellsPerColumn,
      "basalInputSize": self.numOperandCells,
      "activationThreshold": self.numActiveOperandCells,
      "minThreshold": self.numActiveOperandCells,
    }
    self.pairLayer = ApicalTiebreakPairMemory(**self.pairParams)

    # Pooling layer: each result SDR pools over every pair SDR that should
    # produce it.
    self.poolingParams = {
      "cellCount": self.numResultCells,
      "inputSize": self.pairLayer.numberOfCells(),
      "threshold": poolingThreshold,
    }
    if segmentedPooling:
      self.poolingLayer = SegmentedForwardModel(**self.poolingParams)
    else:
      self.poolingLayer = ForwardModel(**self.poolingParams)


  def train(self):
    """
    Train the pair layer and pooling layer.
    """
    for iDriving, cDriving in enumerate(self.drivingOperandSDRs):
      minicolumnSDR = self.minicolumnSDRs[iDriving]
      # Proximal learning: this driving operand activates this minicolumn SDR.
      self.pairLayerProximalConnections.associate(minicolumnSDR, cDriving)
      for iContext, cContext in enumerate(self.contextOperandSDRs):
        # The experiment's arithmetic: result = (context + driving) % n.
        iResult = (iContext + iDriving) % self.numLocations
        cResult = self.resultSDRs[iResult]
        # NOTE(review): compute() is called without a learn argument here, so
        # the pair layer presumably learns by default -- the inference path
        # passes learn=False explicitly. Confirm against
        # ApicalTiebreakPairMemory's signature.
        self.pairLayer.compute(minicolumnSDR, basalInput=cContext)
        cPair = self.pairLayer.getWinnerCells()
        # Pool this pair SDR onto the result SDR.
        self.poolingLayer.associate(cResult, cPair)


  def trainWithSpecificPairSDRs(self, pairLayerContexts):
    """
    Train the pair layer and pooling layer, manually choosing which contexts
    each cell will encode (i.e. the pair layer's distal connections).

    @param pairLayerContexts (list of lists of lists of ints)
    iContext integers for each cell, grouped by minicolumn. For example,
      [[[1, 3], [2,4]],
       [[1, 2]]]
    would specify that cell 0 connects to location 1 and location 3, while cell
    1 connects to locations 2 and 4, and cell 2 (in the second minicolumn)
    connects to locations 1 and 2.
    """
    # Grow basal segments in the pair layer.
    for iMinicolumn, contextsByCell in enumerate(pairLayerContexts):
      for iCell, cellContexts in enumerate(contextsByCell):
        # Convert (minicolumn, cell) to a flat cell index.
        iCellAbsolute = iMinicolumn*self.pairLayer.getCellsPerColumn() + iCell
        for context in cellContexts:
          # One new segment per context, fully connected (permanence 1.0) to
          # that context's operand SDR.
          segments = self.pairLayer.basalConnections.createSegments(
            [iCellAbsolute])
          self.pairLayer.basalConnections.growSynapses(
            segments, self.contextOperandSDRs[context], 1.0)

    # Associate the pair layer's minicolumn SDRs with offset cell SDRs,
    # and associate the pooling layer's location SDRs with a pool of pair SDRs.
    for iDriving, cDriving in enumerate(self.drivingOperandSDRs):
      minicolumnSDR = self.minicolumnSDRs[iDriving]
      self.pairLayerProximalConnections.associate(minicolumnSDR, cDriving)

      for iContext, cContext in enumerate(self.contextOperandSDRs):
        iResult = (iContext + iDriving) % self.numLocations
        cResult = self.resultSDRs[iResult]
        # Build the pair SDR directly from the specified contexts: in each
        # active minicolumn, the cells whose context list includes iContext.
        cPair = [
          iMinicolumn*self.pairLayer.getCellsPerColumn() + iCell
          for iMinicolumn in minicolumnSDR
          for iCell, cellContexts in enumerate(pairLayerContexts[iMinicolumn])
          if iContext in cellContexts]
        # Exactly one cell per active minicolumn should encode this context.
        assert len(cPair) == len(minicolumnSDR)

        self.poolingLayer.associate(cResult, cPair)


  def testInferenceOnUnions(self, unionSize, numTests=300):
    """
    Select a random driving operand and a random union of context operands.
    Test how well outputs a union of results.

    Perform the test multiple times with different random selections.

    @param unionSize (int)
    Number of context operands in each union.

    @param numTests (int)
    Number of random trials to run.

    @return (list of ints)
    For each trial, how many extra result SDRs' worth of cells became active
    beyond the expected union.
    """
    additionalSDRCounts = []

    for _ in xrange(numTests):
      # A random union of contexts plus a single random driving operand.
      iContexts = random.sample(xrange(self.numLocations), unionSize)
      iDriving = random.choice(xrange(self.numLocations))
      cDriving = self.drivingOperandSDRs[iDriving]

      cContext = np.unique(np.concatenate(
        [self.contextOperandSDRs[iContext]
         for iContext in iContexts]))
      cResultExpected = np.unique(np.concatenate(
        [self.resultSDRs[(iContext + iDriving) % self.numLocations]
         for iContext in iContexts]))

      self.pairLayerProximalConnections.infer(cDriving)
      minicolumnSDR = self.pairLayerProximalConnections.activeCells
      assert minicolumnSDR.size == self.numActiveMinicolumns

      self.pairLayer.compute(minicolumnSDR, basalInput=cContext, learn=False)
      self.poolingLayer.infer(self.pairLayer.getActiveCells())

      # Every expected result cell must be active.
      assert np.all(np.in1d(cResultExpected, self.poolingLayer.activeCells))

      # Count how many additional result SDRs' worth of cells were active
      # (integer division under Python 2).
      additionalSDRCounts.append(
        np.setdiff1d(self.poolingLayer.activeCells,
                     cResultExpected).size / self.numActiveResultCells
      )

    return additionalSDRCounts
class PIUNCorticalColumn(object):
    """
    A L4 + L6a network. Sensory input causes minicolumns in L4 to activate,
    which drives activity in L6a. Motor input causes L6a to perform path
    integration, updating its activity, which then depolarizes cells in L4.

    Whenever the sensor moves, call movementCompute. Whenever a sensory input
    arrives, call sensoryCompute.
    """
    def __init__(self, locationConfigs, L4Overrides=None, bumpType="gaussian"):
        """
        @param locationConfigs (sequence of dicts)
        Parameters for the location modules

        @param L4Overrides (dict)
        Custom parameters for L4

        @param bumpType (string)
        Selects the location module implementation: "gaussian", "gaussian2",
        or "square".
        """
        self.bumpType = bumpType

        # Every location module anchors onto the full set of L4 cells.
        anchorInputSize = 150 * 16

        if bumpType == "gaussian":
            makeModule = createRatModuleFromCellCount
        elif bumpType == "gaussian2":
            makeModule = createRatModuleFromReadoutResolution
        elif bumpType == "square":
            makeModule = Superficial2DLocationModule
        else:
            raise ValueError("Invalid bumpType", bumpType)

        self.L6aModules = [
            makeModule(anchorInputSize=anchorInputSize, **config)
            for config in locationConfigs
        ]

        # L4's basal (context) input is the concatenation of all modules.
        L4Params = {
            "columnCount": 150,
            "cellsPerColumn": 16,
            "basalInputSize": sum(
                module.numberOfCells() for module in self.L6aModules)
        }
        if L4Overrides is not None:
            L4Params.update(L4Overrides)

        self.L4 = ApicalTiebreakPairMemory(**L4Params)

    def movementCompute(self,
                        displacement,
                        noiseFactor=0,
                        moduleNoiseFactor=0):
        """
        @param displacement (dict)
        The change in location. Example: {"top": 10, "left": 10}

        @param noiseFactor (float)
        Standard deviation of Gaussian noise added to the displacement.

        @param moduleNoiseFactor (float)
        Noise factor forwarded to each location module.

        @return (dict)
        Data for logging/tracing.
        """
        xdisp = 0
        ydisp = 0
        if noiseFactor != 0:
            # Independent Gaussian noise per axis.
            xdisp = np.random.normal(0, noiseFactor)
            ydisp = np.random.normal(0, noiseFactor)

        locationParams = dict(
            displacement=[displacement["top"] + ydisp,
                          displacement["left"] + xdisp],
            noiseFactor=moduleNoiseFactor)

        for module in self.L6aModules:
            module.movementCompute(**locationParams)

        return locationParams

    def sensoryCompute(self, activeMinicolumns, learn):
        """
        @param activeMinicolumns (numpy array)
        List of indices of minicolumns to activate.

        @param learn (bool)
        If True, the two layers should learn this association.

        @return (tuple of dicts)
        Data for logging/tracing.
        """
        inputParams = dict(
            activeColumns=activeMinicolumns,
            basalInput=self.getLocationRepresentation(),
            basalGrowthCandidates=self.getLearnableLocationRepresentation(),
            learn=learn)
        self.L4.compute(**inputParams)

        # The resulting L4 activity, in turn, anchors the location modules.
        locationParams = dict(
            anchorInput=self.L4.getActiveCells(),
            anchorGrowthCandidates=self.L4.getWinnerCells(),
            learn=learn)
        for module in self.L6aModules:
            module.sensoryCompute(**locationParams)

        return (inputParams, locationParams)

    def reset(self):
        """
        Clear all cell activity.
        """
        self.L4.reset()
        for module in self.L6aModules:
            module.reset()

    def activateRandomLocation(self):
        """
        Activate a random location in the location layer.
        """
        for module in self.L6aModules:
            module.activateRandomLocation()

    def getSensoryRepresentation(self):
        """
        Gets the active cells in the sensory layer.
        """
        return self.L4.getActiveCells()

    def _flattenModuleCells(self, getCells):
        """
        Concatenate per-module cell indices into a single flat numbering,
        offsetting each module by the cell counts of the modules before it.
        """
        pieces = [np.array([], dtype="uint32")]
        offset = 0
        for module in self.L6aModules:
            pieces.append(getCells(module) + offset)
            offset += module.numberOfCells()

        return np.concatenate(pieces)

    def getLocationRepresentation(self):
        """
        Get the full population representation of the location layer.
        """
        return self._flattenModuleCells(
            lambda module: module.getActiveCells())

    def getLearnableLocationRepresentation(self):
        """
        Get the cells in the location layer that should be associated with
        the sensory input layer representation. In some models, this is
        identical to the active cells. In others, it's a subset.
        """
        return self._flattenModuleCells(
            lambda module: module.getLearnableCells())

    def getSensoryAssociatedLocationRepresentation(self):
        """
        Get the location cells in the location layer that were driven by the
        input layer (or, during learning, were associated with this input.)
        """
        return self._flattenModuleCells(
            lambda module: module.sensoryAssociatedCells)
class PIUNCorticalColumn(object):
  """
  A L4 + L6a network. Sensory input causes minicolumns in L4 to activate,
  which drives activity in L6a. Motor input causes L6a to perform path
  integration, updating its activity, which then depolarizes cells in L4.

  Whenever the sensor moves, call movementCompute. Whenever a sensory input
  arrives, call sensoryCompute.
  """

  def __init__(self, locationConfigs, L4Overrides=None, bumpType="gaussian"):
    """
    @param locationConfigs (sequence of dicts)
    Parameters for the location modules

    @param L4Overrides (dict)
    Custom parameters for L4

    @param bumpType (string)
    One of "gaussian", "gaussian2", or "square"; selects the location module
    implementation.
    """
    self.bumpType = bumpType

    # All location modules anchor onto the full set of L4 cells.
    numL4Cells = 150 * 16

    if bumpType == "gaussian":
      moduleFactory = createRatModuleFromCellCount
    elif bumpType == "gaussian2":
      moduleFactory = createRatModuleFromReadoutResolution
    elif bumpType == "square":
      moduleFactory = Superficial2DLocationModule
    else:
      raise ValueError("Invalid bumpType", bumpType)

    self.L6aModules = [moduleFactory(anchorInputSize=numL4Cells, **config)
                       for config in locationConfigs]

    # L4's basal input covers the cells of every location module.
    basalInputSize = 0
    for module in self.L6aModules:
      basalInputSize += module.numberOfCells()

    L4Params = {
      "columnCount": 150,
      "cellsPerColumn": 16,
      "basalInputSize": basalInputSize
    }
    if L4Overrides is not None:
      L4Params.update(L4Overrides)

    self.L4 = ApicalTiebreakPairMemory(**L4Params)


  def movementCompute(self, displacement, noiseFactor=0, moduleNoiseFactor=0):
    """
    Shift every location module by the given displacement, optionally with
    Gaussian noise added.

    @param displacement (dict)
    The change in location. Example: {"top": 10, "left": 10}

    @param noiseFactor (float)
    Standard deviation of Gaussian noise added to the displacement.

    @param moduleNoiseFactor (float)
    Noise factor forwarded to each location module.

    @return (dict)
    Data for logging/tracing.
    """
    if noiseFactor != 0:
      noisyX = np.random.normal(0, noiseFactor)
      noisyY = np.random.normal(0, noiseFactor)
    else:
      noisyX = 0
      noisyY = 0

    moduleArgs = {
      "displacement": [displacement["top"] + noisyY,
                       displacement["left"] + noisyX],
      "noiseFactor": moduleNoiseFactor
    }

    for module in self.L6aModules:
      module.movementCompute(**moduleArgs)

    return moduleArgs


  def sensoryCompute(self, activeMinicolumns, learn):
    """
    Drive L4 with the given minicolumns, then anchor the location modules on
    the resulting L4 activity.

    @param activeMinicolumns (numpy array)
    List of indices of minicolumns to activate.

    @param learn (bool)
    If True, the two layers should learn this association.

    @return (tuple of dicts)
    Data for logging/tracing.
    """
    l4Args = {
      "activeColumns": activeMinicolumns,
      "basalInput": self.getLocationRepresentation(),
      "basalGrowthCandidates": self.getLearnableLocationRepresentation(),
      "learn": learn
    }
    self.L4.compute(**l4Args)

    anchorArgs = {
      "anchorInput": self.L4.getActiveCells(),
      "anchorGrowthCandidates": self.L4.getWinnerCells(),
      "learn": learn,
    }
    for module in self.L6aModules:
      module.sensoryCompute(**anchorArgs)

    return (l4Args, anchorArgs)


  def reset(self):
    """
    Clear all cell activity.
    """
    self.L4.reset()
    for module in self.L6aModules:
      module.reset()


  def activateRandomLocation(self):
    """
    Activate a random location in the location layer.
    """
    for module in self.L6aModules:
      module.activateRandomLocation()


  def getSensoryRepresentation(self):
    """
    Gets the active cells in the sensory layer.
    """
    return self.L4.getActiveCells()


  def _collectModuleCells(self, extract):
    """
    Gather cell indices from every location module into one flat array,
    offsetting each module's indices by the sizes of the preceding modules.
    """
    collected = np.array([], dtype="uint32")
    offset = 0
    for module in self.L6aModules:
      collected = np.append(collected, extract(module) + offset)
      offset += module.numberOfCells()

    return collected


  def getLocationRepresentation(self):
    """
    Get the full population representation of the location layer.
    """
    return self._collectModuleCells(lambda m: m.getActiveCells())


  def getLearnableLocationRepresentation(self):
    """
    Get the cells in the location layer that should be associated with the
    sensory input layer representation. In some models, this is identical to
    the active cells. In others, it's a subset.
    """
    return self._collectModuleCells(lambda m: m.getLearnableCells())


  def getSensoryAssociatedLocationRepresentation(self):
    """
    Get the location cells in the location layer that were driven by the input
    layer (or, during learning, were associated with this input.)
    """
    return self._collectModuleCells(lambda m: m.sensoryAssociatedCells)
Example #4
0
class Grid2DLocationExperiment(object):
    """
  The experiment code organized into a class.
  """
    def __init__(self, objects, objectPlacements, featureNames,
                 locationConfigs, worldDimensions):
        """
    @param objects (dict)
    Maps each object name to a sequence of feature dicts, each with "top",
    "left", "width", "height", and "name" keys.

    @param objectPlacements (dict)
    Maps each object name to its (top, left) placement in the world.

    @param featureNames (sequence of strings)
    All possible feature names; each is assigned a random SDR.

    @param locationConfigs (sequence of dicts)
    Parameters for the location modules; each must contain "cellDimensions".

    @param worldDimensions (sequence)
    Dimensions of the world. Stored but not read by this class's own code.
    """

        self.objects = objects
        self.objectPlacements = objectPlacements
        self.worldDimensions = worldDimensions

        # One random 15-of-150 SDR per feature name.
        self.features = dict(
            (k,
             np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
            for k in featureNames)

        self.locationModules = [
            SuperficialLocationModule2D(anchorInputSize=150 * 32, **config)
            for config in locationConfigs
        ]

        # Input layer: 150 minicolumns x 32 cells. Basal input comes from the
        # location modules, apical input from the object layer.
        # NOTE(review): the hardcoded 18 presumably matches the number of
        # location configs used by callers -- confirm.
        self.inputLayer = ApicalTiebreakPairMemory(
            **{
                "columnCount":
                150,
                "cellsPerColumn":
                32,
                "basalInputSize":
                18 * sum(
                    np.prod(config["cellDimensions"])
                    for config in locationConfigs),
                "apicalInputSize":
                4096
            })

        self.objectLayer = ColumnPooler(**{"inputWidth": 150 * 32})

        # Use these for classifying SDRs and for testing whether they're correct.
        self.locationRepresentations = {
            # Example:
            # (objectName, (top, left)): [0, 26, 54, 77, 101, ...]
        }
        self.inputRepresentations = {
            # Example:
            # (objectName, (top, left), featureName): [0, 26, 54, 77, 101, ...]
        }
        self.objectRepresentations = {
            # Example:
            # objectName: [14, 19, 54, 107, 201, ...]
        }

        # Sensor's current world location; None until the first move().
        self.locationInWorld = None

        # Maximum number of settling iterations during inference.
        self.maxSettlingTime = 10

        # Monitor token -> monitor object, for logging/tracing callbacks.
        self.monitors = {}
        self.nextMonitorToken = 1

    def addMonitor(self, monitor):
        """
    Subscribe to Grid2DLocationExperimentMonitor events.

    @param monitor (Grid2DLocationExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """

        token = self.nextMonitorToken
        self.nextMonitorToken += 1

        self.monitors[token] = monitor

        return token

    def removeMonitor(self, monitorToken):
        """
    Unsubscribe from LocationExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
        del self.monitors[monitorToken]

    def getActiveLocationCells(self):
        """
    Concatenate the active cells of every location module into one flat
    array, offsetting each module's indices by the total size of the modules
    before it.

    @return (numpy array)
    Flat active-cell indices across all location modules.
    """
        activeCells = np.array([], dtype="uint32")

        totalPrevCells = 0
        # (The enumerate index i is unused.)
        for i, module in enumerate(self.locationModules):
            activeCells = np.append(activeCells,
                                    module.getActiveCells() + totalPrevCells)
            totalPrevCells += module.numberOfCells()

        return activeCells

    def move(self, objectName, locationOnObject):
        """
    Move the sensor to a location on an object. If there was a previous
    location, shift every location module by the world-coordinate delta and
    notify monitors.
    """
        objectPlacement = self.objectPlacements[objectName]
        locationInWorld = (objectPlacement[0] + locationOnObject[0],
                           objectPlacement[1] + locationOnObject[1])

        if self.locationInWorld is not None:
            deltaLocation = (locationInWorld[0] - self.locationInWorld[0],
                             locationInWorld[1] - self.locationInWorld[1])

            for monitor in self.monitors.values():
                monitor.beforeMove(deltaLocation)

            params = {"deltaLocation": deltaLocation}
            for module in self.locationModules:
                module.shift(**params)

            for monitor in self.monitors.values():
                monitor.afterLocationShift(**params)

        self.locationInWorld = locationInWorld
        for monitor in self.monitors.values():
            monitor.afterWorldLocationChanged(locationInWorld)

    def _senseInferenceMode(self, featureSDR):
        """
    One sensation during inference: repeatedly run the input layer, object
    layer, and location modules with learning disabled until cell activity
    stops changing, up to self.maxSettlingTime iterations.
    """
        prevCellActivity = None
        for i in xrange(self.maxSettlingTime):
            inputParams = {
                "activeColumns": featureSDR,
                "basalInput": self.getActiveLocationCells(),
                "apicalInput": self.objectLayer.getActiveCells(),
                "learn": False
            }
            self.inputLayer.compute(**inputParams)

            objectParams = {
                "feedforwardInput":
                self.inputLayer.getActiveCells(),
                "feedforwardGrowthCandidates":
                self.inputLayer.getPredictedActiveCells(),
                "learn":
                False,
            }
            self.objectLayer.compute(**objectParams)

            # Let the sensed input re-anchor the location modules.
            locationParams = {"anchorInput": self.inputLayer.getActiveCells()}
            for module in self.locationModules:
                module.anchor(**locationParams)

            # Snapshot all three layers' activity to detect settling.
            cellActivity = (set(self.objectLayer.getActiveCells()),
                            set(self.inputLayer.getActiveCells()),
                            set(self.getActiveLocationCells()))

            if cellActivity == prevCellActivity:
                # It settled. Don't even log this timestep.
                break
            else:
                prevCellActivity = cellActivity
                for monitor in self.monitors.values():
                    if i > 0:
                        monitor.markSensoryRepetition()

                    monitor.afterInputCompute(**inputParams)
                    monitor.afterObjectCompute(**objectParams)
                    monitor.afterLocationAnchor(**locationParams)

    def _senseLearningMode(self, featureSDR):
        """
    One sensation during learning: run the input layer and object layer with
    learning enabled, then have the location modules learn the input layer's
    winner cells as an anchor.
    """
        inputParams = {
            "activeColumns": featureSDR,
            "basalInput": self.getActiveLocationCells(),
            "apicalInput": self.objectLayer.getActiveCells(),
            "learn": True
        }
        self.inputLayer.compute(**inputParams)

        objectParams = {
            "feedforwardInput":
            self.inputLayer.getActiveCells(),
            "feedforwardGrowthCandidates":
            self.inputLayer.getPredictedActiveCells(),
            "learn":
            True,
        }
        self.objectLayer.compute(**objectParams)

        locationParams = {"anchorInput": self.inputLayer.getWinnerCells()}
        for module in self.locationModules:
            module.learn(**locationParams)

        for monitor in self.monitors.values():
            monitor.afterInputCompute(**inputParams)
            monitor.afterObjectCompute(**objectParams)

    def sense(self, featureSDR, learn):
        """
    Feed a feature SDR into the network, in learning or inference mode.
    """
        for monitor in self.monitors.values():
            monitor.beforeSense(featureSDR)

        if learn:
            self._senseLearningMode(featureSDR)
        else:
            self._senseInferenceMode(featureSDR)

    def learnObjects(self):
        """
    Learn each provided object.

    This method simultaneously learns 4 sets of synapses:
    - location -> input
    - input -> location
    - input -> object
    - object -> input
    """
        for objectName, objectFeatures in self.objects.iteritems():
            self.reset()

            # Activate a random location in each module as the object's
            # starting location code.
            for module in self.locationModules:
                module.activateRandomLocation()

            for feature in objectFeatures:
                # Sense at the center of the feature (integer division under
                # Python 2).
                locationOnObject = (feature["top"] + feature["height"] / 2,
                                    feature["left"] + feature["width"] / 2)
                self.move(objectName, locationOnObject)

                featureName = feature["name"]
                featureSDR = self.features[featureName]
                # Sense the same feature 10 times at this location.
                for _ in xrange(10):
                    self.sense(featureSDR, learn=True)

                # Record the settled representations for later classification
                # and for verifying inference.
                self.locationRepresentations[(
                    objectName,
                    locationOnObject)] = (self.getActiveLocationCells())
                self.inputRepresentations[(
                    objectName, locationOnObject,
                    featureName)] = (self.inputLayer.getActiveCells())

            self.objectRepresentations[
                objectName] = self.objectLayer.getActiveCells()

    def inferObjectsWithRandomMovements(self):
        """
    Infer each object without any location input.
    """
        for objectName, objectFeatures in self.objects.iteritems():
            self.reset()

            inferred = False
            prevTouchSequence = None

            # Up to 4 passes over the object's features.
            for _ in xrange(4):

                # Shuffle until the first touch differs from the previous
                # pass's last touch, so no feature is sensed twice in a row.
                while True:
                    touchSequence = list(objectFeatures)
                    random.shuffle(touchSequence)

                    if prevTouchSequence is not None:
                        if touchSequence[0] == prevTouchSequence[-1]:
                            continue

                    break

                for i, feature in enumerate(touchSequence):
                    locationOnObject = (feature["top"] + feature["height"] / 2,
                                        feature["left"] + feature["width"] / 2)
                    self.move(objectName, locationOnObject)

                    featureName = feature["name"]
                    featureSDR = self.features[featureName]
                    self.sense(featureSDR, learn=False)

                    # Inference succeeds when all three layers reproduce the
                    # representations recorded during learning.
                    inferred = (
                        set(self.objectLayer.getActiveCells()) == set(
                            self.objectRepresentations[objectName])
                        and set(self.inputLayer.getActiveCells()) == set(
                            self.inputRepresentations[(objectName,
                                                       locationOnObject,
                                                       featureName)])
                        and set(self.getActiveLocationCells()) == set(
                            self.locationRepresentations[(objectName,
                                                          locationOnObject)]))

                    if inferred:
                        break

                prevTouchSequence = touchSequence

                if inferred:
                    break

    def reset(self):
        """
    Clear all cell activity and the current world location, and notify
    monitors.
    """
        for module in self.locationModules:
            module.reset()
        self.objectLayer.reset()
        self.inputLayer.reset()

        self.locationInWorld = None

        for monitor in self.monitors.values():
            monitor.afterReset()
Example #5
0
class PIUNCorticalColumn(object):
    """
    A L4 + L6a network. Sensory input causes minicolumns in L4 to activate,
    which drives activity in L6a. Motor input causes L6a to perform path
    integration, updating its activity, which then depolarizes cells in L4.

    Whenever the sensor moves, call movementCompute. Whenever a sensory input
    arrives, call sensoryCompute.
    """
    def __init__(self, locationConfigs, L4Overrides=None):
        """
        @param locationConfigs (sequence of dicts)
        Parameters for the location modules; each must contain
        "cellDimensions".

        @param L4Overrides (dict)
        Custom parameters for L4
        """
        # Total module cells, computed from the configs because L4 is built
        # before the modules themselves.
        moduleCellTotal = sum(np.prod(config["cellDimensions"])
                              for config in locationConfigs)

        L4Params = {
            "columnCount": 150,
            "cellsPerColumn": 16,
            "basalInputSize": len(locationConfigs) * moduleCellTotal
        }
        if L4Overrides is not None:
            L4Params.update(L4Overrides)

        self.L4 = ApicalTiebreakPairMemory(**L4Params)

        # Each location module anchors onto the full set of L4 cells.
        self.L6aModules = [
            Superficial2DLocationModule(
                anchorInputSize=self.L4.numberOfCells(), **config)
            for config in locationConfigs
        ]

    def movementCompute(self,
                        displacement,
                        noiseFactor=0,
                        moduleNoiseFactor=0):
        """
        @param displacement (dict)
        The change in location. Example: {"top": 10, "left": 10}

        @param noiseFactor (float)
        Standard deviation of Gaussian noise added to the displacement.

        @param moduleNoiseFactor (float)
        Noise factor forwarded to each location module.

        @return (dict)
        Data for logging/tracing.
        """
        xdisp = 0
        ydisp = 0
        if noiseFactor != 0:
            # Independent Gaussian noise per axis.
            xdisp = np.random.normal(0, noiseFactor)
            ydisp = np.random.normal(0, noiseFactor)

        locationParams = dict(
            displacement=[displacement["top"] + ydisp,
                          displacement["left"] + xdisp],
            noiseFactor=moduleNoiseFactor)

        for module in self.L6aModules:
            module.movementCompute(**locationParams)

        return locationParams

    def sensoryCompute(self, activeMinicolumns, learn):
        """
        @param activeMinicolumns (numpy array)
        List of indices of minicolumns to activate.

        @param learn (bool)
        If True, the two layers should learn this association.

        @return (tuple of dicts)
        Data for logging/tracing.
        """
        inputParams = dict(
            activeColumns=activeMinicolumns,
            basalInput=self.getLocationRepresentation(),
            learn=learn)
        self.L4.compute(**inputParams)

        # The resulting L4 activity, in turn, anchors the location modules.
        locationParams = dict(
            anchorInput=self.L4.getActiveCells(),
            anchorGrowthCandidates=self.L4.getWinnerCells(),
            learn=learn)
        for module in self.L6aModules:
            module.sensoryCompute(**locationParams)

        return (inputParams, locationParams)

    def reset(self):
        """
        Clear all cell activity.
        """
        self.L4.reset()
        for module in self.L6aModules:
            module.reset()

    def activateRandomLocation(self):
        """
        Activate a random location in the location layer.
        """
        for module in self.L6aModules:
            module.activateRandomLocation()

    def getSensoryRepresentation(self):
        """
        Gets the active cells in the sensory layer.
        """
        return self.L4.getActiveCells()

    def getLocationRepresentation(self):
        """
        Get the full population representation of the location layer: every
        module's active cells, offset into one flat numbering.
        """
        pieces = [np.array([], dtype="uint32")]
        offset = 0
        for module in self.L6aModules:
            pieces.append(module.getActiveCells() + offset)
            offset += module.numberOfCells()

        return np.concatenate(pieces)
class PIUNCorticalColumn(object):
  """
  A L4 + L6a network. Sensory input causes minicolumns in L4 to activate,
  which drives activity in L6a. Motor input causes L6a to perform path
  integration, updating its activity, which then depolarizes cells in L4.

  Whenever the sensor moves, call movementCompute. Whenever a sensory input
  arrives, call sensoryCompute.
  """

  def __init__(self, locationConfigs, L4Overrides=None):
    """
    @param locationConfigs (sequence of dicts)
    Parameters for the location modules. Each config must include
    "cellDimensions".

    @param L4Overrides (dict)
    Custom parameters for L4, merged over the defaults below.
    """
    # The basal input to L4 spans every location module's cells, repeated
    # once per config entry.
    totalModuleCells = sum(np.prod(config["cellDimensions"])
                           for config in locationConfigs)

    L4Params = {
      "columnCount": 150,
      "cellsPerColumn": 16,
      "basalInputSize": len(locationConfigs) * totalModuleCells
    }
    if L4Overrides is not None:
      L4Params.update(L4Overrides)

    self.L4 = ApicalTiebreakPairMemory(**L4Params)

    self.L6aModules = [
      Superficial2DLocationModule(anchorInputSize=self.L4.numberOfCells(),
                                  **config)
      for config in locationConfigs]


  def movementCompute(self, displacement, noiseFactor=0, moduleNoiseFactor=0):
    """
    Shift every location module by a (possibly noisy) displacement.

    @param displacement (dict)
    The change in location. Example: {"top": 10, "left": 10}

    @param noiseFactor (float)
    If nonzero, standard deviation of Gaussian noise added to the
    displacement before it reaches the modules.

    @param moduleNoiseFactor (float)
    Noise parameter forwarded to each module's movementCompute.

    @return (dict)
    Data for logging/tracing.
    """
    # The x offset is drawn before the y offset so the random sequence
    # matches the original implementation.
    xdisp = ydisp = 0
    if noiseFactor != 0:
      xdisp = np.random.normal(0, noiseFactor)
      ydisp = np.random.normal(0, noiseFactor)

    locationParams = {
      "displacement": [displacement["top"] + ydisp,
                       displacement["left"] + xdisp],
      "noiseFactor": moduleNoiseFactor
    }

    for locationModule in self.L6aModules:
      locationModule.movementCompute(**locationParams)

    return locationParams


  def sensoryCompute(self, activeMinicolumns, learn):
    """
    Drive L4 with a set of active minicolumns, then let each location module
    anchor on the resulting L4 activity.

    @param activeMinicolumns (numpy array)
    List of indices of minicolumns to activate.

    @param learn (bool)
    If True, the two layers should learn this association.

    @return (tuple of dicts)
    Data for logging/tracing.
    """
    feedforwardArgs = {
      "activeColumns": activeMinicolumns,
      "basalInput": self.getLocationRepresentation(),
      "learn": learn
    }
    self.L4.compute(**feedforwardArgs)

    anchoringArgs = {
      "anchorInput": self.L4.getActiveCells(),
      "anchorGrowthCandidates": self.L4.getWinnerCells(),
      "learn": learn,
    }
    for locationModule in self.L6aModules:
      locationModule.sensoryCompute(**anchoringArgs)

    return (feedforwardArgs, anchoringArgs)


  def reset(self):
    """
    Clear all cell activity in L4 and in every location module.
    """
    self.L4.reset()
    for locationModule in self.L6aModules:
      locationModule.reset()


  def activateRandomLocation(self):
    """
    Make every location module activate a randomly chosen location.
    """
    for locationModule in self.L6aModules:
      locationModule.activateRandomLocation()


  def getSensoryRepresentation(self):
    """
    Return the currently active cells of the sensory (L4) layer.
    """
    return self.L4.getActiveCells()


  def getLocationRepresentation(self):
    """
    Build the full population SDR of the location layer by concatenating each
    module's active cells, offset by the total number of cells in the modules
    that precede it.
    """
    pieces = [np.array([], dtype="uint32")]
    cellOffset = 0
    for locationModule in self.L6aModules:
      pieces.append(locationModule.getActiveCells() + cellOffset)
      cellOffset += locationModule.numberOfCells()

    return np.concatenate(pieces)
Example #7
0
def main():
    """
    Run the place-recognition experiment end to end.

    Loads the visual input sequence and odometry from ./sim_data, encodes the
    odometry into SDRs, runs three models over the sequence (the simple MCN
    HTM, a TemporalMemory, and a TM with distal odometry input), and plots a
    precision-recall curve for each against the ground truth.
    """
    DIR = "./sim_data"

    # Odom encoder: one scalar encoder per axis, concatenated.
    xSDR = ScalarEncoder(w=21, minval=0, maxval=20, n=256)
    ySDR = ScalarEncoder(w=21, minval=0, maxval=20, n=256)
    xyWidth = xSDR.getWidth() + ySDR.getWidth()

    # Visual input: one row per image, one column per input bit.
    D = np.loadtxt(DIR + '/seq_multi_loop_noise05_al5.txt', dtype='i', delimiter=',')
    numberImages = D[:, 0].size
    nColumns = D[0, :].size

    # Odom input: ground-truth x/y positions, one row per image.
    odom = np.loadtxt(DIR + '/seq_multi_loop_noise05_al5_gt.txt', dtype='f', delimiter=',')
    x = odom[:, 0]
    y = odom[:, 1]

    # Encode odom input. BUG FIX: this loop previously ran "for i in
    # range(1)", so only row 0 of odomSDR was ever encoded even though every
    # row is consumed as basal input further down.
    odomSDR = np.zeros((numberImages, xyWidth), dtype=int)
    for i in range(min(numberImages, x.size)):
        _xSDR = np.zeros(xSDR.getWidth(), dtype=int)
        xSDR.encodeIntoArray(x[i], _xSDR)
        _ySDR = np.zeros(ySDR.getWidth(), dtype=int)
        ySDR.encodeIntoArray(y[i], _ySDR)
        odomSDR[i, :] = np.concatenate([_xSDR, _ySDR])

    # TM variant that receives the odometry SDR as basal (distal) input.
    tm0 = TM(
        columnCount=nColumns,
        cellsPerColumn=4,
        initialPermanence=0.21,
        connectedPermanence=0.5,
        permanenceIncrement=0.1,
        permanenceDecrement=0.1,
        minThreshold=15,
        basalInputSize=512,
        reducedBasalThreshold=1000,
        activationThreshold=1000,
        apicalInputSize=0,
        maxSynapsesPerSegment=-1,
        sampleSize=1,
        seed=42
        )

    tm = TemporalMemory(
        # Must be the same dimensions as the SP
        columnDimensions=(2048,),
        # How many cells in each mini-column.
        cellsPerColumn=4,
        # A segment is active if it has >= activationThreshold connected synapses
        # that are active due to infActiveState
        activationThreshold=13,
        initialPermanence=0.21,
        connectedPermanence=0.5,
        # Minimum number of active synapses for a segment to be considered during
        # search for the best-matching segments.
        minThreshold=1,
        # The max number of synapses added to a segment during learning
        maxNewSynapseCount=3,
        predictedSegmentDecrement=0.0005,
        maxSegmentsPerCell=3,
        maxSynapsesPerSegment=3,
        seed=42
    )

    # Simple HTM parameters
    params = Params()
    params.maxPredDepth = 0
    params.probAdditionalCon = 0.05  # probability for random connection
    params.nCellPerCol = 32  # number of cells per minicolumn
    params.nInConPerCol = int(round(np.count_nonzero(D) / D.shape[0]))
    params.minColumnActivity = int(round(0.25 * params.nInConPerCol))
    params.nColsPerPattern = 10     # minimum number of active minicolumns k_min
    params.kActiveColumn = 100      # maximum number of active minicolumns k_max
    params.kMin = 1

    # Run the simple HTM (MCN) over the whole input sequence.
    t = time.time()
    print('Simple HTM')
    htm = MCN('htm', params)

    outputSDR = []
    max_index = []

    for i in range(min(numberImages, D.shape[0])):
        # skip empty vectors
        if np.count_nonzero(D[i, :]) == 0:
            print('empty vector, skip\n')
            continue
        htm.compute(D[i, :])

        max_index.append(max(htm.winnerCells))
        outputSDR.append(htm.winnerCells)

    elapsed = time.time() - t
    print("Elapsed time: %f seconds\n" % elapsed)

    # Create output SDR matrix from MCN winner cell output.
    M = np.zeros((len(outputSDR), max(max_index) + 1), dtype=int)
    for i in range(len(outputSDR)):
        for j in range(len(outputSDR[i])):
            winner = outputSDR[i][j]
            M[i][winner] = 1

    # Temporal Pooler descriptors. BUG FIX: this used a Python 2 print
    # statement, inconsistent with the print() calls used elsewhere here.
    print('Temporal Pooler descriptors')
    D1_tm = []
    id_max1 = []
    t = time.time()

    for i in range(min(numberImages, D.shape[0])):
        D1_sp = np.nonzero(D[i, :])[0]
        tm.compute(D1_sp, learn=True)
        activeCells = tm.getWinnerCells()
        D1_tm.append(activeCells)
        id_max1.append(max(activeCells))

    elapsed = time.time() - t
    print("Elapsed time: %f seconds\n" % elapsed)

    # Create output SDR matrix from TM winner cell output.
    T = np.zeros((len(D1_tm), max(id_max1) + 1), dtype=int)
    for i in range(len(D1_tm)):
        for j in range(len(D1_tm[i])):
            winner = D1_tm[i][j]
            T[i][winner] = 1

    # Temporal Pooler - Distal connections (odometry as basal input).
    print('Temporal Pooler - Distal connections')
    D2_tm = []
    id_max2 = []
    t = time.time()

    for i in range(min(numberImages, D.shape[0])):
        D2_sp = np.nonzero(D[i, :])[0]
        basalInputs = np.nonzero(odomSDR[i, :])[0]
        tm0.compute(sorted(D2_sp), sorted(basalInputs), apicalInput=(),
                    basalGrowthCandidates=None, apicalGrowthCandidates=None,
                    learn=True)
        activeCells2 = tm0.getWinnerCells()
        D2_tm.append(activeCells2)
        id_max2.append(max(activeCells2))

    elapsed = time.time() - t
    print("Elapsed time: %f seconds\n" % elapsed)

    # Create output SDR matrix from distal-TM winner cell output.
    T2 = np.zeros((len(D2_tm), max(id_max2) + 1), dtype=int)
    for i in range(len(D2_tm)):
        for j in range(len(D2_tm[i])):
            winner = D2_tm[i][j]
            T2[i][winner] = 1

    # Ground truth: images i and j match iff their ground-truth rows are equal
    # (upper triangle only).
    GT_data = np.loadtxt(DIR + '/seq_multi_loop_noNoise_gt.txt', dtype='i', delimiter=',', skiprows=1)
    GT = np.zeros((numberImages, numberImages), dtype=int)
    for i in range(GT.shape[0]):
        for j in range(i, GT.shape[1]):
            GT[i, j] = (np.any(GT_data[i, :] != GT_data[j, :]) == False)

    # Results: one precision-recall curve per descriptor matrix.
    print('Results')
    fig, ax = plt.subplots()

    S0 = evaluateSimilarity(D)
    P, R = createPR(S0, GT)
    ax.plot(R, P, label='InputSDR: (avgP=%f)' % np.trapz(P, R))

    S1 = evaluateSimilarity(M)
    P, R = createPR(S1, GT)
    ax.plot(R, P, label='MCN (avgP=%f)' % np.trapz(P, R))

    S2 = evaluateSimilarity(T)
    P, R = createPR(S2, GT)
    ax.plot(R, P, label='HTM (avgP=%f)' % np.trapz(P, R))

    S3 = evaluateSimilarity(T2)
    P, R = createPR(S3, GT)
    ax.plot(R, P, label='HTM Distal (avgP=%f)' % np.trapz(P, R))

    ax.legend()
    ax.grid(True)
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.show()
    '''
class Grid2DLocationExperiment(object):
  """
  The experiment code organized into a class.

  Couples 2D location modules with an input layer (ApicalTiebreakPairMemory)
  and an object layer (ColumnPooler) to learn objects as sets of
  (feature, location) pairs and later infer them from touch sequences.

  NOTE(review): uses Python 2 constructs (xrange, dict.iteritems, integer
  "/" division) and will not run unmodified on Python 3.
  """

  def __init__(self, objects, objectPlacements, featureNames, locationConfigs,
               worldDimensions):
    """
    @param objects (dict)
    Maps object name -> sequence of feature dicts. Each feature dict is read
    below with keys "name", "top", "left", "width", "height".

    @param objectPlacements (dict)
    Maps object name -> (top, left) offset of the object in the world.

    @param featureNames (iterable)
    Names of the features; each is assigned a random 15-of-150 cell SDR.

    @param locationConfigs (sequence of dicts)
    One config per location module, passed to SuperficialLocationModule2D.
    Each config must include "cellDimensions".

    @param worldDimensions (sequence)
    Dimensions of the world. (Stored; not used in the code shown here.)
    """
    self.objects = objects
    self.objectPlacements = objectPlacements
    self.worldDimensions = worldDimensions

    # Random 15-of-150 SDR per feature name.
    self.features = dict(
      (k, np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
      for k in featureNames)

    self.locationModules = [SuperficialLocationModule2D(anchorInputSize=150*32,
                                                        **config)
                            for config in locationConfigs]

    self.inputLayer = ApicalTiebreakPairMemory(**{
      "columnCount": 150,
      "cellsPerColumn": 32,
      # Basal input is the concatenation of every location module's cells.
      # NOTE(review): the hard-coded 18 presumably equals
      # len(locationConfigs) -- confirm against the caller.
      "basalInputSize": 18 * sum(np.prod(config["cellDimensions"])
                                 for config in locationConfigs),
      "apicalInputSize": 4096
    })

    self.objectLayer = ColumnPooler(**{
      "inputWidth": 150 * 32
    })

    # Use these for classifying SDRs and for testing whether they're correct.
    self.locationRepresentations = {
      # Example:
      # (objectName, (top, left)): [0, 26, 54, 77, 101, ...]
    }
    self.inputRepresentations = {
      # Example:
      # (objectName, (top, left), featureName): [0, 26, 54, 77, 101, ...]
    }
    self.objectRepresentations = {
      # Example:
      # objectName: [14, 19, 54, 107, 201, ...]
    }

    # Current (top, left) sensor location in world coordinates; None until
    # the first move().
    self.locationInWorld = None

    # Max number of repeated sensations in inference mode before giving up
    # on the network settling.
    self.maxSettlingTime = 10

    # token -> monitor object; see addMonitor()/removeMonitor().
    self.monitors = {}
    self.nextMonitorToken = 1


  def addMonitor(self, monitor):
    """
    Subscribe to Grid2DLocationExperimentMonitor events.

    @param monitor (Grid2DLocationExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """

    token = self.nextMonitorToken
    self.nextMonitorToken += 1

    self.monitors[token] = monitor

    return token


  def removeMonitor(self, monitorToken):
    """
    Unsubscribe from LocationExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
    del self.monitors[monitorToken]


  def getActiveLocationCells(self):
    """
    Concatenate each location module's active cells into one array, offsetting
    each module's cell indices by the number of cells in the modules before it.

    @return (numpy array)
    Cell indices into the combined location-layer cell space.
    """
    activeCells = np.array([], dtype="uint32")

    totalPrevCells = 0
    for i, module in enumerate(self.locationModules):
      activeCells = np.append(activeCells,
                              module.getActiveCells() + totalPrevCells)
      totalPrevCells += module.numberOfCells()

    return activeCells


  def move(self, objectName, locationOnObject):
    """
    Move the sensor to a location on an object. If there was a previous
    location, shift every location module by the world-coordinate delta and
    notify monitors before/after the shift.

    @param objectName (string)
    Key into self.objectPlacements.

    @param locationOnObject (pair)
    (top, left) location relative to the object.
    """
    objectPlacement = self.objectPlacements[objectName]
    locationInWorld = (objectPlacement[0] + locationOnObject[0],
                       objectPlacement[1] + locationOnObject[1])

    if self.locationInWorld is not None:
      deltaLocation = (locationInWorld[0] - self.locationInWorld[0],
                       locationInWorld[1] - self.locationInWorld[1])

      for monitor in self.monitors.values():
        monitor.beforeMove(deltaLocation)

      params = {
        "deltaLocation": deltaLocation
      }
      for module in self.locationModules:
        module.shift(**params)

      for monitor in self.monitors.values():
        monitor.afterLocationShift(**params)

    self.locationInWorld = locationInWorld
    for monitor in self.monitors.values():
      monitor.afterWorldLocationChanged(locationInWorld)


  def _senseInferenceMode(self, featureSDR):
    """
    Feed featureSDR through the network with learning disabled, repeating up
    to self.maxSettlingTime times until the combined object/input/location
    activity stops changing. Monitors are only notified for timesteps where
    the activity changed.
    """
    prevCellActivity = None
    for i in xrange(self.maxSettlingTime):
      inputParams = {
        "activeColumns": featureSDR,
        "basalInput": self.getActiveLocationCells(),
        "apicalInput": self.objectLayer.getActiveCells(),
        "learn": False
      }
      self.inputLayer.compute(**inputParams)

      objectParams = {
        "feedforwardInput": self.inputLayer.getActiveCells(),
        "feedforwardGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
        "learn": False,
      }
      self.objectLayer.compute(**objectParams)

      locationParams = {
        "anchorInput": self.inputLayer.getActiveCells()
      }
      for module in self.locationModules:
        module.anchor(**locationParams)

      # Snapshot the full activity state to detect settling.
      cellActivity = (set(self.objectLayer.getActiveCells()),
                      set(self.inputLayer.getActiveCells()),
                      set(self.getActiveLocationCells()))

      if cellActivity == prevCellActivity:
        # It settled. Don't even log this timestep.
        break
      else:
        prevCellActivity = cellActivity
        for monitor in self.monitors.values():
          if i > 0:
            monitor.markSensoryRepetition()

          monitor.afterInputCompute(**inputParams)
          monitor.afterObjectCompute(**objectParams)
          monitor.afterLocationAnchor(**locationParams)


  def _senseLearningMode(self, featureSDR):
    """
    Feed featureSDR through the network once with learning enabled, then
    notify monitors. The location modules learn from the input layer's winner
    cells (not its active cells, unlike inference mode).
    """
    inputParams = {
      "activeColumns": featureSDR,
      "basalInput": self.getActiveLocationCells(),
      "apicalInput": self.objectLayer.getActiveCells(),
      "learn": True
    }
    self.inputLayer.compute(**inputParams)

    objectParams = {
      "feedforwardInput": self.inputLayer.getActiveCells(),
      "feedforwardGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
      "learn": True,
    }
    self.objectLayer.compute(**objectParams)

    locationParams = {
      "anchorInput": self.inputLayer.getWinnerCells()
    }
    for module in self.locationModules:
      module.learn(**locationParams)

    for monitor in self.monitors.values():
      monitor.afterInputCompute(**inputParams)
      monitor.afterObjectCompute(**objectParams)


  def sense(self, featureSDR, learn):
    """
    Present a feature to the network.

    @param featureSDR (numpy array)
    Active cells of the sensory input.

    @param learn (bool)
    Selects the learning or inference code path.
    """
    for monitor in self.monitors.values():
      monitor.beforeSense(featureSDR)

    if learn:
      self._senseLearningMode(featureSDR)
    else:
      self._senseInferenceMode(featureSDR)


  def learnObjects(self):
    """
    Learn each provided object.

    This method simultaneously learns 4 sets of synapses:
    - location -> input
    - input -> location
    - input -> object
    - object -> input
    """
    for objectName, objectFeatures in self.objects.iteritems():
      self.reset()

      for module in self.locationModules:
        module.activateRandomLocation()

      for feature in objectFeatures:
        # NOTE(review): "/" is integer division under Python 2; under
        # Python 3 these keys become floats and lookups would change.
        locationOnObject = (feature["top"] + feature["height"]/2,
                            feature["left"] + feature["width"]/2)
        self.move(objectName, locationOnObject)

        featureName = feature["name"]
        featureSDR = self.features[featureName]
        # Sense the same feature 10 times while learning this pair.
        for _ in xrange(10):
          self.sense(featureSDR, learn=True)

        # Record the learned representations for later inference checks.
        self.locationRepresentations[(objectName, locationOnObject)] = (
          self.getActiveLocationCells())
        self.inputRepresentations[(objectName, locationOnObject, featureName)] = (
          self.inputLayer.getActiveCells())

      self.objectRepresentations[objectName] = self.objectLayer.getActiveCells()


  def inferObjectsWithRandomMovements(self):
    """
    Infer each object without any location input.
    """
    for objectName, objectFeatures in self.objects.iteritems():
      self.reset()

      inferred = False
      prevTouchSequence = None

      for _ in xrange(4):

        # Pick a random touch sequence that doesn't start with the feature
        # that ended the previous sequence.
        while True:
          touchSequence = list(objectFeatures)
          random.shuffle(touchSequence)

          if prevTouchSequence is not None:
            if touchSequence[0] == prevTouchSequence[-1]:
              continue

          break

        for i, feature in enumerate(touchSequence):
          locationOnObject = (feature["top"] + feature["height"]/2,
                              feature["left"] + feature["width"]/2)
          self.move(objectName, locationOnObject)

          featureName = feature["name"]
          featureSDR = self.features[featureName]
          self.sense(featureSDR, learn=False)

          # Inference succeeds when object, input, and location activity all
          # match the representations recorded during learning.
          inferred = (
            set(self.objectLayer.getActiveCells()) ==
            set(self.objectRepresentations[objectName]) and

            set(self.inputLayer.getActiveCells()) ==
            set(self.inputRepresentations[(objectName,
                                           locationOnObject,
                                           featureName)]) and

            set(self.getActiveLocationCells()) ==
            set(self.locationRepresentations[(objectName, locationOnObject)]))

          if inferred:
            break

        prevTouchSequence = touchSequence

        if inferred:
          break


  def reset(self):
    """
    Clear all cell activity and the current world location, then notify
    monitors.
    """
    for module in self.locationModules:
      module.reset()
    self.objectLayer.reset()
    self.inputLayer.reset()

    self.locationInWorld = None

    for monitor in self.monitors.values():
      monitor.afterReset()