Example #1
    def testProximalLearning_SampleSize(self):
        """
    During learning, cells should attempt to have sampleSizeProximal
    active proximal synapses.

    """
        pooler = ColumnPooler(
            inputWidth=2048 * 8,
            initialProximalPermanence=0.60,
            connectedPermanenceProximal=0.50,
            sampleSizeProximal=10,
            synPermProximalDec=0,
        )

        feedforwardInput1 = range(10)

        pooler.compute(feedforwardInput1, learn=True)

        for cell in pooler.getActiveCells():
            self.assertEqual(pooler.numberOfProximalSynapses([cell]), 10,
                             "Should connect to every active input bit.")
            self.assertEqual(pooler.numberOfConnectedProximalSynapses([cell]),
                             10, "Each synapse should be marked as connected.")

            (presynapticCells,
             permanences) = pooler.proximalPermanences.rowNonZeros(cell)

            self.assertEqual(set(presynapticCells), set(feedforwardInput1),
                             "Should connect to every active input bit.")
            for perm in permanences:
                self.assertAlmostEqual(
                    perm, 0.60, msg="Should use 'initialProximalPermanence'.")

        pooler.compute(range(10, 20), learn=True)

        for cell in pooler.getActiveCells():
            self.assertEqual(pooler.numberOfProximalSynapses([cell]), 20,
                             "Should connect to every active input bit.")

        pooler.compute(range(15, 25), learn=True)

        for cell in pooler.getActiveCells():
            self.assertEqual(pooler.numberOfProximalSynapses([cell]), 25,
                             ("Should connect to every active input bit "
                              "that it's not yet connected to."))

        pooler.compute(range(0, 30), learn=True)

        for cell in pooler.getActiveCells():
            self.assertEqual(
                pooler.numberOfProximalSynapses([cell]), 25,
                "Should not grow more synapses if it had lots active.")

        pooler.compute(range(23, 30), learn=True)

        for cell in pooler.getActiveCells():
            self.assertEqual(pooler.numberOfProximalSynapses([cell]), 30,
                             "Should grow as many as it can.")
            self.assertEqual(pooler.numberOfConnectedProximalSynapses([cell]),
                             30, "Each synapse should be marked as connected.")
  def testInitialNullInputLearnMode(self):
    """Tests with no input in the beginning. """

    pooler = ColumnPooler(
      inputWidth=2048 * 8,
      columnDimensions=[2048, 1],
      maxSynapsesPerSegment=2048 * 8
    )
    activatedCells = numpy.zeros(pooler.numberOfCells())

    # Should be no active cells in beginning
    self.assertEqual(
      len(pooler.getActiveCells()),
      0,
      "Incorrect number of active cells")

    # After computing with no input should have 40 active cells
    pooler.compute(feedforwardInput=set(), learn=True)
    activatedCells[pooler.getActiveCells()] = 1
    self.assertEqual(
      activatedCells.sum(),
      40,
      "Incorrect number of active cells")

    # Should be no active cells after reset
    pooler.reset()
    self.assertEqual(len(pooler.getActiveCells()), 0,
                     "Incorrect number of active cells")

    # Computing again with no input should lead to a different set of 40 active cells
    pooler.compute(feedforwardInput=set(), learn=True)
    activatedCells[pooler.getActiveCells()] += 1
    self.assertLess((activatedCells>=2).sum(), 5,
                    "SDRs not sufficiently different")
Example #3
  def testProximalLearning_InitiallyDisconnected(self):
    """
    If the initialProximalPermanence is below the connectedPermanence, new
    synapses should not be marked as connected.

    """
    pooler = ColumnPooler(
      inputWidth=2048 * 8,
      lateralInputWidth=512,
      numActiveColumnsPerInhArea=12,
      initialProximalPermanence=0.45,
      connectedPermanence=0.50,
      maxNewProximalSynapseCount=10,
      maxNewDistalSynapseCount=10,
    )

    feedforwardInput = set(range(10))

    pooler.compute(feedforwardInput, learn=True)

    activeCells = pooler.getActiveCells()
    self.assertEqual(len(activeCells), 12)

    for cell in activeCells:
      self.assertEqual(pooler.numberOfSynapses([cell]), 10,
                       "Should connect to every active input bit.")
      self.assertEqual(pooler.numberOfConnectedSynapses([cell]), 0,
                       "The synapses shouldn't have a high enough permanence"
                       " to be connected.")
  def testProximalLearning_SampleSize(self):
    """
    During learning, cells should attempt to have sampleSizeProximal
    active proximal synapses.

    """
    pooler = ColumnPooler(
      inputWidth=2048 * 8,
      initialProximalPermanence=0.60,
      connectedPermanenceProximal=0.50,
      sampleSizeProximal=10,
      synPermProximalDec=0,
    )

    feedforwardInput1 = range(10)

    pooler.compute(feedforwardInput1, learn=True)

    for cell in pooler.getActiveCells():
      self.assertEqual(pooler.numberOfProximalSynapses([cell]), 10,
                       "Should connect to every active input bit.")
      self.assertEqual(pooler.numberOfConnectedProximalSynapses([cell]), 10,
                       "Each synapse should be marked as connected.")

      (presynapticCells,
       permanences) = pooler.proximalPermanences.rowNonZeros(cell)

      self.assertEqual(set(presynapticCells), set(feedforwardInput1),
                       "Should connect to every active input bit.")
      for perm in permanences:
        self.assertAlmostEqual(perm, 0.60,
                               msg="Should use 'initialProximalPermanence'.")

    pooler.compute(range(10, 20), learn=True)

    for cell in pooler.getActiveCells():
      self.assertEqual(pooler.numberOfProximalSynapses([cell]), 20,
                       "Should connect to every active input bit.")

    pooler.compute(range(15, 25), learn=True)

    for cell in pooler.getActiveCells():
      self.assertEqual(pooler.numberOfProximalSynapses([cell]), 25,
                       ("Should connect to every active input bit "
                        "that it's not yet connected to."))

    pooler.compute(range(0, 30), learn=True)

    for cell in pooler.getActiveCells():
      self.assertEqual(pooler.numberOfProximalSynapses([cell]), 25,
                       "Should not grow more synapses if it had lots active.")

    pooler.compute(range(23, 30), learn=True)

    for cell in pooler.getActiveCells():
      self.assertEqual(pooler.numberOfProximalSynapses([cell]), 30,
                       "Should grow as many as it can.")
      self.assertEqual(pooler.numberOfConnectedProximalSynapses([cell]), 30,
                       "Each synapse should be marked as connected.")
Example #5
  def testProximalLearning_PunishExisting(self):
    """
    When a cell has a synapse to an inactive input bit, decrease its permanence
    by 'synPermProximalDec'.

    """
    pooler = ColumnPooler(
      inputWidth=2048 * 8,
      lateralInputWidth=512,
      numActiveColumnsPerInhArea=12,
      synPermProximalInc=0.0,
      synPermProximalDec=0.1,
      initialProximalPermanence=0.55,
      connectedPermanence=0.50,
      maxNewProximalSynapseCount=10,
      maxNewDistalSynapseCount=10,
    )

    # Grow some synapses.
    pooler.compute(set(range(0, 10)), learn=True)

    # Punish some of them.
    pooler.compute(set(range(0, 5)), learn=True)

    activeCells = pooler.getActiveCells()
    self.assertEqual(len(activeCells), 12)

    for cell in activeCells:
      self.assertEqual(pooler.numberOfSynapses([cell]), 10,
                       "Should connect to every active input bit.")
      self.assertEqual(pooler.numberOfConnectedSynapses([cell]), 5,
                       "Each punished synapse should no longer be marked as"
                       " connected.")

      (presynapticCells,
       permanences) = pooler.proximalPermanences.rowNonZeros(cell)

      d = dict(zip(presynapticCells, permanences))
      for presynapticCell in xrange(0, 5):
        perm = d[presynapticCell]
        self.assertAlmostEqual(
          perm, 0.55,
          msg="Should have permanence of 'initialProximalPermanence'")
      for presynapticCell in xrange(5, 10):
        perm = d[presynapticCell]
        self.assertAlmostEqual(
          perm, 0.45,
          msg=("Should have permanence of 'initialProximalPermanence'"
               " - 'synPermProximalDec'."))
Example #6
    def testProximalLearning_NoSampling(self):
        """
    With sampleSize -1, during learning each cell should connect to every
    active bit.
    """
        pooler = ColumnPooler(
            inputWidth=2048 * 8,
            initialProximalPermanence=0.60,
            connectedPermanenceProximal=0.50,
            sampleSizeProximal=-1,
            synPermProximalDec=0,
        )

        feedforwardInput1 = range(10)

        pooler.compute(feedforwardInput1, learn=True)

        for cell in pooler.getActiveCells():
            self.assertEqual(pooler.numberOfProximalSynapses([cell]), 10,
                             "Should connect to every active input bit.")
            self.assertEqual(pooler.numberOfConnectedProximalSynapses([cell]),
                             10, "Each synapse should be marked as connected.")

            (presynapticCells,
             permanences) = pooler.proximalPermanences.rowNonZeros(cell)

            self.assertEqual(set(presynapticCells), set(feedforwardInput1),
                             "Should connect to every active input bit.")
            for perm in permanences:
                self.assertAlmostEqual(
                    perm, 0.60, msg="Should use 'initialProximalPermanence'.")

        pooler.compute(range(30), learn=True)

        for cell in pooler.getActiveCells():
            self.assertEqual(
                pooler.numberOfProximalSynapses([cell]), 30,
                "Should grow synapses to every unsynapsed active bit.")

        pooler.compute(range(25, 30), learn=True)

        for cell in pooler.getActiveCells():
            self.assertEqual(
                pooler.numberOfProximalSynapses([cell]), 30,
                "Every bit is synapsed so nothing else should grow.")

        pooler.compute(range(125, 130), learn=True)

        for cell in pooler.getActiveCells():
            self.assertEqual(
                pooler.numberOfProximalSynapses([cell]), 35,
                "Should grow synapses to every unsynapsed active bit.")
            self.assertEqual(pooler.numberOfConnectedProximalSynapses([cell]),
                             35, "Each synapse should be marked as connected.")
  def testProximalLearning_NoSampling(self):
    """
    With sampleSize -1, during learning each cell should connect to every
    active bit.
    """
    pooler = ColumnPooler(
      inputWidth=2048 * 8,
      initialProximalPermanence=0.60,
      connectedPermanenceProximal=0.50,
      sampleSizeProximal=-1,
      synPermProximalDec=0,
    )

    feedforwardInput1 = range(10)

    pooler.compute(feedforwardInput1, learn=True)

    for cell in pooler.getActiveCells():
      self.assertEqual(pooler.numberOfProximalSynapses([cell]), 10,
                       "Should connect to every active input bit.")
      self.assertEqual(pooler.numberOfConnectedProximalSynapses([cell]), 10,
                       "Each synapse should be marked as connected.")

      (presynapticCells,
       permanences) = pooler.proximalPermanences.rowNonZeros(cell)

      self.assertEqual(set(presynapticCells), set(feedforwardInput1),
                       "Should connect to every active input bit.")
      for perm in permanences:
        self.assertAlmostEqual(perm, 0.60,
                               msg="Should use 'initialProximalPermanence'.")

    pooler.compute(range(30), learn=True)

    for cell in pooler.getActiveCells():
      self.assertEqual(pooler.numberOfProximalSynapses([cell]), 30,
                       "Should grow synapses to every unsynapsed active bit.")

    pooler.compute(range(25, 30), learn=True)

    for cell in pooler.getActiveCells():
      self.assertEqual(pooler.numberOfProximalSynapses([cell]), 30,
                       "Every bit is synapsed so nothing else should grow.")

    pooler.compute(range(125, 130), learn=True)

    for cell in pooler.getActiveCells():
      self.assertEqual(pooler.numberOfProximalSynapses([cell]), 35,
                       "Should grow synapses to every unsynapsed active bit.")
      self.assertEqual(pooler.numberOfConnectedProximalSynapses([cell]), 35,
                       "Each synapse should be marked as connected.")
  def testInitialInference(self):
    """Tests inference after learning one pattern. """

    pooler = ColumnPooler(
      inputWidth=2048 * 8,
      columnDimensions=[2048, 1],
      maxSynapsesPerSegment=2048 * 8
    )
    activatedCells = numpy.zeros(pooler.numberOfCells())

    # Learn one pattern
    pooler.compute(feedforwardInput=set(range(0,40)), learn=True)
    activatedCells[pooler.getActiveCells()] = 1
    sum1 = sum(pooler.getActiveCells())

    # Inferring on same pattern should lead to same result
    pooler.reset()
    pooler.compute(feedforwardInput=set(range(0,40)), learn=False)
    self.assertEqual(sum1,
                     sum(pooler.getActiveCells()),
                     "Inference on pattern after learning it is incorrect")

    # Inferring with no inputs should maintain the same pattern
    pooler.compute(feedforwardInput=set(), learn=False)
    self.assertEqual(sum1,
                     sum(pooler.getActiveCells()),
                     "Inference doesn't maintain activity with no input.")
Example #9
  def testProximalLearning_Growth_ManyActiveInputBits(self):
    """
    When the number of available active input bits is > maxNewSynapseCount,
    each cell should grow 'maxNewSynapseCount' synapses.

    """
    pooler = ColumnPooler(
      inputWidth=2048 * 8,
      lateralInputWidth=512,
      numActiveColumnsPerInhArea=12,
      initialProximalPermanence=0.60,
      connectedPermanence=0.50,
      maxNewProximalSynapseCount=10,
      maxNewDistalSynapseCount=10,
    )

    feedforwardInput = set(range(11))

    pooler.compute(feedforwardInput, learn=True)

    activeCells = pooler.getActiveCells()
    self.assertEqual(len(activeCells), 12)

    for cell in activeCells:
      self.assertEqual(pooler.numberOfSynapses([cell]), 10,
                       "Should connect to every active input bit.")
      self.assertEqual(pooler.numberOfConnectedSynapses([cell]), 10,
                       "Each synapse should be marked as connected.")

      (presynapticCells,
       permanences) = pooler.proximalPermanences.rowNonZeros(cell)

      self.assertTrue(set(presynapticCells).issubset(feedforwardInput),
                      "Should connect to a subset of the active input bits.")
      for perm in permanences:
        self.assertAlmostEqual(perm, 0.60,
                               msg="Should use 'initialProximalPermanence'.")
Example #10
  def testProximalLearning_SubsequentGrowth(self):
    """
    When all of the active input bits are synapsed, don't grow new synapses.
    When some of them are not synapsed, grow new synapses to them.

    """
    pooler = ColumnPooler(
      inputWidth=2048 * 8,
      lateralInputWidth=512,
      numActiveColumnsPerInhArea=12,
      synPermProximalInc=0.0,
      synPermProximalDec=0.0,
      initialProximalPermanence=0.60,
      connectedPermanence=0.50,
      maxNewProximalSynapseCount=10,
      maxNewDistalSynapseCount=10,
    )

    # Grow synapses.
    pooler.compute(set(range(10)), learn=True)
    for cell in pooler.getActiveCells():
      self.assertEqual(pooler.numberOfSynapses([cell]), 10,
                       "Should connect to every active input bit.")

    # Given the same input, no new synapses should form.
    pooler.compute(set(range(10)), learn=True)
    for cell in pooler.getActiveCells():
      self.assertEqual(pooler.numberOfSynapses([cell]), 10,
                       "No new synapses should form.")

    # Given a superset of the input, some new synapses should form.
    pooler.compute(set(range(20)), learn=True)
    for cell in pooler.getActiveCells():
      self.assertEqual(pooler.numberOfSynapses([cell]), 20,
                       "Should connect to the new active input bits.")
      self.assertEqual(pooler.numberOfConnectedSynapses([cell]), 20,
                       "Each synapse should be marked as connected.")
Example #11
class ColumnPoolerRegion(PyRegion):
    """
  The ColumnPoolerRegion implements an L2 layer within a single cortical column / cortical
  module.

  The layer supports feed forward (proximal) and lateral inputs.
  """
    @classmethod
    def getSpec(cls):
        """
    Return the Spec for ColumnPoolerRegion.

    The parameters collection is constructed based on the parameters specified
    by the various components (tmSpec and otherSpec)
    """
        spec = dict(
            description=ColumnPoolerRegion.__doc__,
            singleNodeOnly=True,
            inputs=dict(
                feedforwardInput=dict(
                    description=
                    "The primary feed-forward input to the layer, this is a"
                    " binary array containing 0's and 1's",
                    dataType="Real32",
                    count=0,
                    required=True,
                    regionLevel=True,
                    isDefaultInput=True,
                    requireSplitterMap=False),
                lateralInput=dict(
                    description=
                    "Lateral binary input into this column, presumably from"
                    " other neighboring columns.",
                    dataType="Real32",
                    count=0,
                    required=False,
                    regionLevel=True,
                    isDefaultInput=False,
                    requireSplitterMap=False),
                resetIn=dict(
                    description="A boolean flag that indicates whether"
                    " or not the input vector received in this compute cycle"
                    " represents the first presentation in a"
                    " new temporal sequence.",
                    dataType='Real32',
                    count=1,
                    required=False,
                    regionLevel=True,
                    isDefaultInput=False,
                    requireSplitterMap=False),
            ),
            outputs=dict(
                feedForwardOutput=dict(
                    description=
                    "The default output of ColumnPoolerRegion. By default this"
                    " outputs the active cells. You can change this "
                    " dynamically using the defaultOutputType parameter.",
                    dataType="Real32",
                    count=0,
                    regionLevel=True,
                    isDefaultOutput=True),
                predictedCells=dict(
                    description="A binary output containing a 1 for every"
                    " cell that was predicted for this timestep.",
                    dataType="Real32",
                    count=0,
                    regionLevel=True,
                    isDefaultOutput=False),
                predictedActiveCells=dict(
                    description="A binary output containing a 1 for every"
                    " cell that transitioned from predicted to active.",
                    dataType="Real32",
                    count=0,
                    regionLevel=True,
                    isDefaultOutput=False),
                activeCells=dict(
                    description="A binary output containing a 1 for every"
                    " cell that is currently active.",
                    dataType="Real32",
                    count=0,
                    regionLevel=True,
                    isDefaultOutput=False),
            ),
            parameters=dict(
                learningMode=dict(
                    description="Whether the node is learning (default True).",
                    accessMode="ReadWrite",
                    dataType="Bool",
                    count=1,
                    defaultValue="true"),
                inferenceMode=dict(
                    description="Whether the node is inferring (default True).",
                    accessMode='ReadWrite',
                    dataType='Bool',
                    count=1,
                    defaultValue="true"),
                columnCount=dict(description="Number of columns in this layer",
                                 accessMode="Read",
                                 dataType="UInt32",
                                 count=1,
                                 constraints=""),
                inputWidth=dict(description='Number of inputs to the layer.',
                                accessMode='Read',
                                dataType='UInt32',
                                count=1,
                                constraints=''),
                lateralInputWidth=dict(
                    description='Number of lateral inputs to the layer.',
                    accessMode='Read',
                    dataType='UInt32',
                    count=1,
                    constraints=''),
                activationThresholdDistal=dict(
                    description=
                    "If the number of active connected synapses on a "
                    "distal segment is at least this threshold, the segment "
                    "is said to be active.",
                    accessMode="Read",
                    dataType="UInt32",
                    count=1,
                    constraints=""),
                initialPermanence=dict(
                    description="Initial permanence of a new synapse.",
                    accessMode="Read",
                    dataType="Real32",
                    count=1,
                    constraints=""),
                connectedPermanence=dict(
                    description=
                    "If the permanence value for a synapse is greater "
                    "than this value, it is said to be connected.",
                    accessMode="Read",
                    dataType="Real32",
                    count=1,
                    constraints=""),
                minThresholdProximal=dict(
                    description=
                    "If the number of synapses active on a proximal segment "
                    "is at least this threshold, it is considered as a "
                    "candidate active cell",
                    accessMode="Read",
                    dataType="UInt32",
                    count=1,
                    constraints=""),
                minThresholdDistal=dict(
                    description=
                    "If the number of synapses active on a distal segment is "
                    "at least this threshold, it is selected as the best "
                    "matching cell in a bursting column.",
                    accessMode="Read",
                    dataType="UInt32",
                    count=1,
                    constraints=""),
                maxNewProximalSynapseCount=dict(
                    description=
                    "The maximum number of synapses added to a proximal segment "
                    "at each iteration during learning.",
                    accessMode="Read",
                    dataType="UInt32",
                    count=1),
                maxNewDistalSynapseCount=dict(
                    description=
                    "The maximum number of synapses added to a distal segment "
                    "at each iteration during learning.",
                    accessMode="Read",
                    dataType="UInt32",
                    count=1),
                maxSynapsesPerDistalSegment=dict(
                    description=
                    "The maximum number of synapses on a distal segment ",
                    accessMode="Read",
                    dataType="UInt32",
                    count=1),
                maxSynapsesPerProximalSegment=dict(
                    description=
                    "The maximum number of synapses on a proximal segment ",
                    accessMode="Read",
                    dataType="UInt32",
                    count=1),
                permanenceIncrement=dict(
                    description="Amount by which permanences of synapses are "
                    "incremented during learning.",
                    accessMode="Read",
                    dataType="Real32",
                    count=1),
                permanenceDecrement=dict(
                    description="Amount by which permanences of synapses are "
                    "decremented during learning.",
                    accessMode="Read",
                    dataType="Real32",
                    count=1),
                synPermProximalInc=dict(
                    description=
                    "Amount by which permanences of proximal synapses are "
                    "incremented during learning.",
                    accessMode="Read",
                    dataType="Real32",
                    count=1),
                synPermProximalDec=dict(
                    description=
                    "Amount by which permanences of proximal synapses are "
                    "decremented during learning.",
                    accessMode="Read",
                    dataType="Real32",
                    count=1),
                initialProximalPermanence=dict(
                    description="Initial permanence of a new proximal synapse.",
                    accessMode="Read",
                    dataType="Real32",
                    count=1,
                    constraints=""),
                predictedSegmentDecrement=dict(
                    description=
                    "Amount by which active permanences of synapses of "
                    "previously predicted but inactive segments are "
                    "decremented.",
                    accessMode="Read",
                    dataType="Real32",
                    count=1),
                numActiveColumnsPerInhArea=dict(
                    description="The number of active cells invoked per object",
                    accessMode="Read",
                    dataType="UInt32",
                    count=1,
                    constraints=""),
                seed=dict(description="Seed for the random number generator.",
                          accessMode="Read",
                          dataType="UInt32",
                          count=1),
                defaultOutputType=dict(
                    description=
                    "Controls what type of cell output is placed into"
                    " the default output 'feedForwardOutput'",
                    accessMode="ReadWrite",
                    dataType="Byte",
                    count=0,
                    constraints="enum: active,predicted,predictedActiveCells",
                    defaultValue="active"),
            ),
            commands=dict(
                reset=dict(description="Explicitly reset TM states now."), ))

        return spec

    def __init__(self,
                 columnCount=2048,
                 inputWidth=16384,
                 lateralInputWidth=0,
                 activationThresholdDistal=13,
                 initialPermanence=0.21,
                 connectedPermanence=0.50,
                 minThresholdProximal=1,
                 minThresholdDistal=10,
                 maxNewProximalSynapseCount=20,
                 maxNewDistalSynapseCount=20,
                 permanenceIncrement=0.10,
                 permanenceDecrement=0.10,
                 predictedSegmentDecrement=0.0,
                 synPermProximalInc=0.1,
                 synPermProximalDec=0.001,
                 initialProximalPermanence=0.6,
                 seed=42,
                 numActiveColumnsPerInhArea=40,
                 defaultOutputType="active",
                 **kwargs):

        # Modified Column Pooler params
        self.columnCount = columnCount

        # Column Pooler params
        self.inputWidth = inputWidth
        self.lateralInputWidth = lateralInputWidth
        self.activationThresholdDistal = activationThresholdDistal
        self.initialPermanence = initialPermanence
        self.connectedPermanence = connectedPermanence
        self.minThresholdProximal = minThresholdProximal
        self.minThresholdDistal = minThresholdDistal
        self.maxNewProximalSynapseCount = maxNewProximalSynapseCount
        self.maxNewDistalSynapseCount = maxNewDistalSynapseCount
        self.permanenceIncrement = permanenceIncrement
        self.permanenceDecrement = permanenceDecrement
        self.predictedSegmentDecrement = predictedSegmentDecrement
        self.synPermProximalInc = synPermProximalInc
        self.synPermProximalDec = synPermProximalDec
        self.initialProximalPermanence = initialProximalPermanence
        self.seed = seed
        self.numActiveColumnsPerInhArea = numActiveColumnsPerInhArea
        self.maxSynapsesPerSegment = inputWidth

        # Region params
        self.learningMode = True
        self.inferenceMode = True
        self.defaultOutputType = defaultOutputType

        self._pooler = None

        PyRegion.__init__(self, **kwargs)

    def initialize(self, inputs, outputs):
        """
    Initialize the internal objects.
    """
        if self._pooler is None:
            params = {
                "inputWidth": self.inputWidth,
                "lateralInputWidth": self.lateralInputWidth,
                "columnDimensions": (self.columnCount, ),
                "activationThresholdDistal": self.activationThresholdDistal,
                "initialPermanence": self.initialPermanence,
                "connectedPermanence": self.connectedPermanence,
                "minThresholdProximal": self.minThresholdProximal,
                "minThresholdDistal": self.minThresholdDistal,
                "maxNewProximalSynapseCount": self.maxNewProximalSynapseCount,
                "maxNewDistalSynapseCount": self.maxNewDistalSynapseCount,
                "permanenceIncrement": self.permanenceIncrement,
                "permanenceDecrement": self.permanenceDecrement,
                "predictedSegmentDecrement": self.predictedSegmentDecrement,
                "synPermProximalInc": self.synPermProximalInc,
                "synPermProximalDec": self.synPermProximalDec,
                "initialProximalPermanence": self.initialProximalPermanence,
                "seed": self.seed,
                "numActiveColumnsPerInhArea": self.numActiveColumnsPerInhArea,
                "maxSynapsesPerProximalSegment": self.inputWidth,
            }
            self._pooler = ColumnPooler(**params)

    def compute(self, inputs, outputs):
        """
    Run one iteration of compute.

    Note that if the reset signal is True (1) we assume this iteration
    represents the *end* of a sequence. The output will contain the
    representation to this point and any history will then be reset. The output
    at the next compute will start fresh, presumably with bursting columns.
    """
        # Handle reset first (should be sent with an empty signal)
        if "resetIn" in inputs:
            assert len(inputs["resetIn"]) == 1
            if inputs["resetIn"][0] != 0:
                # send empty output
                self.reset()
                outputs["feedForwardOutput"][:] = 0
                outputs["activeCells"][:] = 0
                outputs["predictedCells"][:] = 0
                outputs["predictedActiveCells"][:] = 0
                return

        feedforwardInput = set(inputs["feedforwardInput"].nonzero()[0])

        if "lateralInput" in inputs:
            lateralInput = set(inputs["lateralInput"].nonzero()[0])
        else:
            lateralInput = set()

        # Send the inputs into the Column Pooler.
        self._pooler.compute(feedforwardInput,
                             lateralInput,
                             learn=self.learningMode)

        # Extract the active / predicted cells and put them into binary arrays.
        outputs["activeCells"][:] = 0
        outputs["activeCells"][self._pooler.getActiveCells()] = 1
        outputs["predictedCells"][:] = 0
        outputs["predictedCells"][self._pooler.getPredictiveCells()] = 1
        outputs["predictedActiveCells"][:] = (outputs["activeCells"] *
                                              outputs["predictedCells"])

        # Send appropriate output to feedForwardOutput.
        if self.defaultOutputType == "active":
            outputs["feedForwardOutput"][:] = outputs["activeCells"]
        elif self.defaultOutputType == "predicted":
            outputs["feedForwardOutput"][:] = outputs["predictedCells"]
        elif self.defaultOutputType == "predictedActiveCells":
            outputs["feedForwardOutput"][:] = outputs["predictedActiveCells"]
        else:
            raise Exception("Unknown outputType: " + self.defaultOutputType)

    def reset(self):
        """ Reset the state of the layer"""
        if self._pooler is not None:
            self._pooler.reset()

    def getParameter(self, parameterName, index=-1):
        """
    Get the value of a NodeSpec parameter. Most parameters are handled
    automatically by PyRegion's parameter get mechanism. The ones that need
    special treatment are explicitly handled here.
    """
        return PyRegion.getParameter(self, parameterName, index)

    def setParameter(self, parameterName, index, parameterValue):
        """
    Set the value of a Spec parameter.
    """
        if hasattr(self, parameterName):
            setattr(self, parameterName, parameterValue)
        else:
            raise Exception("Unknown parameter: " + parameterName)

    def getOutputElementCount(self, name):
        """
    Return the number of elements for the given output.
    """
        if name in [
                "feedForwardOutput", "predictedActiveCells", "predictedCells",
                "activeCells"
        ]:
            return self.columnCount
        else:
            raise Exception("Invalid output name specified: " + name)
def test_apical_dependent_TM_learning(sequenceLen, numSequences, sharedRange,
                                      seed, training_iters):
    TM = ApicalDependentSequenceMemory(**getDefaultL4Params(2048))
    pooler = ColumnPooler(**getDefaultL2Params(2048, seed))

    print "Generating sequences..."
    sequenceMachine, generatedSequences, numbers = generateSequences(
        sequenceLength=sequenceLen,
        sequenceCount=numSequences,
        sharedRange=sharedRange,
        n=2048,
        w=40,
        seed=seed)

    sequences = convertSequenceMachineSequence(generatedSequences)

    pooler_representations = []
    s = 0

    characters = {}
    char_sequences = []

    sequence_order = range(numSequences)
    for i in xrange(training_iters):
        random.shuffle(sequence_order)
        for s in sequence_order:
            sequence = sequences[s]
            pooler_representation = numpy.asarray([], dtype="int")
            TM_representation = numpy.asarray([], dtype="int")
            char_sequences.append([])
            total_pooler_representation = set()
            t = 0
            for timestep in sequence:
                datapoint = numpy.asarray(list(timestep), dtype="int")
                datapoint.sort()
                TM.compute(activeColumns=datapoint,
                           apicalInput=pooler_representation,
                           learn=True)
                TM_representation = TM.activeCells
                winners = TM.winnerCells
                predicted_cells = TM.predictedCells
                #megabursting = TM.megabursting
                #if i > 0:
                #  import ipdb; ipdb.set_trace()
                pooler.compute(feedforwardInput=TM_representation,
                               feedforwardGrowthCandidates=winners,
                               lateralInputs=(pooler_representation, ),
                               predictedInput=predicted_cells,
                               learn=True)
                pooler_representation = pooler.activeCells
                if i == training_iters - 1 and t > 0:
                    total_pooler_representation |= set(pooler_representation)
                    print len(pooler_representation)
                #print pooler_representation, len(pooler_representation), (s, t)
                t += 1

            pooler.reset()
            if i == training_iters - 1:
                pooler_representations.append(total_pooler_representation)
            s += 1

    representations = pooler_representations
    #print representations
    for i in range(len(representations)):
        for j in range(i):
            print (i, j), "overlap:", len(representations[i] & representations[j]), \
                "Length of i:", len(representations[i])
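
A hypothetical invocation of the experiment function above; the argument values are chosen only for illustration and carry no special meaning:

test_apical_dependent_TM_learning(sequenceLen=10,
                                  numSequences=5,
                                  sharedRange=(2, 6),
                                  seed=42,
                                  training_iters=3)
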
Example #13
class SingleLayerLocation2DExperiment(object):
  """
  The experiment code organized into a class.
  """

  def __init__(self, diameter, objects, featureNames):
    self.diameter = diameter

    self.objects = objects

    # A grid of location SDRs.
    self.locations = dict(
      ((i, j), np.array(sorted(random.sample(xrange(1000), 30)), dtype="uint32"))
      for i in xrange(diameter)
      for j in xrange(diameter))

    # 8 transition SDRs -- one for each straight and diagonal direction.
    self.transitions = dict(
      ((i, j), np.array(sorted(random.sample(xrange(1000), 30)), dtype="uint32"))
      for i in xrange(-1, 2)
      for j in xrange(-1, 2)
      if i != 0 or j != 0)

    self.features = dict(
      (k, np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
      for k in featureNames)

    self.locationLayer = SingleLayerLocationMemory(**{
      "cellCount": 1000,
      "deltaLocationInputSize": 1000,
      "featureLocationInputSize": 150*32,
      "sampleSize": 15,
      "activationThreshold": 10,
      "learningThreshold": 8,
    })

    self.inputLayer = ApicalTiebreakPairMemory(**{
      "columnCount": 150,
      "cellsPerColumn": 32,
      "basalInputSize": 1000,
      "apicalInputSize": 4096,
    })

    self.objectLayer = ColumnPooler(**{
      "inputWidth": 150 * 32
    })

    # Use these for classifying SDRs and for testing whether they're correct.
    self.inputRepresentations = {}
    self.objectRepresentations = {}
    self.learnedObjectPlacements = {}

    self.monitors = {}
    self.nextMonitorToken = 1


  def addMonitor(self, monitor):
    """
    Subscribe to SingleLayer2DExperiment events.

    @param monitor (SingleLayer2DExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """

    token = self.nextMonitorToken
    self.nextMonitorToken += 1

    self.monitors[token] = monitor

    return token


  def removeMonitor(self, monitorToken):
    """
    Unsubscribe from LocationExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
    del self.monitors[monitorToken]


  def doTimestep(self, locationSDR, transitionSDR, featureSDR,
                 egocentricLocation, learn):
    """
    Run one timestep.
    """

    for monitor in self.monitors.values():
      monitor.beforeTimestep(locationSDR, transitionSDR, featureSDR,
                             egocentricLocation, learn)

    params = {
      "newLocation": locationSDR,
      "deltaLocation": transitionSDR,
      "featureLocationInput": self.inputLayer.getActiveCells(),
      "featureLocationGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
      "learn": learn,
    }
    self.locationLayer.compute(**params)
    for monitor in self.monitors.values():
      monitor.afterLocationCompute(**params)

    params = {
      "activeColumns": featureSDR,
      "basalInput": self.locationLayer.getActiveCells(),
      "apicalInput": self.objectLayer.getActiveCells(),
    }
    self.inputLayer.compute(**params)
    for monitor in self.monitors.values():
      monitor.afterInputCompute(**params)

    params = {
      "feedforwardInput": self.inputLayer.getActiveCells(),
      "feedforwardGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
      "learn": learn,
    }
    self.objectLayer.compute(**params)
    for monitor in self.monitors.values():
      monitor.afterObjectCompute(**params)


  def learnTransitions(self):
    """
    Train the location layer to do path integration. For every location, teach
    it each previous-location + motor command pair.
    """

    print "Learning transitions"
    for (i, j), locationSDR in self.locations.iteritems():
      print "i, j", (i, j)
      for (di, dj), transitionSDR in self.transitions.iteritems():
        i2 = i + di
        j2 = j + dj
        if (0 <= i2 < self.diameter and
            0 <= j2 < self.diameter):
          for _ in xrange(5):
            self.locationLayer.reset()
            self.locationLayer.compute(newLocation=self.locations[(i,j)])
            self.locationLayer.compute(deltaLocation=transitionSDR,
                                       newLocation=self.locations[(i2, j2)])

    self.locationLayer.reset()


  def learnObjects(self, objectPlacements):
    """
    Learn each provided object in egocentric space. Touch every location on each
    object.

    This method doesn't try to move the sensor along a path. Instead it just leaps
    the sensor to each object location, resetting the location layer with each
    leap.

    This method simultaneously learns 4 sets of synapses:
    - location -> input
    - input -> location
    - input -> object
    - object -> input
    """
    for monitor in self.monitors.values():
      monitor.afterPlaceObjects(objectPlacements)

    for objectName, objectDict in self.objects.iteritems():
      self.reset()

      objectPlacement = objectPlacements[objectName]

      for locationName, featureName in objectDict.iteritems():
        egocentricLocation = (locationName[0] + objectPlacement[0],
                              locationName[1] + objectPlacement[1])

        locationSDR = self.locations[egocentricLocation]
        featureSDR = self.features[featureName]
        transitionSDR = np.empty(0)

        self.locationLayer.reset()
        self.inputLayer.reset()

        for _ in xrange(10):
          self.doTimestep(locationSDR, transitionSDR, featureSDR,
                          egocentricLocation, learn=True)

        self.inputRepresentations[(featureName, egocentricLocation)] = (
          self.inputLayer.getActiveCells())

      self.objectRepresentations[objectName] = self.objectLayer.getActiveCells()
      self.learnedObjectPlacements[objectName] = objectPlacement


  def _selectTransition(self, allocentricLocation, objectDict, visitCounts):
    """
    Choose the transition that lands us in the location we've touched the least
    often. Break ties randomly, i.e. choose the first candidate in a shuffled
    list.
    """

    candidates = list(transition
                      for transition in self.transitions.keys()
                      if (allocentricLocation[0] + transition[0],
                          allocentricLocation[1] + transition[1]) in objectDict)
    random.shuffle(candidates)

    selectedVisitCount = None
    selectedTransition = None
    selectedAllocentricLocation = None

    for transition in candidates:
      candidateLocation = (allocentricLocation[0] + transition[0],
                           allocentricLocation[1] + transition[1])

      if (selectedVisitCount is None or
          visitCounts[candidateLocation] < selectedVisitCount):
        selectedVisitCount = visitCounts[candidateLocation]
        selectedTransition = transition
        selectedAllocentricLocation = candidateLocation

    return selectedAllocentricLocation, selectedTransition


  def inferObject(self, objectPlacements, objectName, startPoint,
                  transitionSequence, settlingTime=2):
    for monitor in self.monitors.values():
      monitor.afterPlaceObjects(objectPlacements)

    objectDict = self.objects[objectName]

    self.reset()

    allocentricLocation = startPoint
    nextTransitionSDR = np.empty(0, dtype="uint32")

    transitionIterator = iter(transitionSequence)

    try:
      while True:
        featureName = objectDict[allocentricLocation]
        egocentricLocation = (allocentricLocation[0] +
                              objectPlacements[objectName][0],
                              allocentricLocation[1] +
                              objectPlacements[objectName][1])
        featureSDR = self.features[featureName]

        steps = ([nextTransitionSDR] +
                 [np.empty(0)]*settlingTime)
        for transitionSDR in steps:
          self.doTimestep(np.empty(0), transitionSDR, featureSDR,
                          egocentricLocation, learn=False)

        transitionName = transitionIterator.next()
        allocentricLocation = (allocentricLocation[0] + transitionName[0],
                               allocentricLocation[1] + transitionName[1])
        nextTransitionSDR = self.transitions[transitionName]
    except StopIteration:
      pass


  def inferObjectsWithRandomMovements(self, objectPlacements, maxTouches=20,
                                      settlingTime=2):
    """
    Infer each object without any location input.
    """

    for monitor in self.monitors.values():
      monitor.afterPlaceObjects(objectPlacements)

    for objectName, objectDict in self.objects.iteritems():
      self.reset()

      visitCounts = defaultdict(int)

      learnedObjectPlacement = self.learnedObjectPlacements[objectName]

      allocentricLocation = random.choice(objectDict.keys())
      nextTransitionSDR = np.empty(0, dtype="uint32")

      # Traverse the object until it is inferred.
      success = False

      for _ in xrange(maxTouches):
        featureName = objectDict[allocentricLocation]
        egocentricLocation = (allocentricLocation[0] +
                              objectPlacements[objectName][0],
                              allocentricLocation[1] +
                              objectPlacements[objectName][1])
        featureSDR = self.features[featureName]

        steps = ([nextTransitionSDR] +
                 [np.empty(0)]*settlingTime)
        for transitionSDR in steps:
          self.doTimestep(np.empty(0), transitionSDR, featureSDR,
                          egocentricLocation, learn=False)

        visitCounts[allocentricLocation] += 1

        # We should eventually infer the egocentric location where we originally
        # learned this location on the object.
        learnedEgocentricLocation = (
          allocentricLocation[0] + learnedObjectPlacement[0],
          allocentricLocation[1] + learnedObjectPlacement[1])

        if (set(self.objectLayer.getActiveCells()) ==
            set(self.objectRepresentations[objectName]) and

            set(self.inputLayer.getActiveCells()) ==
            set(self.inputRepresentations[(featureName,
                                           learnedEgocentricLocation)]) and

            set(self.locationLayer.getActiveCells()) ==
            set(self.locations[learnedEgocentricLocation])):
          success = True
          break
        else:
          allocentricLocation, transitionName = self._selectTransition(
            allocentricLocation, objectDict, visitCounts)
          nextTransitionSDR = self.transitions[transitionName]


  def reset(self):
    self.locationLayer.reset()
    self.objectLayer.reset()
    self.inputLayer.reset()

    for monitor in self.monitors.values():
      monitor.afterReset()
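
As a rough usage sketch of the experiment class above (the object layout, feature names, and placement below are made up for illustration only):

# Hypothetical 2x2 object built from two features, "A" and "B".
objects = {
  "Object 1": {(0, 0): "A", (0, 1): "B",
               (1, 0): "B", (1, 1): "A"},
}

exp = SingleLayerLocation2DExperiment(diameter=4, objects=objects,
                                      featureNames=["A", "B"])

# Teach path integration, then learn the object at an arbitrary placement.
exp.learnTransitions()
objectPlacements = {"Object 1": (1, 1)}
exp.learnObjects(objectPlacements)

# Infer the object from feature input alone, moving the sensor randomly.
exp.inferObjectsWithRandomMovements(objectPlacements)
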
class ColumnPoolerRegion(PyRegion):
  """
  The ColumnPoolerRegion implements an L2 layer within a single cortical column / cortical
  module.

  The layer supports feed forward (proximal) and lateral inputs.
  """

  @classmethod
  def getSpec(cls):
    """
    Return the Spec for ColumnPoolerRegion.

    The parameters collection is constructed based on the parameters specified
    by the various components (tmSpec and otherSpec)
    """
    spec = dict(
      description=ColumnPoolerRegion.__doc__,
      singleNodeOnly=True,
      inputs=dict(
        feedforwardInput=dict(
          description="The primary feed-forward input to the layer, this is a"
                      " binary array containing 0's and 1's",
          dataType="Real32",
          count=0,
          required=True,
          regionLevel=True,
          isDefaultInput=True,
          requireSplitterMap=False),

        lateralInput=dict(
          description="Lateral binary input into this column, presumably from"
                      " other neighboring columns.",
          dataType="Real32",
          count=0,
          required=False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),

        resetIn=dict(
          description="A boolean flag that indicates whether"
                      " or not the input vector received in this compute cycle"
                      " represents the first presentation in a"
                      " new temporal sequence.",
          dataType='Real32',
          count=1,
          required=False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),

      ),
      outputs=dict(
        feedForwardOutput=dict(
          description="The default output of ColumnPoolerRegion. By default this"
                      " outputs the active cells. You can change this "
                      " dynamically using the defaultOutputType parameter.",
          dataType="Real32",
          count=0,
          regionLevel=True,
          isDefaultOutput=True),

        predictiveCells=dict(
          description="A binary output containing a 1 for every"
                      " cell currently in predicted state.",
          dataType="Real32",
          count=0,
          regionLevel=True,
          isDefaultOutput=False),

        predictedActiveCells=dict(
          description="A binary output containing a 1 for every"
                      " cell that transitioned from predicted to active.",
          dataType="Real32",
          count=0,
          regionLevel=True,
          isDefaultOutput=False),

        activeCells=dict(
          description="A binary output containing a 1 for every"
                      " cell that is currently active.",
          dataType="Real32",
          count=0,
          regionLevel=True,
          isDefaultOutput=False),

      ),
      parameters=dict(
        learningMode=dict(
          description="1 if the node is learning (default 1).",
          accessMode="ReadWrite",
          dataType="UInt32",
          count=1,
          defaultValue=1,
          constraints="bool"),
        inferenceMode=dict(
          description='1 if the node is inferring (default 1).',
          accessMode='ReadWrite',
          dataType='UInt32',
          count=1,
          defaultValue=1,
          constraints='bool'),
        columnCount=dict(
          description="Number of columns in this layer",
          accessMode='ReadWrite',
          dataType="UInt32",
          count=1,
          constraints=""),
        inputWidth=dict(
          description='Number of inputs to the layer.',
          accessMode='Read',
          dataType='UInt32',
          count=1,
          constraints=''),
        cellsPerColumn=dict(
          description="Number of cells per column",
          accessMode='ReadWrite',
          dataType="UInt32",
          count=1,
          constraints=""),
        activationThreshold=dict(
          description="If the number of active connected synapses on a "
                      "segment is at least this threshold, the segment "
                      "is said to be active.",
          accessMode='ReadWrite',
          dataType="UInt32",
          count=1,
          constraints=""),
        initialPermanence=dict(
          description="Initial permanence of a new synapse.",
          accessMode='ReadWrite',
          dataType="Real32",
          count=1,
          constraints=""),
        connectedPermanence=dict(
          description="If the permanence value for a synapse is greater "
                      "than this value, it is said to be connected.",
          accessMode='ReadWrite',
          dataType="Real32",
          count=1,
          constraints=""),
        minThreshold=dict(
          description="If the number of synapses active on a segment is at "
                      "least this threshold, it is selected as the best "
                      "matching cell in a bursting column.",
          accessMode='ReadWrite',
          dataType="UInt32",
          count=1,
          constraints=""),
        maxNewSynapseCount=dict(
          description="The maximum number of synapses added to a segment "
                      "during learning.",
          accessMode='ReadWrite',
          dataType="UInt32",
          count=1),
        permanenceIncrement=dict(
          description="Amount by which permanences of synapses are "
                      "incremented during learning.",
          accessMode='ReadWrite',
          dataType="Real32",
          count=1),
        permanenceDecrement=dict(
          description="Amount by which permanences of synapses are "
                      "decremented during learning.",
          accessMode='ReadWrite',
          dataType="Real32",
          count=1),
        predictedSegmentDecrement=dict(
          description="Amount by which active permanences of synapses of "
                      "previously predicted but inactive segments are "
                      "decremented.",
          accessMode='ReadWrite',
          dataType="Real32",
          count=1),
        numActiveColumnsPerInhArea=dict(
          description="The number of active cells invoked per object",
          accessMode='ReadWrite',
          dataType="UInt32",
          count=1,
          constraints=""),
        seed=dict(
          description="Seed for the random number generator.",
          accessMode='ReadWrite',
          dataType="UInt32",
          count=1),
        defaultOutputType=dict(
          description="Controls what type of cell output is placed into"
                      " the default output 'feedForwardOutput'",
          accessMode="ReadWrite",
          dataType="Byte",
          count=0,
          constraints="enum: active,predictive,predictedActiveCells",
          defaultValue="active"),
      ),
      commands=dict(
        reset=dict(description="Explicitly reset TM states now."),
      )
    )

    return spec


  def __init__(self,
               columnCount=2048,
               inputWidth=16384,
               cellsPerColumn=1,
               activationThreshold=13,
               initialPermanence=0.21,
               connectedPermanence=0.50,
               minThreshold=10,
               maxNewSynapseCount=20,
               permanenceIncrement=0.10,
               permanenceDecrement=0.10,
               predictedSegmentDecrement=0.0,
               seed=42,
               numActiveColumnsPerInhArea=40,
               defaultOutputType = "active",
               **kwargs):
    # Defaults for all other parameters
    self.columnCount = columnCount
    self.inputWidth = inputWidth
    self.cellsPerColumn = cellsPerColumn
    self.activationThreshold = activationThreshold
    self.initialPermanence = initialPermanence
    self.connectedPermanence = connectedPermanence
    self.minThreshold = minThreshold
    self.maxNewSynapseCount = maxNewSynapseCount
    self.permanenceIncrement = permanenceIncrement
    self.permanenceDecrement = permanenceDecrement
    self.predictedSegmentDecrement = predictedSegmentDecrement
    self.seed = seed
    self.learningMode = True
    self.inferenceMode = True
    self.defaultOutputType = defaultOutputType
    self.numActiveColumnsPerInhArea = numActiveColumnsPerInhArea

    self._pooler = None

    PyRegion.__init__(self, **kwargs)


  def initialize(self, inputs, outputs):
    """
    Initialize the internal objects.
    """
    if self._pooler is None:
      args = copy.deepcopy(self.__dict__)
      self._pooler = ColumnPooler(
        columnDimensions=[self.columnCount, 1],
        maxSynapsesPerSegment = self.inputWidth,
        **args
      )


  def compute(self, inputs, outputs):
    """
    Run one iteration of compute.

    Note that if the reset signal is True (1) we assume this iteration
    represents the *end* of a sequence. The output will contain the
    representation to this point and any history will then be reset. The output
    at the next compute will start fresh, presumably with bursting columns.
    """
    print "In L2 column"
    self._pooler.compute(
      feedforwardInput=inputs['feedforwardInput'],
      activeExternalCells=inputs.get('lateralInput', None),
      learn=self.learningMode
    )


  def reset(self):
    """ Reset the state of the layer"""
    pass


  def getParameter(self, parameterName, index=-1):
    """
    Get the value of a NodeSpec parameter. Most parameters are handled
    automatically by PyRegion's parameter get mechanism. The ones that need
    special treatment are explicitly handled here.
    """
    return PyRegion.getParameter(self, parameterName, index)


  def setParameter(self, parameterName, index, parameterValue):
    """
    Set the value of a Spec parameter. Most parameters are handled
    automatically by PyRegion's parameter set mechanism. The ones that need
    special treatment are explicitly handled here.
    """
    if parameterName in ["learningMode", "inferenceMode"]:
      setattr(self, parameterName, bool(parameterValue))
    elif hasattr(self, parameterName):
      setattr(self, parameterName, parameterValue)
    else:
      raise Exception("Unknown parameter: " + parameterName)
    self.inputWidth = self.columnCount*self.cellsPerColumn


  def getOutputElementCount(self, name):
    """
    Return the number of elements for the given output.
    """
    if name in ["feedForwardOutput", "predictedActiveCells", "predictiveCells",
                "activeCells"]:
      return self.columnCount * self.cellsPerColumn
    else:
      raise Exception("Invalid output name specified")
class ColumnPoolerRegion(PyRegion):
  """
  The ColumnPoolerRegion implements an L2 layer within a single cortical column / cortical
  module.

  The layer supports feed forward (proximal) and lateral inputs.
  """

  @classmethod
  def getSpec(cls):
    """
    Return the Spec for ColumnPoolerRegion.

    The parameters collection is constructed based on the parameters specified
    by the various components (tmSpec and otherSpec)
    """
    spec = dict(
      description=ColumnPoolerRegion.__doc__,
      singleNodeOnly=True,
      inputs=dict(
        feedforwardInput=dict(
          description="The primary feed-forward input to the layer, this is a"
                      " binary array containing 0's and 1's",
          dataType="Real32",
          count=0,
          required=True,
          regionLevel=True,
          isDefaultInput=True,
          requireSplitterMap=False),

        feedforwardGrowthCandidates=dict(
          description=("An array of 0's and 1's representing feedforward input " +
                       "that can be learned on new proximal synapses. If this " +
                       "input isn't provided, the whole feedforwardInput is "
                       "used."),
          dataType="Real32",
          count=0,
          required=False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),

        predictedInput=dict(
          description=("An array of 0s and 1s representing input cells that " +
                       "are predicted to become active in the next time step. " +
                       "If this input is not provided, some features related " +
                       "to online learning may not function properly."),
          dataType="Real32",
          count=0,
          required=False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),

        lateralInput=dict(
          description="Lateral binary input into this column, presumably from"
                      " other neighboring columns.",
          dataType="Real32",
          count=0,
          required=False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),

        resetIn=dict(
          description="A boolean flag that indicates whether"
                      " or not the input vector received in this compute cycle"
                      " represents the first presentation in a"
                      " new temporal sequence.",
          dataType='Real32',
          count=1,
          required=False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),

      ),
      outputs=dict(
        feedForwardOutput=dict(
          description="The default output of ColumnPoolerRegion. By default this"
                      " outputs the active cells. You can change this "
                      " dynamically using the defaultOutputType parameter.",
          dataType="Real32",
          count=0,
          regionLevel=True,
          isDefaultOutput=True),

        activeCells=dict(
          description="A binary output containing a 1 for every"
                      " cell that is currently active.",
          dataType="Real32",
          count=0,
          regionLevel=True,
          isDefaultOutput=False),

      ),
      parameters=dict(
        learningMode=dict(
          description="Whether the node is learning (default True).",
          accessMode="ReadWrite",
          dataType="Bool",
          count=1,
          defaultValue="true"),
        onlineLearning=dict(
          description="Whether to use onlineLearning or not (default False).",
          accessMode="ReadWrite",
          dataType="Bool",
          count=1,
          defaultValue="false"),
        learningTolerance=dict(
          description="How much variation in SDR size to accept when learning. "
                      "Only has an effect if online learning is enabled. "
                      "Should be at most 1 - inertiaFactor.",
          accessMode="ReadWrite",
          dataType="Real32",
          count=1,
          defaultValue="false"),
        cellCount=dict(
          description="Number of cells in this layer",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        inputWidth=dict(
          description='Number of inputs to the layer.',
          accessMode='Read',
          dataType='UInt32',
          count=1,
          constraints=''),
        numOtherCorticalColumns=dict(
          description="The number of lateral inputs that this L2 will receive. "
                      "This region assumes that every lateral input is of size "
                      "'cellCount'.",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        sdrSize=dict(
          description="The number of active cells invoked per object",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        maxSdrSize=dict(
          description="The largest number of active cells in an SDR tolerated "
                      "during learning. Stops learning when unions are active.",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        minSdrSize=dict(
          description="The smallest number of active cells in an SDR tolerated "
                      "during learning.  Stops learning when possibly on a "
                      "different object or sequence",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),

        #
        # Proximal
        #
        synPermProximalInc=dict(
          description="Amount by which permanences of proximal synapses are "
                      "incremented during learning.",
          accessMode="Read",
          dataType="Real32",
          count=1),
        synPermProximalDec=dict(
          description="Amount by which permanences of proximal synapses are "
                      "decremented during learning.",
          accessMode="Read",
          dataType="Real32",
          count=1),
        initialProximalPermanence=dict(
          description="Initial permanence of a new proximal synapse.",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),
        sampleSizeProximal=dict(
          description="The desired number of active synapses for an active cell",
          accessMode="Read",
          dataType="Int32",
          count=1),
        minThresholdProximal=dict(
          description="If the number of synapses active on a proximal segment "
                      "is at least this threshold, it is considered as a "
                      "candidate active cell",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        connectedPermanenceProximal=dict(
          description="If the permanence value for a synapse is greater "
                      "than this value, it is said to be connected.",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),
        predictedInhibitionThreshold=dict(
          description="How many predicted cells are required to cause "
                      "inhibition in the pooler.  Only has an effect if online "
                      "learning is enabled.",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),

        #
        # Distal
        #
        synPermDistalInc=dict(
          description="Amount by which permanences of synapses are "
                      "incremented during learning.",
          accessMode="Read",
          dataType="Real32",
          count=1),
        synPermDistalDec=dict(
          description="Amount by which permanences of synapses are "
                      "decremented during learning.",
          accessMode="Read",
          dataType="Real32",
          count=1),
        initialDistalPermanence=dict(
          description="Initial permanence of a new synapse.",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),
        sampleSizeDistal=dict(
          description="The desired number of active synapses for an active "
                      "segment.",
          accessMode="Read",
          dataType="Int32",
          count=1),
        activationThresholdDistal=dict(
          description="If the number of synapses active on a distal segment is "
                      "at least this threshold, the segment is considered "
                      "active",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        connectedPermanenceDistal=dict(
          description="If the permanence value for a synapse is greater "
                      "than this value, it is said to be connected.",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),
        inertiaFactor=dict(
          description="Controls the proportion of previously active cells that "
                      "remain active through inertia in the next timestep (in  "
                      "the absence of inhibition).",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),



        seed=dict(
          description="Seed for the random number generator.",
          accessMode="Read",
          dataType="UInt32",
          count=1),
        defaultOutputType=dict(
          description="Controls what type of cell output is placed into"
                      " the default output 'feedForwardOutput'",
          accessMode="ReadWrite",
          dataType="Byte",
          count=0,
          constraints="enum: active,predicted,predictedActiveCells",
          defaultValue="active"),
      ),
      commands=dict(
        reset=dict(description="Explicitly reset TM states now."),
      )
    )

    return spec


  def __init__(self,
               cellCount=4096,
               inputWidth=16384,
               numOtherCorticalColumns=0,
               sdrSize=40,
               onlineLearning = False,
               maxSdrSize = None,
               minSdrSize = None,

               # Proximal
               synPermProximalInc=0.1,
               synPermProximalDec=0.001,
               initialProximalPermanence=0.6,
               sampleSizeProximal=20,
               minThresholdProximal=1,
               connectedPermanenceProximal=0.50,
               predictedInhibitionThreshold=20,

               # Distal
               synPermDistalInc=0.10,
               synPermDistalDec=0.10,
               initialDistalPermanence=0.21,
               sampleSizeDistal=20,
               activationThresholdDistal=13,
               connectedPermanenceDistal=0.50,
               inertiaFactor=1.,

               seed=42,
               defaultOutputType = "active",
               **kwargs):

    # Used to derive Column Pooler params
    self.numOtherCorticalColumns = numOtherCorticalColumns

    # Column Pooler params
    self.inputWidth = inputWidth
    self.cellCount = cellCount
    self.sdrSize = sdrSize
    self.onlineLearning = onlineLearning
    self.maxSdrSize = maxSdrSize
    self.minSdrSize = minSdrSize
    self.synPermProximalInc = synPermProximalInc
    self.synPermProximalDec = synPermProximalDec
    self.initialProximalPermanence = initialProximalPermanence
    self.sampleSizeProximal = sampleSizeProximal
    self.minThresholdProximal = minThresholdProximal
    self.connectedPermanenceProximal = connectedPermanenceProximal
    self.predictedInhibitionThreshold = predictedInhibitionThreshold
    self.synPermDistalInc = synPermDistalInc
    self.synPermDistalDec = synPermDistalDec
    self.initialDistalPermanence = initialDistalPermanence
    self.sampleSizeDistal = sampleSizeDistal
    self.activationThresholdDistal = activationThresholdDistal
    self.connectedPermanenceDistal = connectedPermanenceDistal
    self.inertiaFactor = inertiaFactor
    self.seed = seed

    # Region params
    self.learningMode = True
    self.defaultOutputType = defaultOutputType

    self._pooler = None

    PyRegion.__init__(self, **kwargs)


  def initialize(self):
    """
    Initialize the internal objects.
    """
    if self._pooler is None:
      params = {
        "inputWidth": self.inputWidth,
        "lateralInputWidths": [self.cellCount] * self.numOtherCorticalColumns,
        "cellCount": self.cellCount,
        "sdrSize": self.sdrSize,
        "onlineLearning": self.onlineLearning,
        "maxSdrSize": self.maxSdrSize,
        "minSdrSize": self.minSdrSize,
        "synPermProximalInc": self.synPermProximalInc,
        "synPermProximalDec": self.synPermProximalDec,
        "initialProximalPermanence": self.initialProximalPermanence,
        "minThresholdProximal": self.minThresholdProximal,
        "sampleSizeProximal": self.sampleSizeProximal,
        "connectedPermanenceProximal": self.connectedPermanenceProximal,
        "predictedInhibitionThreshold": self.predictedInhibitionThreshold,
        "synPermDistalInc": self.synPermDistalInc,
        "synPermDistalDec": self.synPermDistalDec,
        "initialDistalPermanence": self.initialDistalPermanence,
        "activationThresholdDistal": self.activationThresholdDistal,
        "sampleSizeDistal": self.sampleSizeDistal,
        "connectedPermanenceDistal": self.connectedPermanenceDistal,
        "inertiaFactor": self.inertiaFactor,
        "seed": self.seed,
      }
      self._pooler = ColumnPooler(**params)


  def compute(self, inputs, outputs):
    """
    Run one iteration of compute.

    Note that if the reset signal is True (1) we assume this iteration
    represents the *end* of a sequence. The output will contain the
    representation to this point and any history will then be reset. The output
    at the next compute will start fresh, presumably with bursting columns.
    """
    # Handle reset first (should be sent with an empty signal)
    if "resetIn" in inputs:
      assert len(inputs["resetIn"]) == 1
      if inputs["resetIn"][0] != 0:
        # send empty output
        self.reset()
        outputs["feedForwardOutput"][:] = 0
        outputs["activeCells"][:] = 0
        return

    feedforwardInput = numpy.asarray(inputs["feedforwardInput"].nonzero()[0],
                                     dtype="uint32")

    if "feedforwardGrowthCandidates" in inputs:
      feedforwardGrowthCandidates = numpy.asarray(
        inputs["feedforwardGrowthCandidates"].nonzero()[0], dtype="uint32")
    else:
      feedforwardGrowthCandidates = feedforwardInput

    if "lateralInput" in inputs:
      lateralInputs = tuple(numpy.asarray(singleInput.nonzero()[0],
                                          dtype="uint32")
                            for singleInput
                            in numpy.split(inputs["lateralInput"],
                                           self.numOtherCorticalColumns))
    else:
      lateralInputs = ()

    if "predictedInput" in inputs:
      predictedInput = numpy.asarray(
        inputs["predictedInput"].nonzero()[0], dtype="uint32")
    else:
      predictedInput = None

    # Send the inputs into the Column Pooler.
    self._pooler.compute(feedforwardInput, lateralInputs,
                         feedforwardGrowthCandidates, learn=self.learningMode,
                         predictedInput = predictedInput)

    # Extract the active / predicted cells and put them into binary arrays.
    outputs["activeCells"][:] = 0
    outputs["activeCells"][self._pooler.getActiveCells()] = 1

    # Send appropriate output to feedForwardOutput.
    if self.defaultOutputType == "active":
      outputs["feedForwardOutput"][:] = outputs["activeCells"]
    else:
      raise Exception("Unknown outputType: " + self.defaultOutputType)


  def reset(self):
    """ Reset the state of the layer"""
    if self._pooler is not None:
      self._pooler.reset()


  def getParameter(self, parameterName, index=-1):
    """
    Get the value of a NodeSpec parameter. Most parameters are handled
    automatically by PyRegion's parameter get mechanism. The ones that need
    special treatment are explicitly handled here.
    """
    return PyRegion.getParameter(self, parameterName, index)


  def setParameter(self, parameterName, index, parameterValue):
    """
    Set the value of a Spec parameter.
    """
    if hasattr(self, parameterName):
      setattr(self, parameterName, parameterValue)
    else:
      raise Exception("Unknown parameter: " + parameterName)


  def getOutputElementCount(self, name):
    """
    Return the number of elements for the given output.
    """
    if name in ["feedForwardOutput", "activeCells"]:
      return self.cellCount
    else:
      raise Exception("Invalid output name specified: " + name)
class ColumnPoolerRegion(PyRegion):
  """
  The ColumnPoolerRegion implements an L2 layer within a single cortical column / cortical
  module.

  The layer supports feed forward (proximal) and lateral inputs.
  """

  @classmethod
  def getSpec(cls):
    """
    Return the Spec for ColumnPoolerRegion.

    The parameters collection is constructed based on the parameters specified
    by the various components (tmSpec and otherSpec)
    """
    spec = dict(
      description=ColumnPoolerRegion.__doc__,
      singleNodeOnly=True,
      inputs=dict(
        feedforwardInput=dict(
          description="The primary feed-forward input to the layer, this is a"
                      " binary array containing 0's and 1's",
          dataType="Real32",
          count=0,
          required=True,
          regionLevel=True,
          isDefaultInput=True,
          requireSplitterMap=False),

        lateralInput=dict(
          description="Lateral binary input into this column, presumably from"
                      " other neighboring columns.",
          dataType="Real32",
          count=0,
          required=False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),

        resetIn=dict(
          description="A boolean flag that indicates whether"
                      " or not the input vector received in this compute cycle"
                      " represents the first presentation in a"
                      " new temporal sequence.",
          dataType='Real32',
          count=1,
          required=False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),

      ),
      outputs=dict(
        feedForwardOutput=dict(
          description="The default output of ColumnPoolerRegion. By default this"
                      " outputs the active cells. You can change this "
                      " dynamically using the defaultOutputType parameter.",
          dataType="Real32",
          count=0,
          regionLevel=True,
          isDefaultOutput=True),

        predictedCells=dict(
          description="A binary output containing a 1 for every"
                      " cell that was predicted for this timestep.",
          dataType="Real32",
          count=0,
          regionLevel=True,
          isDefaultOutput=False),

        predictedActiveCells=dict(
          description="A binary output containing a 1 for every"
                      " cell that transitioned from predicted to active.",
          dataType="Real32",
          count=0,
          regionLevel=True,
          isDefaultOutput=False),

        activeCells=dict(
          description="A binary output containing a 1 for every"
                      " cell that is currently active.",
          dataType="Real32",
          count=0,
          regionLevel=True,
          isDefaultOutput=False),

      ),
      parameters=dict(
        learningMode=dict(
          description="Whether the node is learning (default True).",
          accessMode="ReadWrite",
          dataType="Bool",
          count=1,
          defaultValue="true"),
        inferenceMode=dict(
          description="Whether the node is inferring (default True).",
          accessMode='ReadWrite',
          dataType='Bool',
          count=1,
          defaultValue="true"),
        columnCount=dict(
          description="Number of columns in this layer",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        inputWidth=dict(
          description='Number of inputs to the layer.',
          accessMode='Read',
          dataType='UInt32',
          count=1,
          constraints=''),
        lateralInputWidth=dict(
          description='Number of lateral inputs to the layer.',
          accessMode='Read',
          dataType='UInt32',
          count=1,
          constraints=''),
        activationThresholdDistal=dict(
          description="If the number of active connected synapses on a "
                      "distal segment is at least this threshold, the segment "
                      "is said to be active.",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        initialPermanence=dict(
          description="Initial permanence of a new synapse.",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),
        connectedPermanence=dict(
          description="If the permanence value for a synapse is greater "
                      "than this value, it is said to be connected.",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),
        minThresholdProximal=dict(
          description="If the number of synapses active on a proximal segment "
                      "is at least this threshold, it is considered as a "
                      "candidate active cell",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        minThresholdDistal=dict(
          description="If the number of synapses active on a distal segment is "
                      "at least this threshold, it is selected as the best "
                      "matching cell in a bursting column.",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        maxNewProximalSynapseCount=dict(
          description="The maximum number of synapses added to a proximal segment "
                      "at each iteration during learning.",
          accessMode="Read",
          dataType="UInt32",
          count=1),
        maxNewDistalSynapseCount=dict(
          description="The maximum number of synapses added to a distal segment "
                      "at each iteration during learning.",
          accessMode="Read",
          dataType="UInt32",
          count=1),
        maxSynapsesPerDistalSegment=dict(
          description="The maximum number of synapses on a distal segment ",
          accessMode="Read",
          dataType="UInt32",
          count=1),
        maxSynapsesPerProximalSegment=dict(
          description="The maximum number of synapses on a proximal segment ",
          accessMode="Read",
          dataType="UInt32",
          count=1),
        permanenceIncrement=dict(
          description="Amount by which permanences of synapses are "
                      "incremented during learning.",
          accessMode="Read",
          dataType="Real32",
          count=1),
        permanenceDecrement=dict(
          description="Amount by which permanences of synapses are "
                      "decremented during learning.",
          accessMode="Read",
          dataType="Real32",
          count=1),
        synPermProximalInc=dict(
          description="Amount by which permanences of proximal synapses are "
                      "incremented during learning.",
          accessMode="Read",
          dataType="Real32",
          count=1),
        synPermProximalDec=dict(
          description="Amount by which permanences of proximal synapses are "
                      "decremented during learning.",
          accessMode="Read",
          dataType="Real32",
          count=1),
        initialProximalPermanence=dict(
          description="Initial permanence of a new proximal synapse.",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),
        predictedSegmentDecrement=dict(
          description="Amount by which active permanences of synapses of "
                      "previously predicted but inactive segments are "
                      "decremented.",
          accessMode="Read",
          dataType="Real32",
          count=1),
        numActiveColumnsPerInhArea=dict(
          description="The number of active cells invoked per object",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        seed=dict(
          description="Seed for the random number generator.",
          accessMode="Read",
          dataType="UInt32",
          count=1),
        defaultOutputType=dict(
          description="Controls what type of cell output is placed into"
                      " the default output 'feedForwardOutput'",
          accessMode="ReadWrite",
          dataType="Byte",
          count=0,
          constraints="enum: active,predicted,predictedActiveCells",
          defaultValue="active"),
      ),
      commands=dict(
        reset=dict(description="Explicitly reset TM states now."),
      )
    )

    return spec


  def __init__(self,
               columnCount=2048,
               inputWidth=16384,
               lateralInputWidth=0,
               activationThresholdDistal=13,
               initialPermanence=0.21,
               connectedPermanence=0.50,
               minThresholdProximal=1,
               minThresholdDistal=10,
               maxNewProximalSynapseCount=20,
               maxNewDistalSynapseCount=20,
               permanenceIncrement=0.10,
               permanenceDecrement=0.10,
               predictedSegmentDecrement=0.0,
               synPermProximalInc=0.1,
               synPermProximalDec=0.001,
               initialProximalPermanence = 0.6,
               seed=42,
               numActiveColumnsPerInhArea=40,
               defaultOutputType = "active",
               **kwargs):

    # Modified Column Pooler params
    self.columnCount = columnCount

    # Column Pooler params
    self.inputWidth = inputWidth
    self.lateralInputWidth = lateralInputWidth
    self.activationThresholdDistal = activationThresholdDistal
    self.initialPermanence = initialPermanence
    self.connectedPermanence = connectedPermanence
    self.minThresholdProximal = minThresholdProximal
    self.minThresholdDistal = minThresholdDistal
    self.maxNewProximalSynapseCount = maxNewProximalSynapseCount
    self.maxNewDistalSynapseCount = maxNewDistalSynapseCount
    self.permanenceIncrement = permanenceIncrement
    self.permanenceDecrement = permanenceDecrement
    self.predictedSegmentDecrement = predictedSegmentDecrement
    self.synPermProximalInc = synPermProximalInc
    self.synPermProximalDec = synPermProximalDec
    self.initialProximalPermanence = initialProximalPermanence
    self.seed = seed
    self.numActiveColumnsPerInhArea = numActiveColumnsPerInhArea
    self.maxSynapsesPerSegment = inputWidth

    # Region params
    self.learningMode = True
    self.inferenceMode = True
    self.defaultOutputType = defaultOutputType

    self._pooler = None

    PyRegion.__init__(self, **kwargs)


  def initialize(self, inputs, outputs):
    """
    Initialize the internal objects.
    """
    if self._pooler is None:
      params = {
        "inputWidth": self.inputWidth,
        "lateralInputWidth": self.lateralInputWidth,
        "columnDimensions": (self.columnCount,),
        "activationThresholdDistal": self.activationThresholdDistal,
        "initialPermanence": self.initialPermanence,
        "connectedPermanence": self.connectedPermanence,
        "minThresholdProximal": self.minThresholdProximal,
        "minThresholdDistal": self.minThresholdDistal,
        "maxNewProximalSynapseCount": self.maxNewProximalSynapseCount,
        "maxNewDistalSynapseCount": self.maxNewDistalSynapseCount,
        "permanenceIncrement": self.permanenceIncrement,
        "permanenceDecrement": self.permanenceDecrement,
        "predictedSegmentDecrement": self.predictedSegmentDecrement,
        "synPermProximalInc": self.synPermProximalInc,
        "synPermProximalDec": self.synPermProximalDec,
        "initialProximalPermanence": self.initialProximalPermanence,
        "seed": self.seed,
        "numActiveColumnsPerInhArea": self.numActiveColumnsPerInhArea,
        "maxSynapsesPerProximalSegment": self.inputWidth,
      }
      self._pooler = ColumnPooler(**params)


  def compute(self, inputs, outputs):
    """
    Run one iteration of compute.

    Note that if the reset signal is True (1) we assume this iteration
    represents the *end* of a sequence. The output will contain the
    representation to this point and any history will then be reset. The output
    at the next compute will start fresh, presumably with bursting columns.
    """
    # Handle reset first (should be sent with an empty signal)
    if "resetIn" in inputs:
      assert len(inputs["resetIn"]) == 1
      if inputs["resetIn"][0] != 0:
        # send empty output
        self.reset()
        outputs["feedForwardOutput"][:] = 0
        outputs["activeCells"][:] = 0
        outputs["predictedCells"][:] = 0
        outputs["predictedActiveCells"][:] = 0
        return

    feedforwardInput = set(inputs["feedforwardInput"].nonzero()[0])

    if "lateralInput" in inputs:
      lateralInput = set(inputs["lateralInput"].nonzero()[0])
    else:
      lateralInput = set()

    # Send the inputs into the Column Pooler.
    self._pooler.compute(feedforwardInput, lateralInput,
                         learn=self.learningMode)

    # Extract the active / predicted cells and put them into binary arrays.
    outputs["activeCells"][:] = 0
    outputs["activeCells"][self._pooler.getActiveCells()] = 1
    outputs["predictedCells"][:] = 0
    outputs["predictedCells"][self._pooler.getPredictiveCells()] = 1
    outputs["predictedActiveCells"][:] = (outputs["activeCells"] *
                                          outputs["predictedCells"])

    # Send appropriate output to feedForwardOutput.
    if self.defaultOutputType == "active":
      outputs["feedForwardOutput"][:] = outputs["activeCells"]
    elif self.defaultOutputType == "predicted":
      outputs["feedForwardOutput"][:] = outputs["predictedCells"]
    elif self.defaultOutputType == "predictedActiveCells":
      outputs["feedForwardOutput"][:] = outputs["predictedActiveCells"]
    else:
      raise Exception("Unknown outputType: " + self.defaultOutputType)


  def reset(self):
    """ Reset the state of the layer"""
    if self._pooler is not None:
      self._pooler.reset()


  def getParameter(self, parameterName, index=-1):
    """
    Get the value of a NodeSpec parameter. Most parameters are handled
    automatically by PyRegion's parameter get mechanism. The ones that need
    special treatment are explicitly handled here.
    """
    return PyRegion.getParameter(self, parameterName, index)


  def setParameter(self, parameterName, index, parameterValue):
    """
    Set the value of a Spec parameter.
    """
    if hasattr(self, parameterName):
      setattr(self, parameterName, parameterValue)
    else:
      raise Exception("Unknown parameter: " + parameterName)


  def getOutputElementCount(self, name):
    """
    Return the number of elements for the given output.
    """
    if name in ["feedForwardOutput", "predictedActiveCells", "predictedCells",
                "activeCells"]:
      return self.columnCount
    else:
      raise Exception("Invalid output name specified: " + name)
Example #17
0
class Grid2DLocationExperiment(object):
    """
  The experiment code organized into a class.
  """
    def __init__(self, objects, objectPlacements, featureNames,
                 locationConfigs, worldDimensions):

        self.objects = objects
        self.objectPlacements = objectPlacements
        self.worldDimensions = worldDimensions

        self.features = dict(
            (k,
             np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
            for k in featureNames)

        self.locationModules = [
            SuperficialLocationModule2D(anchorInputSize=150 * 32, **config)
            for config in locationConfigs
        ]

        self.inputLayer = ApicalTiebreakPairMemory(**{
            "columnCount": 150,
            "cellsPerColumn": 32,
            "basalInputSize": 18 * sum(np.prod(config["cellDimensions"])
                                       for config in locationConfigs),
            "apicalInputSize": 4096
        })

        self.objectLayer = ColumnPooler(**{"inputWidth": 150 * 32})

        # Use these for classifying SDRs and for testing whether they're correct.
        self.locationRepresentations = {
            # Example:
            # (objectName, (top, left)): [0, 26, 54, 77, 101, ...]
        }
        self.inputRepresentations = {
            # Example:
            # (objectName, (top, left), featureName): [0, 26, 54, 77, 101, ...]
        }
        self.objectRepresentations = {
            # Example:
            # objectName: [14, 19, 54, 107, 201, ...]
        }

        self.locationInWorld = None

        self.maxSettlingTime = 10

        self.monitors = {}
        self.nextMonitorToken = 1

    def addMonitor(self, monitor):
        """
    Subscribe to Grid2DLocationExperimentMonitor events.

    @param monitor (Grid2DLocationExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """

        token = self.nextMonitorToken
        self.nextMonitorToken += 1

        self.monitors[token] = monitor

        return token

    def removeMonitor(self, monitorToken):
        """
    Unsubscribe from LocationExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
        del self.monitors[monitorToken]

    def getActiveLocationCells(self):
        activeCells = np.array([], dtype="uint32")

        totalPrevCells = 0
        for i, module in enumerate(self.locationModules):
            activeCells = np.append(activeCells,
                                    module.getActiveCells() + totalPrevCells)
            totalPrevCells += module.numberOfCells()

        return activeCells

    def move(self, objectName, locationOnObject):
        objectPlacement = self.objectPlacements[objectName]
        locationInWorld = (objectPlacement[0] + locationOnObject[0],
                           objectPlacement[1] + locationOnObject[1])

        if self.locationInWorld is not None:
            deltaLocation = (locationInWorld[0] - self.locationInWorld[0],
                             locationInWorld[1] - self.locationInWorld[1])

            for monitor in self.monitors.values():
                monitor.beforeMove(deltaLocation)

            params = {"deltaLocation": deltaLocation}
            for module in self.locationModules:
                module.shift(**params)

            for monitor in self.monitors.values():
                monitor.afterLocationShift(**params)

        self.locationInWorld = locationInWorld
        for monitor in self.monitors.values():
            monitor.afterWorldLocationChanged(locationInWorld)

    def _senseInferenceMode(self, featureSDR):
        prevCellActivity = None
        for i in xrange(self.maxSettlingTime):
            inputParams = {
                "activeColumns": featureSDR,
                "basalInput": self.getActiveLocationCells(),
                "apicalInput": self.objectLayer.getActiveCells(),
                "learn": False
            }
            self.inputLayer.compute(**inputParams)

            objectParams = {
                "feedforwardInput": self.inputLayer.getActiveCells(),
                "feedforwardGrowthCandidates":
                    self.inputLayer.getPredictedActiveCells(),
                "learn": False,
            }
            self.objectLayer.compute(**objectParams)

            locationParams = {"anchorInput": self.inputLayer.getActiveCells()}
            for module in self.locationModules:
                module.anchor(**locationParams)

            cellActivity = (set(self.objectLayer.getActiveCells()),
                            set(self.inputLayer.getActiveCells()),
                            set(self.getActiveLocationCells()))

            if cellActivity == prevCellActivity:
                # It settled. Don't even log this timestep.
                break
            else:
                prevCellActivity = cellActivity
                for monitor in self.monitors.values():
                    if i > 0:
                        monitor.markSensoryRepetition()

                    monitor.afterInputCompute(**inputParams)
                    monitor.afterObjectCompute(**objectParams)
                    monitor.afterLocationAnchor(**locationParams)

    def _senseLearningMode(self, featureSDR):
        inputParams = {
            "activeColumns": featureSDR,
            "basalInput": self.getActiveLocationCells(),
            "apicalInput": self.objectLayer.getActiveCells(),
            "learn": True
        }
        self.inputLayer.compute(**inputParams)

        objectParams = {
            "feedforwardInput": self.inputLayer.getActiveCells(),
            "feedforwardGrowthCandidates":
                self.inputLayer.getPredictedActiveCells(),
            "learn": True,
        }
        self.objectLayer.compute(**objectParams)

        locationParams = {"anchorInput": self.inputLayer.getWinnerCells()}
        for module in self.locationModules:
            module.learn(**locationParams)

        for monitor in self.monitors.values():
            monitor.afterInputCompute(**inputParams)
            monitor.afterObjectCompute(**objectParams)

    def sense(self, featureSDR, learn):
        for monitor in self.monitors.values():
            monitor.beforeSense(featureSDR)

        if learn:
            self._senseLearningMode(featureSDR)
        else:
            self._senseInferenceMode(featureSDR)

    def learnObjects(self):
        """
    Learn each provided object.

    This method simultaneously learns 4 sets of synapses:
    - location -> input
    - input -> location
    - input -> object
    - object -> input
    """
        for objectName, objectFeatures in self.objects.iteritems():
            self.reset()

            for module in self.locationModules:
                module.activateRandomLocation()

            for feature in objectFeatures:
                locationOnObject = (feature["top"] + feature["height"] / 2,
                                    feature["left"] + feature["width"] / 2)
                self.move(objectName, locationOnObject)

                featureName = feature["name"]
                featureSDR = self.features[featureName]
                for _ in xrange(10):
                    self.sense(featureSDR, learn=True)

                self.locationRepresentations[(objectName, locationOnObject)] = (
                    self.getActiveLocationCells())
                self.inputRepresentations[(objectName, locationOnObject, featureName)] = (
                    self.inputLayer.getActiveCells())

            self.objectRepresentations[
                objectName] = self.objectLayer.getActiveCells()

    def inferObjectsWithRandomMovements(self):
        """
    Infer each object without any location input.
    """
        for objectName, objectFeatures in self.objects.iteritems():
            self.reset()

            inferred = False
            prevTouchSequence = None

            for _ in xrange(4):

                while True:
                    touchSequence = list(objectFeatures)
                    random.shuffle(touchSequence)

                    if prevTouchSequence is not None:
                        if touchSequence[0] == prevTouchSequence[-1]:
                            continue

                    break

                for i, feature in enumerate(touchSequence):
                    locationOnObject = (feature["top"] + feature["height"] / 2,
                                        feature["left"] + feature["width"] / 2)
                    self.move(objectName, locationOnObject)

                    featureName = feature["name"]
                    featureSDR = self.features[featureName]
                    self.sense(featureSDR, learn=False)

                    inferred = (
                        set(self.objectLayer.getActiveCells()) == set(
                            self.objectRepresentations[objectName])
                        and set(self.inputLayer.getActiveCells()) == set(
                            self.inputRepresentations[(objectName,
                                                       locationOnObject,
                                                       featureName)])
                        and set(self.getActiveLocationCells()) == set(
                            self.locationRepresentations[(objectName,
                                                          locationOnObject)]))

                    if inferred:
                        break

                prevTouchSequence = touchSequence

                if inferred:
                    break

    def reset(self):
        for module in self.locationModules:
            module.reset()
        self.objectLayer.reset()
        self.inputLayer.reset()

        self.locationInWorld = None

        for monitor in self.monitors.values():
            monitor.afterReset()
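
# Driver outline (not from the original source): a typical way to exercise the
# experiment class above.  The constructor arguments are placeholders whose
# exact structure is defined elsewhere in the project, not shown here.
#
#   exp = Grid2DLocationExperiment(objects, objectPlacements, featureNames,
#                                  locationConfigs, worldDimensions)
#   exp.learnObjects()                       # store object/location/input SDRs
#   exp.inferObjectsWithRandomMovements()    # re-infer each object by touch alone
#   exp.reset()
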
  def testShortInferenceSequence(self):
    """Tests inference after learning two objects with two patterns. """

    pooler = ColumnPooler(
      inputWidth=2048 * 8,
      columnDimensions=[2048, 1],
      maxSynapsesPerSegment=2048 * 8
    )
    activatedCells = numpy.zeros(pooler.numberOfCells())

    # Learn object one
    pooler.compute(feedforwardInput=set(range(0,40)), learn=True)
    activatedCells[pooler.getActiveCells()] = 1
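    # Use the sum of the active cell indices as a cheap fingerprint of the SDR,
    # so later activations can be compared against it.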
    sum1 = sum(pooler.getActiveCells())

    pooler.compute(feedforwardInput=set(range(100,140)), learn=True)
    activatedCells[pooler.getActiveCells()] = 1
    self.assertEqual(sum1,
                     sum(pooler.getActiveCells()),
                     "Activity for second pattern is incorrect")

    # Learn object two
    pooler.reset()
    pooler.compute(feedforwardInput=set(range(1000,1040)), learn=True)
    activatedCells[pooler.getActiveCells()] = 1
    sum2 = sum(pooler.getActiveCells())

    pooler.compute(feedforwardInput=set(range(1100,1140)), learn=True)
    activatedCells[pooler.getActiveCells()] = 1
    self.assertEqual(sum2,
                     sum(pooler.getActiveCells()),
                     "Activity for second pattern is incorrect")

    # Inferring on patterns in first object should lead to same result, even
    # after gap
    pooler.reset()
    pooler.compute(feedforwardInput=set(range(100,140)), learn=False)
    self.assertEqual(sum1,
                     sum(pooler.getActiveCells()),
                     "Inference on pattern after learning it is incorrect")

    # Inferring with no inputs should maintain same pattern
    pooler.compute(feedforwardInput=set(), learn=False)
    self.assertEqual(sum1,
                     sum(pooler.getActiveCells()),
                     "Inference doesn't maintain activity with no input.")

    pooler.reset()
    pooler.compute(feedforwardInput=set(range(0,40)), learn=False)
    self.assertEqual(sum1,
                     sum(pooler.getActiveCells()),
                     "Inference on pattern after learning it is incorrect")

    # Inferring on patterns in second object should lead to same result, even
    # after gap
    pooler.reset()
    pooler.compute(feedforwardInput=set(range(1100,1140)), learn=False)
    self.assertEqual(sum2,
                     sum(pooler.getActiveCells()),
                     "Inference on pattern after learning it is incorrect")

    # Inferring with no inputs should maintain same pattern
    pooler.compute(feedforwardInput=set(), learn=False)
    self.assertEqual(sum2,
                     sum(pooler.getActiveCells()),
                     "Inference doesn't maintain activity with no input.")

    pooler.reset()
    pooler.compute(feedforwardInput=set(range(1000,1040)), learn=False)
    self.assertEqual(sum2,
                     sum(pooler.getActiveCells()),
                     "Inference on pattern after learning it is incorrect")
  def testInitialProximalLearning(self):
    """Tests the first few steps of proximal learning. """

    pooler = ColumnPooler(
      inputWidth=2048 * 8,
      columnDimensions=[2048, 1],
      maxSynapsesPerSegment=2048 * 8
    )
    activatedCells = numpy.zeros(pooler.numberOfCells())

    # Get initial activity
    pooler.compute(feedforwardInput=set(range(0,40)), learn=True)
    activatedCells[pooler.getActiveCells()] = 1
    self.assertEqual(activatedCells.sum(), 40,
                     "Incorrect number of active cells")
    sum1 = sum(pooler.getActiveCells())

    # Ensure we've added correct number synapses on the active cells
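    # (800 = 40 active cells x 20 new proximal synapses grown per cell)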
    self.assertEqual(
      pooler.numberOfSynapses(pooler.getActiveCells()),
      800,
      "Incorrect number of nonzero permanences on active cells"
    )

    # Ensure they are all connected
    self.assertEqual(
      pooler.numberOfConnectedSynapses(pooler.getActiveCells()),
      800,
      "Incorrect number of connected synapses on active cells"
    )

    # If we call compute with different feedforward input we should
    # get the same set of active cells
    pooler.compute(feedforwardInput=set(range(100,140)), learn=True)
    self.assertEqual(sum1, sum(pooler.getActiveCells()),
                     "Activity is not consistent for same input")

    # Ensure we've added correct number of new synapses on the active cells
    self.assertEqual(
      pooler.numberOfSynapses(pooler.getActiveCells()),
      1600,
      "Incorrect number of nonzero permanences on active cells"
    )

    # Ensure they are all connected
    self.assertEqual(
      pooler.numberOfConnectedSynapses(pooler.getActiveCells()),
      1600,
      "Incorrect number of connected synapses on active cells"
    )

    # If we call compute with no input we should still
    # get the same set of active cells
    pooler.compute(feedforwardInput=set(), learn=True)
    self.assertEqual(sum1, sum(pooler.getActiveCells()),
                     "Activity is not consistent for same input")

    # Ensure we do actually add the number of synapses we want

    # In "learn new object mode", if we call compute with the same feedforward
    # input after reset we should not get the same set of active cells
    pooler.reset()
    pooler.compute(feedforwardInput=set(range(0,40)), learn=True)
    self.assertNotEqual(sum1, sum(pooler.getActiveCells()),
               "Activity should not be consistent for same input after reset")
    self.assertEqual(len(pooler.getActiveCells()), 40,
               "Incorrect number of active cells after reset")
class Grid2DLocationExperiment(object):
  """
  The experiment code organized into a class.
  """

  def __init__(self, objects, objectPlacements, featureNames, locationConfigs,
               worldDimensions):

    self.objects = objects
    self.objectPlacements = objectPlacements
    self.worldDimensions = worldDimensions

    self.features = dict(
      (k, np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
      for k in featureNames)

    self.locationModules = [SuperficialLocationModule2D(anchorInputSize=150*32,
                                                        **config)
                            for config in locationConfigs]

    self.inputLayer = ApicalTiebreakPairMemory(**{
      "columnCount": 150,
      "cellsPerColumn": 32,
      "basalInputSize": 18 * sum(np.prod(config["cellDimensions"])
                                 for config in locationConfigs),
      "apicalInputSize": 4096
    })

    self.objectLayer = ColumnPooler(**{
      "inputWidth": 150 * 32
    })

    # Use these for classifying SDRs and for testing whether they're correct.
    self.locationRepresentations = {
      # Example:
      # (objectName, (top, left)): [0, 26, 54, 77, 101, ...]
    }
    self.inputRepresentations = {
      # Example:
      # (objectName, (top, left), featureName): [0, 26, 54, 77, 101, ...]
    }
    self.objectRepresentations = {
      # Example:
      # objectName: [14, 19, 54, 107, 201, ...]
    }

    self.locationInWorld = None

    self.maxSettlingTime = 10

    self.monitors = {}
    self.nextMonitorToken = 1


  def addMonitor(self, monitor):
    """
    Subscribe to Grid2DLocationExperimentMonitor events.

    @param monitor (Grid2DLocationExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """

    token = self.nextMonitorToken
    self.nextMonitorToken += 1

    self.monitors[token] = monitor

    return token


  def removeMonitor(self, monitorToken):
    """
    Unsubscribe from LocationExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
    del self.monitors[monitorToken]


  def getActiveLocationCells(self):
    activeCells = np.array([], dtype="uint32")

    totalPrevCells = 0
    for i, module in enumerate(self.locationModules):
      activeCells = np.append(activeCells,
                              module.getActiveCells() + totalPrevCells)
      totalPrevCells += module.numberOfCells()

    return activeCells


  def move(self, objectName, locationOnObject):
    objectPlacement = self.objectPlacements[objectName]
    locationInWorld = (objectPlacement[0] + locationOnObject[0],
                       objectPlacement[1] + locationOnObject[1])

    if self.locationInWorld is not None:
      deltaLocation = (locationInWorld[0] - self.locationInWorld[0],
                       locationInWorld[1] - self.locationInWorld[1])

      for monitor in self.monitors.values():
        monitor.beforeMove(deltaLocation)

      params = {
        "deltaLocation": deltaLocation
      }
      for module in self.locationModules:
        module.shift(**params)

      for monitor in self.monitors.values():
        monitor.afterLocationShift(**params)

    self.locationInWorld = locationInWorld
    for monitor in self.monitors.values():
      monitor.afterWorldLocationChanged(locationInWorld)


  def _senseInferenceMode(self, featureSDR):
    prevCellActivity = None
    for i in xrange(self.maxSettlingTime):
      inputParams = {
        "activeColumns": featureSDR,
        "basalInput": self.getActiveLocationCells(),
        "apicalInput": self.objectLayer.getActiveCells(),
        "learn": False
      }
      self.inputLayer.compute(**inputParams)

      objectParams = {
        "feedforwardInput": self.inputLayer.getActiveCells(),
        "feedforwardGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
        "learn": False,
      }
      self.objectLayer.compute(**objectParams)

      locationParams = {
        "anchorInput": self.inputLayer.getActiveCells()
      }
      for module in self.locationModules:
        module.anchor(**locationParams)

      cellActivity = (set(self.objectLayer.getActiveCells()),
                      set(self.inputLayer.getActiveCells()),
                      set(self.getActiveLocationCells()))

      if cellActivity == prevCellActivity:
        # It settled. Don't even log this timestep.
        break
      else:
        prevCellActivity = cellActivity
        for monitor in self.monitors.values():
          if i > 0:
            monitor.markSensoryRepetition()

          monitor.afterInputCompute(**inputParams)
          monitor.afterObjectCompute(**objectParams)
          monitor.afterLocationAnchor(**locationParams)


  def _senseLearningMode(self, featureSDR):
    inputParams = {
      "activeColumns": featureSDR,
      "basalInput": self.getActiveLocationCells(),
      "apicalInput": self.objectLayer.getActiveCells(),
      "learn": True
    }
    self.inputLayer.compute(**inputParams)

    objectParams = {
      "feedforwardInput": self.inputLayer.getActiveCells(),
      "feedforwardGrowthCandidates": self.inputLayer.getPredictedActiveCells(),
      "learn": True,
    }
    self.objectLayer.compute(**objectParams)

    locationParams = {
      "anchorInput": self.inputLayer.getWinnerCells()
    }
    for module in self.locationModules:
      module.learn(**locationParams)

    for monitor in self.monitors.values():
      monitor.afterInputCompute(**inputParams)
      monitor.afterObjectCompute(**objectParams)


  def sense(self, featureSDR, learn):
    for monitor in self.monitors.values():
      monitor.beforeSense(featureSDR)

    if learn:
      self._senseLearningMode(featureSDR)
    else:
      self._senseInferenceMode(featureSDR)


  def learnObjects(self):
    """
    Learn each provided object.

    This method simultaneously learns 4 sets of synapses:
    - location -> input
    - input -> location
    - input -> object
    - object -> input
    """
    for objectName, objectFeatures in self.objects.iteritems():
      self.reset()

      for module in self.locationModules:
        module.activateRandomLocation()

      for feature in objectFeatures:
        locationOnObject = (feature["top"] + feature["height"]/2,
                            feature["left"] + feature["width"]/2)
        self.move(objectName, locationOnObject)

        featureName = feature["name"]
        featureSDR = self.features[featureName]
        for _ in xrange(10):
          self.sense(featureSDR, learn=True)

        self.locationRepresentations[(objectName, locationOnObject)] = (
          self.getActiveLocationCells())
        self.inputRepresentations[(objectName, locationOnObject, featureName)] = (
          self.inputLayer.getActiveCells())

      self.objectRepresentations[objectName] = self.objectLayer.getActiveCells()


  def inferObjectsWithRandomMovements(self):
    """
    Infer each object without any location input.
    """
    for objectName, objectFeatures in self.objects.iteritems():
      self.reset()

      inferred = False
      prevTouchSequence = None

      for _ in xrange(4):

        while True:
          touchSequence = list(objectFeatures)
          random.shuffle(touchSequence)

          if prevTouchSequence is not None:
            if touchSequence[0] == prevTouchSequence[-1]:
              continue

          break

        for i, feature in enumerate(touchSequence):
          locationOnObject = (feature["top"] + feature["height"]/2,
                              feature["left"] + feature["width"]/2)
          self.move(objectName, locationOnObject)

          featureName = feature["name"]
          featureSDR = self.features[featureName]
          self.sense(featureSDR, learn=False)

          inferred = (
            set(self.objectLayer.getActiveCells()) ==
            set(self.objectRepresentations[objectName]) and

            set(self.inputLayer.getActiveCells()) ==
            set(self.inputRepresentations[(objectName,
                                           locationOnObject,
                                           featureName)]) and

            set(self.getActiveLocationCells()) ==
            set(self.locationRepresentations[(objectName, locationOnObject)]))

          if inferred:
            break

        prevTouchSequence = touchSequence

        if inferred:
          break


  def reset(self):
    for module in self.locationModules:
      module.reset()
    self.objectLayer.reset()
    self.inputLayer.reset()

    self.locationInWorld = None

    for monitor in self.monitors.values():
      monitor.afterReset()
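
A minimal sketch of the settling pattern that _senseInferenceMode uses above: run a
non-learning compute pass over the layers repeatedly and stop as soon as the joint
activity of the object, input, and location layers repeats. stepFn and getActivity
below are placeholder callables for illustration, not part of the experiment API.

def settle(stepFn, getActivity, maxSettlingTime=10):
  # stepFn() runs one non-learning pass over all layers; getActivity() returns
  # a hashable snapshot, e.g. a tuple of frozensets of active cell indices.
  prevActivity = None
  for _ in xrange(maxSettlingTime):
    stepFn()
    activity = getActivity()
    if activity == prevActivity:
      break  # The representations stopped changing; treat the network as settled.
    prevActivity = activity
  return prevActivity
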
class RelationalMemory(object):

  def __init__(self, l4N, l4W, numModules, moduleDimensions,
               maxActivePerModule, l6ActivationThreshold):
    self.numModules = numModules
    self.moduleDimensions = moduleDimensions
    self._cellsPerModule = np.prod(moduleDimensions)
    self.maxActivePerModule = maxActivePerModule
    self.l4N = l4N
    self.l4W = l4W
    self.l6ActivationThreshold = l6ActivationThreshold

    self.l4TM = TemporalMemory(
        columnCount=l4N,
        basalInputSize=numModules*self._cellsPerModule,
        cellsPerColumn=4,
        #activationThreshold=int(numModules / 2) + 1,
        #reducedBasalThreshold=int(numModules / 2) + 1,
        activationThreshold=1,
        reducedBasalThreshold=1,
        initialPermanence=1.0,
        connectedPermanence=0.5,
        minThreshold=1,
        sampleSize=numModules,
        permanenceIncrement=1.0,
        permanenceDecrement=0.0,
    )
    self.l6Connections = [Connections(numCells=self._cellsPerModule)
                          for _ in xrange(numModules)]

    self.pooler = ColumnPooler(
      inputWidth=self.numModules*self._cellsPerModule,
    )

    self.classifier = KNNClassifier(k=1, distanceMethod="rawOverlap")
    #self.classifier = KNNClassifier(k=1, distanceMethod="norm")

    # Active state
    self.activeL6Cells = [[] for _ in xrange(numModules)]
    self.activeL5Cells = [[] for _ in xrange(numModules)]
    self.predictedL6Cells = [set([]) for _ in xrange(numModules)]

    # Debug state
    self.activeL6BeforeMotor = [[] for _ in xrange(numModules)]
    self.l6ToL4Map = collections.defaultdict(list)

  def reset(self):
    self.activeL6Cells = [[] for _ in xrange(self.numModules)]
    self.activeL5Cells = [[] for _ in xrange(self.numModules)]
    self.predictedL6Cells = [set([]) for _ in xrange(self.numModules)]
    self.l4TM.reset()
    self.pooler.reset()

  def trainFeatures(self, sensoryInputs):
    # Randomly assign bilateral connections and zero others
    for sense in sensoryInputs:
      # Choose L6 cells randomly
      activeL6Cells = [[np.random.randint(self._cellsPerModule)]
                       for _ in xrange(self.numModules)]
      l4BasalInput = getGlobalIndices(activeL6Cells, self._cellsPerModule)

      # Learn L6->L4 connections
      for _ in xrange(4):
        self.l4TM.compute(activeColumns=sense, basalInput=l4BasalInput,
                          learn=True)
      activeL4Cells = self.l4TM.getActiveCells()
      # Debug: store the map
      for l6Cell in itertools.chain(*activeL6Cells):
        self.l6ToL4Map[l6Cell].extend(activeL4Cells)
      # Learn L4->L6 connections
      for l6Cells, connections in zip(activeL6Cells, self.l6Connections):
        # Assumes one cell active per L6 module when training features
        segment = connections.createSegment(l6Cells[0])
        for l4Cell in activeL4Cells:
          connections.createSynapse(segment, l4Cell, 1.0)

  def compute(self, ff, motor, objClass, outputFile):
    """Run one iteration of the online sensorimotor algorithm.

    This function has three stages:

    - FEEDFORWARD: drive L4 with the sensory input and the previous step's
      predicted L6 cells, then drive L6 from L4 and derive the L5 transforms.
    - MOTOR: path-integrate the active L6 cells using the motor command.
    - FEEDBACK: compute the L6 cells predicted for the next step from the
      active L6 cells and the current L5 transforms.

    Prerequisites: `trainFeatures` must have been run already

    :param ff: feedforward sensory input
    :param motor: the motor command for next move, in the form of delta
        coordinates
    :param objClass: the object class to train the classifier, or None
        if not learning
    :param outputFile: a writable file used to log each step as JSON, or a
        falsy value to disable logging
    """
    delta = motor

    # FEEDFORWARD

    # Determine active feature representation in l4, using lateral input
    # from l6 previous step feedback
    l4BasalInput = getGlobalIndices(self.predictedL6Cells, self._cellsPerModule)
    self.l4TM.compute(activeColumns=ff, basalInput=l4BasalInput,
                      learn=False)
    predictedL4Cells = self.l4TM.getPredictedCells()
    activeL4Cells = self.l4TM.getActiveCells()

    # Drive L6 activation from l4
    for m, connections in enumerate(self.l6Connections):
      newCells = []
      activeConnectedPerSegment = connections.computeActivity(activeL4Cells, 0.5)[0]
      for flatIdx, activeConnected in enumerate(activeConnectedPerSegment):
        if activeConnected >= self.l6ActivationThreshold:
          cellIdx = connections.segmentForFlatIdx(flatIdx).cell
          newCells.append(cellIdx)

      #for cell in newCells:
      #  print connections.segmentsForCell(cell)
      #print newCells
      #assert len(newCells) <= 1

      self.activeL6Cells[m].insert(0, newCells)
      # TODO: This is the number of steps, not necessarily the number of cells
      lenBefore = len(self.activeL6Cells[m])
      del self.activeL6Cells[m][self.maxActivePerModule:]
      lenAfter = len(self.activeL6Cells[m])
      #assert lenBefore == lenAfter, "Debug assert to check that we aren't hitting limit on L6 activity. Can remove when we set max active low enough relative to object size (times number of train/test iterations)"

    self.activeL6BeforeMotor = [list(itertools.chain(*l6Module))
                                for l6Module in self.activeL6Cells]

    # Replace l5 activity with new transforms
    self.activeL5Cells = []
    for activeL6Module in self.activeL6Cells:
      transforms = set()
      for newCell in activeL6Module[0]:
        for prevCell in itertools.chain(*activeL6Module[1:]):
          if newCell == prevCell:
            continue
          # Transform from prev to new
          t1 = bind(prevCell, newCell, self.moduleDimensions)
          transforms.add(t1)
          # Transform from new to prev
          t2 = bind(newCell, prevCell, self.moduleDimensions)
          transforms.add(t2)
      self.activeL5Cells.append(list(transforms))


    # Pool into object representation
    classifierLearn = objClass is not None
    globalL5ActiveCells = sorted(getGlobalIndices(self.activeL5Cells, self._cellsPerModule))
    self.pooler.compute(feedforwardInput=globalL5ActiveCells,
                        learn=classifierLearn,
                        predictedInput=globalL5ActiveCells)

    # Classifier
    classifierInput = np.zeros((self.pooler.numberOfCells(),), dtype=np.uint32)
    classifierInput[self.pooler.getActiveCells()] = 1
    #print classifierInput.nonzero()
    #print self.pooler.getActiveCells()
    #print
    self.prediction = self.classifier.infer(classifierInput)
    if objClass is not None:
      self.classifier.learn(classifierInput, objClass)

    # MOTOR

    # Update L6 based on motor command
    numActivePerModuleBefore = [sum([len(cells) for cells in active]) for active in self.activeL6Cells]

    self.activeL6Cells = [
        [[pathIntegrate(c, self.moduleDimensions, delta)
          for c in steps]
         for steps in prevActiveCells]
        for prevActiveCells in self.activeL6Cells]

    numActivePerModuleAfter = [sum([len(cells) for cells in active]) for active in self.activeL6Cells]
    assert numActivePerModuleAfter == numActivePerModuleBefore

    # FEEDBACK

    # Get all transforms associated with object
    # TODO: Get transforms from object in addition to current activity
    predictiveTransforms = [l5Active for l5Active in self.activeL5Cells]

    # Get set of predicted l6 representations (including already active)
    # and store them for next step l4 compute
    self.predictedL6Cells = []
    for l6, l5 in itertools.izip(self.activeL6Cells, predictiveTransforms):
      predictedCells = []
      for activeL6Cell in set(itertools.chain(*l6)):
        for activeL5Cell in l5:
          predictedCell = unbind(activeL6Cell, activeL5Cell, self.moduleDimensions)
          predictedCells.append(predictedCell)
      self.predictedL6Cells.append(set(
          list(itertools.chain(*l6)) + predictedCells))

    # Log this step
    if outputFile:
      log = RelationalMemoryLog.new_message()
      log.ts = time.time()
      sensationProto = log.init("sensation", len(ff))
      for i in xrange(len(ff)):
        sensationProto[i] = int(ff[i])
      predictedL4Proto = log.init("predictedL4", len(predictedL4Cells))
      for i in xrange(len(predictedL4Cells)):
        predictedL4Proto[i] = int(predictedL4Cells[i])
      activeL4Proto = log.init("activeL4", len(activeL4Cells))
      for i in xrange(len(activeL4Cells)):
        activeL4Proto[i] = int(activeL4Cells[i])
      activeL6HistoryProto = log.init("activeL6History", len(self.activeL6Cells))
      for i in xrange(len(self.activeL6Cells)):
        activeL6ModuleProto = activeL6HistoryProto.init(i, len(self.activeL6Cells[i]))
        for j in xrange(len(self.activeL6Cells[i])):
          activeL6ModuleStepProto = activeL6ModuleProto.init(j, len(self.activeL6Cells[i][j]))
          for k in xrange(len(self.activeL6Cells[i][j])):
            activeL6ModuleStepProto[k] = int(self.activeL6Cells[i][j][k])
      activeL5Proto = log.init("activeL5", len(self.activeL5Cells))
      for i in xrange(len(self.activeL5Cells)):
        activeL5ModuleProto = activeL5Proto.init(i, len(self.activeL5Cells[i]))
        for j in xrange(len(self.activeL5Cells[i])):
          activeL5ModuleProto[j] = int(self.activeL5Cells[i][j])

      classifierResults = [(i, distance)
                           for i, distance in enumerate(self.prediction[2])
                           if distance is not None]
      classifierResultsProto = log.init("classifierResults",
                                        len(classifierResults))
      for i in xrange(len(classifierResults)):
        classifierResultProto = classifierResultsProto[i]
        classifierResultProto.label = classifierResults[i][0]
        classifierResultProto.distance = float(classifierResults[i][1])

      motorDeltaProto = log.init("motorDelta", len(delta))
      for i in xrange(len(delta)):
        motorDeltaProto[i] = int(delta[i])
      predictedL6Proto = log.init("predictedL6", len(self.predictedL6Cells))
      for i in xrange(len(self.predictedL6Cells)):
        predictedL6ModuleProto = predictedL6Proto.init(i, len(self.predictedL6Cells[i]))
        for j, c in enumerate(self.predictedL6Cells[i]):
          predictedL6ModuleProto[j] = int(c)

      json.dump(log.to_dict(), outputFile)
      outputFile.write("\n")
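
The helpers bind, unbind, and pathIntegrate are called above but not defined in this
listing. A self-consistent reading, assuming each module's cells index a small
N-dimensional grid and transforms are coordinate offsets with wrap-around, is sketched
below; the real helpers may differ, so treat these as illustrative placeholders.

import numpy as np

def _coords(cell, dims):
  # Flat cell index -> integer grid coordinates within one module.
  return np.array(np.unravel_index(cell, dims))

def bind(cellA, cellB, dims):
  # Encode, as a cell index, the offset that moves cellA onto cellB.
  offset = (_coords(cellB, dims) - _coords(cellA, dims)) % np.array(dims)
  return int(np.ravel_multi_index(tuple(offset), dims))

def unbind(cell, transformCell, dims):
  # Apply a stored offset to a cell, wrapping around the module edges.
  target = (_coords(cell, dims) + _coords(transformCell, dims)) % np.array(dims)
  return int(np.ravel_multi_index(tuple(target), dims))

def pathIntegrate(cell, dims, delta):
  # Shift a cell by a motor delta expressed in module coordinates.
  target = (_coords(cell, dims) + np.array(delta, dtype=int)) % np.array(dims)
  return int(np.ravel_multi_index(tuple(target), dims))

With this reading, unbind(prevCell, bind(prevCell, newCell, dims), dims) == newCell,
which is the property the FEEDBACK stage relies on when it predicts the next step's
L6 activity from the current activity and the stored L5 transforms.
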
Example #22
0
class ColumnPoolerRegion(PyRegion):
  """
  The ColumnPoolerRegion implements an L2 layer within a single cortical column / cortical
  module.

  The layer supports feedforward (proximal) and lateral (distal) inputs.
  """

  @classmethod
  def getSpec(cls):
    """
    Return the Spec for ColumnPoolerRegion.

    The parameters collection is constructed based on the parameters specified
    by the various components (tmSpec and otherSpec)
    """
    spec = dict(
      description=ColumnPoolerRegion.__doc__,
      singleNodeOnly=True,
      inputs=dict(
        feedforwardInput=dict(
          description="The primary feed-forward input to the layer, this is a"
                      " binary array containing 0's and 1's",
          dataType="Real32",
          count=0,
          required=True,
          regionLevel=True,
          isDefaultInput=True,
          requireSplitterMap=False),

        feedforwardGrowthCandidates=dict(
          description=("An array of 0's and 1's representing feedforward input " +
                       "that can be learned on new proximal synapses. If this " +
                       "input isn't provided, the whole feedforwardInput is "
                       "used."),
          dataType="Real32",
          count=0,
          required=False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),

        predictedInput=dict(
          description=("An array of 0s and 1s representing input cells that " +
                       "are predicted to become active in the next time step. " +
                       "If this input is not provided, some features related " +
                       "to online learning may not function properly."),
          dataType="Real32",
          count=0,
          required=False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),

        lateralInput=dict(
          description="Lateral binary input into this column, presumably from"
                      " other neighboring columns.",
          dataType="Real32",
          count=0,
          required=False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),

        resetIn=dict(
          description="A boolean flag that indicates whether"
                      " or not the input vector received in this compute cycle"
                      " represents the first presentation in a"
                      " new temporal sequence.",
          dataType='Real32',
          count=1,
          required=False,
          regionLevel=True,
          isDefaultInput=False,
          requireSplitterMap=False),

      ),
      outputs=dict(
        feedForwardOutput=dict(
          description="The default output of ColumnPoolerRegion. By default this"
                      " outputs the active cells. You can change this "
                      " dynamically using the defaultOutputType parameter.",
          dataType="Real32",
          count=0,
          regionLevel=True,
          isDefaultOutput=True),

        activeCells=dict(
          description="A binary output containing a 1 for every"
                      " cell that is currently active.",
          dataType="Real32",
          count=0,
          regionLevel=True,
          isDefaultOutput=False),

      ),
      parameters=dict(
        learningMode=dict(
          description="Whether the node is learning (default True).",
          accessMode="ReadWrite",
          dataType="Bool",
          count=1,
          defaultValue="true"),
        onlineLearning=dict(
          description="Whether to use onlineLearning or not (default False).",
          accessMode="ReadWrite",
          dataType="Bool",
          count=1,
          defaultValue="false"),
        learningTolerance=dict(
          description="How much variation in SDR size to accept when learning. "
                      "Only has an effect if online learning is enabled. "
                      "Should be at most 1 - inertiaFactor.",
          accessMode="ReadWrite",
          dataType="Real32",
          count=1,
          defaultValue="false"),
        cellCount=dict(
          description="Number of cells in this layer",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        inputWidth=dict(
          description='Number of inputs to the layer.',
          accessMode='Read',
          dataType='UInt32',
          count=1,
          constraints=''),
        numOtherCorticalColumns=dict(
          description="The number of lateral inputs that this L2 will receive. "
                      "This region assumes that every lateral input is of size "
                      "'cellCount'.",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        sdrSize=dict(
          description="The number of active cells invoked per object",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        maxSdrSize=dict(
          description="The largest number of active cells in an SDR tolerated "
                      "during learning. Stops learning when unions are active.",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        minSdrSize=dict(
          description="The smallest number of active cells in an SDR tolerated "
                      "during learning.  Stops learning when possibly on a "
                      "different object or sequence",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),

        #
        # Proximal
        #
        synPermProximalInc=dict(
          description="Amount by which permanences of proximal synapses are "
                      "incremented during learning.",
          accessMode="Read",
          dataType="Real32",
          count=1),
        synPermProximalDec=dict(
          description="Amount by which permanences of proximal synapses are "
                      "decremented during learning.",
          accessMode="Read",
          dataType="Real32",
          count=1),
        initialProximalPermanence=dict(
          description="Initial permanence of a new proximal synapse.",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),
        sampleSizeProximal=dict(
          description="The desired number of active synapses for an active cell",
          accessMode="Read",
          dataType="Int32",
          count=1),
        minThresholdProximal=dict(
          description="If the number of synapses active on a proximal segment "
                      "is at least this threshold, it is considered as a "
                      "candidate active cell",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        connectedPermanenceProximal=dict(
          description="If the permanence value for a synapse is greater "
                      "than this value, it is said to be connected.",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),
        predictedInhibitionThreshold=dict(
          description="How many predicted cells are required to cause "
                      "inhibition in the pooler.  Only has an effect if online "
                      "learning is enabled.",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),

        #
        # Distal
        #
        synPermDistalInc=dict(
          description="Amount by which permanences of synapses are "
                      "incremented during learning.",
          accessMode="Read",
          dataType="Real32",
          count=1),
        synPermDistalDec=dict(
          description="Amount by which permanences of synapses are "
                      "decremented during learning.",
          accessMode="Read",
          dataType="Real32",
          count=1),
        initialDistalPermanence=dict(
          description="Initial permanence of a new synapse.",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),
        sampleSizeDistal=dict(
          description="The desired number of active synapses for an active "
                      "segment.",
          accessMode="Read",
          dataType="Int32",
          count=1),
        activationThresholdDistal=dict(
          description="If the number of synapses active on a distal segment is "
                      "at least this threshold, the segment is considered "
                      "active",
          accessMode="Read",
          dataType="UInt32",
          count=1,
          constraints=""),
        connectedPermanenceDistal=dict(
          description="If the permanence value for a synapse is greater "
                      "than this value, it is said to be connected.",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),
        inertiaFactor=dict(
          description="Controls the proportion of previously active cells that "
                      "remain active through inertia in the next timestep (in  "
                      "the absence of inhibition).",
          accessMode="Read",
          dataType="Real32",
          count=1,
          constraints=""),



        seed=dict(
          description="Seed for the random number generator.",
          accessMode="Read",
          dataType="UInt32",
          count=1),
        defaultOutputType=dict(
          description="Controls what type of cell output is placed into"
                      " the default output 'feedForwardOutput'",
          accessMode="ReadWrite",
          dataType="Byte",
          count=0,
          constraints="enum: active,predicted,predictedActiveCells",
          defaultValue="active"),
      ),
      commands=dict(
        reset=dict(description="Explicitly reset TM states now."),
      )
    )

    return spec


  def __init__(self,
               cellCount=4096,
               inputWidth=16384,
               numOtherCorticalColumns=0,
               sdrSize=40,
               onlineLearning = False,
               maxSdrSize = None,
               minSdrSize = None,

               # Proximal
               synPermProximalInc=0.1,
               synPermProximalDec=0.001,
               initialProximalPermanence=0.6,
               sampleSizeProximal=20,
               minThresholdProximal=1,
               connectedPermanenceProximal=0.50,
               predictedInhibitionThreshold=20,

               # Distal
               synPermDistalInc=0.10,
               synPermDistalDec=0.10,
               initialDistalPermanence=0.21,
               sampleSizeDistal=20,
               activationThresholdDistal=13,
               connectedPermanenceDistal=0.50,
               inertiaFactor=1.,

               seed=42,
               defaultOutputType = "active",
               **kwargs):

    # Used to derive Column Pooler params
    self.numOtherCorticalColumns = numOtherCorticalColumns

    # Column Pooler params
    self.inputWidth = inputWidth
    self.cellCount = cellCount
    self.sdrSize = sdrSize
    self.onlineLearning = onlineLearning
    self.maxSdrSize = maxSdrSize
    self.minSdrSize = minSdrSize
    self.synPermProximalInc = synPermProximalInc
    self.synPermProximalDec = synPermProximalDec
    self.initialProximalPermanence = initialProximalPermanence
    self.sampleSizeProximal = sampleSizeProximal
    self.minThresholdProximal = minThresholdProximal
    self.connectedPermanenceProximal = connectedPermanenceProximal
    self.predictedInhibitionThreshold = predictedInhibitionThreshold
    self.synPermDistalInc = synPermDistalInc
    self.synPermDistalDec = synPermDistalDec
    self.initialDistalPermanence = initialDistalPermanence
    self.sampleSizeDistal = sampleSizeDistal
    self.activationThresholdDistal = activationThresholdDistal
    self.connectedPermanenceDistal = connectedPermanenceDistal
    self.inertiaFactor = inertiaFactor
    self.seed = seed

    # Region params
    self.learningMode = True
    self.defaultOutputType = defaultOutputType

    self._pooler = None

    PyRegion.__init__(self, **kwargs)


  def initialize(self):
    """
    Initialize the internal objects.
    """
    if self._pooler is None:
      params = {
        "inputWidth": self.inputWidth,
        "lateralInputWidths": [self.cellCount] * self.numOtherCorticalColumns,
        "cellCount": self.cellCount,
        "sdrSize": self.sdrSize,
        "onlineLearning": self.onlineLearning,
        "maxSdrSize": self.maxSdrSize,
        "minSdrSize": self.minSdrSize,
        "synPermProximalInc": self.synPermProximalInc,
        "synPermProximalDec": self.synPermProximalDec,
        "initialProximalPermanence": self.initialProximalPermanence,
        "minThresholdProximal": self.minThresholdProximal,
        "sampleSizeProximal": self.sampleSizeProximal,
        "connectedPermanenceProximal": self.connectedPermanenceProximal,
        "predictedInhibitionThreshold": self.predictedInhibitionThreshold,
        "synPermDistalInc": self.synPermDistalInc,
        "synPermDistalDec": self.synPermDistalDec,
        "initialDistalPermanence": self.initialDistalPermanence,
        "activationThresholdDistal": self.activationThresholdDistal,
        "sampleSizeDistal": self.sampleSizeDistal,
        "connectedPermanenceDistal": self.connectedPermanenceDistal,
        "inertiaFactor": self.inertiaFactor,
        "seed": self.seed,
      }
      self._pooler = ColumnPooler(**params)


  def compute(self, inputs, outputs):
    """
    Run one iteration of compute.

    Note that if the reset signal is True (1) we assume this iteration
    represents the *end* of a sequence. The output will contain the
    representation to this point and any history will then be reset. The output
    at the next compute will start fresh, presumably with bursting columns.
    """
    # Handle reset first (should be sent with an empty signal)
    if "resetIn" in inputs:
      assert len(inputs["resetIn"]) == 1
      if inputs["resetIn"][0] != 0:
        # send empty output
        self.reset()
        outputs["feedForwardOutput"][:] = 0
        outputs["activeCells"][:] = 0
        return

    feedforwardInput = numpy.asarray(inputs["feedforwardInput"].nonzero()[0],
                                     dtype="uint32")

    if "feedforwardGrowthCandidates" in inputs:
      feedforwardGrowthCandidates = numpy.asarray(
        inputs["feedforwardGrowthCandidates"].nonzero()[0], dtype="uint32")
    else:
      feedforwardGrowthCandidates = feedforwardInput

    if "lateralInput" in inputs:
      lateralInputs = tuple(numpy.asarray(singleInput.nonzero()[0],
                                          dtype="uint32")
                            for singleInput
                            in numpy.split(inputs["lateralInput"],
                                           self.numOtherCorticalColumns))
    else:
      lateralInputs = ()

    if "predictedInput" in inputs:
      predictedInput = numpy.asarray(
        inputs["predictedInput"].nonzero()[0], dtype="uint32")
    else:
      predictedInput = None

    # Send the inputs into the Column Pooler.
    self._pooler.compute(feedforwardInput, lateralInputs,
                         feedforwardGrowthCandidates, learn=self.learningMode,
                         predictedInput = predictedInput)

    # Extract the active / predicted cells and put them into binary arrays.
    outputs["activeCells"][:] = 0
    outputs["activeCells"][self._pooler.getActiveCells()] = 1

    # Send appropriate output to feedForwardOutput.
    if self.defaultOutputType == "active":
      outputs["feedForwardOutput"][:] = outputs["activeCells"]
    else:
      raise Exception("Unknown outputType: " + self.defaultOutputType)


  def reset(self):
    """ Reset the state of the layer"""
    if self._pooler is not None:
      self._pooler.reset()


  def getParameter(self, parameterName, index=-1):
    """
    Get the value of a NodeSpec parameter. Most parameters are handled
    automatically by PyRegion's parameter get mechanism. The ones that need
    special treatment are explicitly handled here.
    """
    return PyRegion.getParameter(self, parameterName, index)


  def setParameter(self, parameterName, index, parameterValue):
    """
    Set the value of a Spec parameter.
    """
    if hasattr(self, parameterName):
      setattr(self, parameterName, parameterValue)
    else:
      raise Exception("Unknown parameter: " + parameterName)


  def getOutputElementCount(self, name):
    """
    Return the number of elements for the given output.
    """
    if name in ["feedForwardOutput", "activeCells"]:
      return self.cellCount
    else:
      raise Exception("Invalid output name specified: " + name)


  def getAlgorithmInstance(self):
    """
    Return the underlying ColumnPooler instance.
    """
    return self._pooler
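
The compute() method above receives dense 0/1 arrays from the Network API and converts
them to sparse index arrays before calling the pooler; the lateral input from all
neighboring columns arrives concatenated and is split into one array per column. A
small standalone illustration of that conversion (the sizes and active bits here are
arbitrary):

import numpy

cellCount = 4096
numOtherCorticalColumns = 2

denseFeedforward = numpy.zeros(16384, dtype="float32")
denseFeedforward[[3, 17, 512]] = 1
denseLateral = numpy.zeros(cellCount * numOtherCorticalColumns, dtype="float32")
denseLateral[[10, cellCount + 20]] = 1

feedforwardInput = numpy.asarray(denseFeedforward.nonzero()[0], dtype="uint32")
lateralInputs = tuple(numpy.asarray(single.nonzero()[0], dtype="uint32")
                      for single in numpy.split(denseLateral,
                                                numOtherCorticalColumns))

print feedforwardInput   # [  3  17 512]
print lateralInputs      # (array([10], dtype=uint32), array([20], dtype=uint32))
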
Example #24
0
class SingleLayerLocation2DExperiment(object):
    """
  The experiment code organized into a class.
  """
    def __init__(self, diameter, objects, featureNames):
        self.diameter = diameter

        self.objects = objects

        # A grid of location SDRs.
        self.locations = dict(
            ((i, j),
             np.array(sorted(random.sample(xrange(1000), 30)), dtype="uint32"))
            for i in xrange(diameter) for j in xrange(diameter))

        # 8 transition SDRs -- one for each straight and diagonal direction.
        self.transitions = dict(
            ((i, j),
             np.array(sorted(random.sample(xrange(1000), 30)), dtype="uint32"))
            for i in xrange(-1, 2) for j in xrange(-1, 2) if i != 0 or j != 0)

        self.features = dict(
            (k,
             np.array(sorted(random.sample(xrange(150), 15)), dtype="uint32"))
            for k in featureNames)

        self.locationLayer = SingleLayerLocationMemory(
            **{
                "cellCount": 1000,
                "deltaLocationInputSize": 1000,
                "featureLocationInputSize": 150 * 32,
                "sampleSize": 15,
                "activationThreshold": 10,
                "learningThreshold": 8,
            })

        self.inputLayer = ApicalTiebreakTemporalMemory(
            **{
                "columnCount": 150,
                "cellsPerColumn": 32,
                "basalInputSize": 1000,
                "apicalInputSize": 4096,
            })

        self.objectLayer = ColumnPooler(**{"inputWidth": 150 * 32})

        # Use these for classifying SDRs and for testing whether they're correct.
        self.inputRepresentations = {}
        self.objectRepresentations = {}
        self.learnedObjectPlacements = {}

        self.monitors = {}
        self.nextMonitorToken = 1

    def addMonitor(self, monitor):
        """
    Subscribe to SingleLayerLocation2DExperiment events.

    @param monitor (SingleLayer2DExperimentMonitor)
    An object that implements a set of monitor methods

    @return (object)
    An opaque object that can be used to refer to this monitor.
    """

        token = self.nextMonitorToken
        self.nextMonitorToken += 1

        self.monitors[token] = monitor

        return token

    def removeMonitor(self, monitorToken):
        """
    Unsubscribe from SingleLayerLocation2DExperiment events.

    @param monitorToken (object)
    The return value of addMonitor() from when this monitor was added
    """
        del self.monitors[monitorToken]

    def doTimestep(self, locationSDR, transitionSDR, featureSDR,
                   egocentricLocation, learn):
        """
    Run one timestep.
    """

        for monitor in self.monitors.values():
            monitor.beforeTimestep(locationSDR, transitionSDR, featureSDR,
                                   egocentricLocation, learn)

        params = {
            "newLocation":
            locationSDR,
            "deltaLocation":
            transitionSDR,
            "featureLocationInput":
            self.inputLayer.getActiveCells(),
            "featureLocationGrowthCandidates":
            self.inputLayer.getPredictedActiveCells(),
            "learn":
            learn,
        }
        self.locationLayer.compute(**params)
        for monitor in self.monitors.values():
            monitor.afterLocationCompute(**params)

        params = {
            "activeColumns": featureSDR,
            "basalInput": self.locationLayer.getActiveCells(),
            "apicalInput": self.objectLayer.getActiveCells(),
        }
        self.inputLayer.compute(**params)
        for monitor in self.monitors.values():
            monitor.afterInputCompute(**params)

        params = {
            "feedforwardInput":
            self.inputLayer.getActiveCells(),
            "feedforwardGrowthCandidates":
            self.inputLayer.getPredictedActiveCells(),
            "learn":
            learn,
        }
        self.objectLayer.compute(**params)
        for monitor in self.monitors.values():
            monitor.afterObjectCompute(**params)

    def learnTransitions(self):
        """
    Train the location layer to do path integration. For every location, teach
    it each previous-location + motor command pair.
    """

        print "Learning transitions"
        for (i, j), locationSDR in self.locations.iteritems():
            print "i, j", (i, j)
            for (di, dj), transitionSDR in self.transitions.iteritems():
                i2 = i + di
                j2 = j + dj
                if (0 <= i2 < self.diameter and 0 <= j2 < self.diameter):
                    for _ in xrange(5):
                        self.locationLayer.reset()
                        self.locationLayer.compute(
                            newLocation=self.locations[(i, j)])
                        self.locationLayer.compute(
                            deltaLocation=transitionSDR,
                            newLocation=self.locations[(i2, j2)])

        self.locationLayer.reset()

    def learnObjects(self, objectPlacements):
        """
    Learn each provided object in egocentric space. Touch every location on each
    object.

    This method doesn't try to move the sensor along a path. Instead it just leaps
    the sensor to each object location, resetting the location layer with each
    leap.

    This method simultaneously learns 4 sets of synapses:
    - location -> input
    - input -> location
    - input -> object
    - object -> input
    """
        for monitor in self.monitors.values():
            monitor.afterPlaceObjects(objectPlacements)

        for objectName, objectDict in self.objects.iteritems():
            self.reset()

            objectPlacement = objectPlacements[objectName]

            for locationName, featureName in objectDict.iteritems():
                egocentricLocation = (locationName[0] + objectPlacement[0],
                                      locationName[1] + objectPlacement[1])

                locationSDR = self.locations[egocentricLocation]
                featureSDR = self.features[featureName]
                transitionSDR = np.empty(0)

                self.locationLayer.reset()
                self.inputLayer.reset()

                for _ in xrange(10):
                    self.doTimestep(locationSDR,
                                    transitionSDR,
                                    featureSDR,
                                    egocentricLocation,
                                    learn=True)

                self.inputRepresentations[(
                    featureName,
                    egocentricLocation)] = (self.inputLayer.getActiveCells())

            self.objectRepresentations[
                objectName] = self.objectLayer.getActiveCells()
            self.learnedObjectPlacements[objectName] = objectPlacement

    def _selectTransition(self, allocentricLocation, objectDict, visitCounts):
        """
    Choose the transition that lands us in the location we've touched the least
    often. Break ties randomly, i.e. choose the first candidate in a shuffled
    list.
    """

        candidates = list(
            transition for transition in self.transitions.keys()
            if (allocentricLocation[0] + transition[0],
                allocentricLocation[1] + transition[1]) in objectDict)
        random.shuffle(candidates)

        selectedVisitCount = None
        selectedTransition = None
        selectedAllocentricLocation = None

        for transition in candidates:
            candidateLocation = (allocentricLocation[0] + transition[0],
                                 allocentricLocation[1] + transition[1])

            if (selectedVisitCount is None
                    or visitCounts[candidateLocation] < selectedVisitCount):
                selectedVisitCount = visitCounts[candidateLocation]
                selectedTransition = transition
                selectedAllocentricLocation = candidateLocation

        return selectedAllocentricLocation, selectedTransition

    def inferObject(self,
                    objectPlacements,
                    objectName,
                    startPoint,
                    transitionSequence,
                    settlingTime=2):
        for monitor in self.monitors.values():
            monitor.afterPlaceObjects(objectPlacements)

        objectDict = self.objects[objectName]

        self.reset()

        allocentricLocation = startPoint
        nextTransitionSDR = np.empty(0, dtype="uint32")

        transitionIterator = iter(transitionSequence)

        try:
            while True:
                featureName = objectDict[allocentricLocation]
                egocentricLocation = (allocentricLocation[0] +
                                      objectPlacements[objectName][0],
                                      allocentricLocation[1] +
                                      objectPlacements[objectName][1])
                featureSDR = self.features[featureName]

                steps = ([nextTransitionSDR] + [np.empty(0)] * settlingTime)
                for transitionSDR in steps:
                    self.doTimestep(np.empty(0),
                                    transitionSDR,
                                    featureSDR,
                                    egocentricLocation,
                                    learn=False)

                transitionName = transitionIterator.next()
                allocentricLocation = (allocentricLocation[0] +
                                       transitionName[0],
                                       allocentricLocation[1] +
                                       transitionName[1])
                nextTransitionSDR = self.transitions[transitionName]
        except StopIteration:
            pass

    def inferObjectsWithRandomMovements(self,
                                        objectPlacements,
                                        maxTouches=20,
                                        settlingTime=2):
        """
    Infer each object without any location input.
    """

        for monitor in self.monitors.values():
            monitor.afterPlaceObjects(objectPlacements)

        for objectName, objectDict in self.objects.iteritems():
            self.reset()

            visitCounts = defaultdict(int)

            learnedObjectPlacement = self.learnedObjectPlacements[objectName]

            allocentricLocation = random.choice(objectDict.keys())
            nextTransitionSDR = np.empty(0, dtype="uint32")

            # Traverse the object until it is inferred.
            success = False

            for _ in xrange(maxTouches):
                featureName = objectDict[allocentricLocation]
                egocentricLocation = (allocentricLocation[0] +
                                      objectPlacements[objectName][0],
                                      allocentricLocation[1] +
                                      objectPlacements[objectName][1])
                featureSDR = self.features[featureName]

                steps = ([nextTransitionSDR] + [np.empty(0)] * settlingTime)
                for transitionSDR in steps:
                    self.doTimestep(np.empty(0),
                                    transitionSDR,
                                    featureSDR,
                                    egocentricLocation,
                                    learn=False)

                visitCounts[allocentricLocation] += 1

                # We should eventually infer the egocentric location where we originally
                # learned this location on the object.
                learnedEgocentricLocation = (allocentricLocation[0] +
                                             learnedObjectPlacement[0],
                                             allocentricLocation[1] +
                                             learnedObjectPlacement[1])

                if (set(self.objectLayer.getActiveCells()) == set(
                        self.objectRepresentations[objectName])
                        and set(self.inputLayer.getActiveCells()) == set(
                            self.inputRepresentations[(
                                featureName, learnedEgocentricLocation)])
                        and set(self.locationLayer.getActiveCells()) == set(
                            self.locations[learnedEgocentricLocation])):
                    success = True
                    break
                else:
                    allocentricLocation, transitionName = self._selectTransition(
                        allocentricLocation, objectDict, visitCounts)
                    nextTransitionSDR = self.transitions[transitionName]

    def reset(self):
        self.locationLayer.reset()
        self.objectLayer.reset()
        self.inputLayer.reset()

        for monitor in self.monitors.values():
            monitor.afterReset()
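
A compact restatement of the exploration policy that _selectTransition implements
above: among the transitions that stay on the object, move to the destination touched
least often, breaking ties randomly via the shuffle. This sketch assumes at least one
candidate exists and that visitCounts defaults missing locations to zero.

import random

def selectLeastVisited(location, objectDict, visitCounts, transitions):
  candidates = [t for t in transitions
                if (location[0] + t[0], location[1] + t[1]) in objectDict]
  random.shuffle(candidates)
  best = min(candidates,
             key=lambda t: visitCounts.get((location[0] + t[0],
                                            location[1] + t[1]), 0))
  return (location[0] + best[0], location[1] + best[1]), best
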
def test_apical_dependent_TM_learning(sequenceLen, numSequences, sharedRange, seed, training_iters):
  TM = ApicalDependentSequenceMemory(**getDefaultL4Params(2048))
  pooler = ColumnPooler(**getDefaultL2Params(2048, seed))


  print "Generating sequences..."
  sequenceMachine, generatedSequences, numbers = generateSequences(
    sequenceLength=sequenceLen, sequenceCount=numSequences,
    sharedRange=sharedRange, n = 2048, w = 40, seed=seed)

  sequences = convertSequenceMachineSequence(generatedSequences)

  pooler_representations = []
  s = 0

  characters = {}
  char_sequences = []

  sequence_order = range(numSequences)
  for i in xrange(training_iters):
    random.shuffle(sequence_order)
    for s in sequence_order:
      sequence = sequences[s]
      pooler_representation = numpy.asarray([], dtype = "int")
      TM_representation = numpy.asarray([], dtype = "int")
      char_sequences.append([])
      total_pooler_representation = set()
      t = 0
      for timestep in sequence:
        datapoint = numpy.asarray(list(timestep), dtype = "int")
        datapoint.sort()
        TM.compute(activeColumns = datapoint,
                   apicalInput = pooler_representation,
                   learn = True)
        TM_representation = TM.activeCells
        winners = TM.winnerCells
        predicted_cells = TM.predictedCells
        #megabursting = TM.megabursting
        #if i > 0:
        #  import ipdb; ipdb.set_trace()
        pooler.compute(feedforwardInput = TM_representation,
                       feedforwardGrowthCandidates = winners,
                       lateralInputs = (pooler_representation,),
                       predictedInput = predicted_cells,
                       learn = True)
        pooler_representation = pooler.activeCells
        if i == training_iters - 1 and t > 0:
          total_pooler_representation |= set(pooler_representation)
          print len(pooler_representation)
        #print pooler_representation, len(pooler_representation), (s, t)
        t += 1

      pooler.reset()
      if i == training_iters - 1:
        pooler_representations.append(total_pooler_representation)
      s += 1

  representations = pooler_representations
  #print representations
  for i in range(len(representations)):
    for j in range(i):
      print (i, j), "overlap:", len(representations[i] & representations[j]), "Length of i:", len(representations[i])