Example #1
  def boostTestPhase2(self):

    y = numpy.zeros(self.columnDimensions, dtype = uintType)
    # Do 10 training batches through the input patterns
    for _ in range(10):
      for idx, v in enumerate(self.x):
        y.fill(0)
        self.sp.compute(v, True, y)
        self.winningIteration[y.nonzero()[0]] = self.sp.getIterationLearnNum()
        self.lastSDR[idx] = y.copy()

    # All the never-active columns should have duty cycle of 0
    dutyCycles = numpy.zeros(self.columnDimensions, dtype = GetNTAReal())
    self.sp.getActiveDutyCycles(dutyCycles)
    self.assertEqual(dutyCycles[self.winningIteration == 0].sum(), 0,
                     "Inactive columns have positive duty cycle.")

    boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
    self.sp.getBoostFactors(boost)
    self.assertLessEqual(numpy.max(boost[numpy.where(dutyCycles>0.1)]), 1.0,
                "Strongly active columns have high boost factors")
    self.assertGreaterEqual(numpy.min(boost[numpy.where(dutyCycles<0.1)]), 1.0,
                "Weakly active columns have low boost factors")

    # By now, every column should have been sufficiently boosted to win at least
    # once. The number of columns that have never won should now be 0
    numLosersAfter = (self.winningIteration == 0).sum()
    self.assertEqual(numLosersAfter, 0)

    # Because of the artificially induced thrashing, even the first two patterns
    # should have low overlap. Verify that the first two SDR's now have little
    # overlap
    self.assertLess(_computeOverlap(self.lastSDR[0], self.lastSDR[1]), 7,
                    "First two SDR's overlap significantly when they "
                    "shouldn't")
Example #2
    def debugPrint(self):
        """
    Helpful debug print statements while debugging this test.
    """

        minDutyCycle = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
        self.sp.getMinActiveDutyCycles(minDutyCycle)

        activeDutyCycle = numpy.zeros(self.columnDimensions,
                                      dtype=GetNTAReal())
        self.sp.getActiveDutyCycles(activeDutyCycle)

        boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
        self.sp.getBoostFactors(boost)
        print "\n--------- ITERATION", (
            self.sp.getIterationNum()), "-----------------------"
        print "SP implementation:", self.spImplementation
        print "Learning iteration:",
        print "minactiveDutyCycle (lower cycles cause boosting to start):", (
            minDutyCycle[0])
        print "Max/min active duty cycle:", (activeDutyCycle.max(),
                                             activeDutyCycle.min())
        print "Average non-zero active duty cycle:", (
            activeDutyCycle[activeDutyCycle > 0].mean())
        print "Active duty cycle", activeDutyCycle
        print
        print "Boost factor for sp:", boost
        print
        print "Last winning iteration for each column"
        print self.winningIteration
        print "Number of columns that have won at some point:", (
            self.columnDimensions - (self.winningIteration == 0).sum())
Example #3
    def boostTestPhase1(self):

        y = numpy.zeros(self.columnDimensions, dtype=uintType)

        # Do one training batch through the input patterns
        for idx, v in enumerate(self.x):
            y.fill(0)
            self.sp.compute(v, True, y)
            self.winningIteration[y.nonzero()
                                  [0]] = self.sp.getIterationLearnNum()
            self.lastSDR[idx] = y.copy()

        # The boost factor for all columns should be at 1.
        boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
        self.sp.getBoostFactors(boost)
        self.assertEqual((boost == 1).sum(), self.columnDimensions,
                         "Boost factors are not all 1")

        # At least half of the columns should have never been active.
        self.assertGreaterEqual(
            (self.winningIteration == 0).sum(), self.columnDimensions / 2,
            "More than half of the columns have been active")

        # All the never-active columns should have duty cycle of 0
        # All the at-least-once-active columns should have duty cycle >= 0.2
        dutyCycles = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
        self.sp.getActiveDutyCycles(dutyCycles)
        self.assertEqual(dutyCycles[self.winningIteration == 0].sum(), 0,
                         "Inactive columns have positive duty cycle.")
        self.assertGreaterEqual(
            dutyCycles[self.winningIteration > 0].min(), 0.2,
            "Active columns have duty cycle that is too low.")

        self.verifySDRProperties()
Example #4
    def debugPrint(self):
        """
    Helpful debug print statements while debugging this test.
    """

        activeDutyCycle = numpy.zeros(self.columnDimensions,
                                      dtype=GetNTAReal())
        self.sp.getActiveDutyCycles(activeDutyCycle)

        boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
        self.sp.getBoostFactors(boost)
        print("\n--------- ITERATION", (self.sp.getIterationNum()),
              "-----------------------")
        print("SP implementation:", self.spImplementation)
        print("Learning iteration:", end=' ')
        print("Max/min active duty cycle:",
              (activeDutyCycle.max(), activeDutyCycle.min()))
        print("Average non-zero active duty cycle:",
              (activeDutyCycle[activeDutyCycle > 0].mean()))
        print("Active duty cycle", activeDutyCycle)
        print()
        print("Boost factor for sp:", boost)
        print()
        print("Last winning iteration for each column")
        print(self.winningIteration)
        print("Number of columns that have won at some point:",
              (self.columnDimensions - (self.winningIteration == 0).sum()))
Example #5
  def initialize(self):
    # Zero out the spatial output in case it is requested
    self._spatialPoolerOutput = numpy.zeros(self.columnCount,
                                            dtype=GetNTAReal())

    # Zero out the rfInput in case it is requested
    self._spatialPoolerInput = numpy.zeros((1, self.inputWidth),
                                           dtype=GetNTAReal())

    # Allocate the spatial pooler
    self._allocateSpatialFDR(None)
Example #6
    def compute(self, inputs, outputs):
        """
    Run one iteration of TM's compute.

    The guts of the compute are contained in the self._tmClass compute() call
    """

        if self._tm is None:
            raise RuntimeError("Temporal memory has not been initialized")

        activeColumns = set(numpy.where(inputs["activeColumns"] == 1)[0])

        if "activeExternalCells" in inputs:
            activeExternalCells = set(
                numpy.where(inputs["activeColumns"] == 1)[0])
        else:
            activeExternalCells = None

        if "activeApicalCells" in inputs:
            activeApicalCells = set(
                numpy.where(inputs["activeColumns"] == 1)[0])
        else:
            activeApicalCells = None

        if "formInternalConnections" in inputs:
            formInternalConnections = inputs["formInternalConnections"]
        else:
            formInternalConnections = True
        self._tm.compute(activeColumns,
                         activeExternalCells=activeExternalCells,
                         activeApicalCells=activeApicalCells,
                         formInternalConnections=formInternalConnections,
                         learn=self.learningMode)

        activeCellsOutput = numpy.zeros(
            self.getOutputElementCount("activeCells"), dtype=GetNTAReal())
        predictedActiveCellsOutput = numpy.zeros(
            self.getOutputElementCount("predictedActiveCells"),
            dtype=GetNTAReal())

        activeCells = [
            self._tm.getCellIndex(cell) for cell in (self._tm.activeCells)
        ]
        activeCellsOutput[activeCells] = 1.0
        predictedActiveCells = [
            self._tm.getCellIndex(cell)
            for cell in (self._tm.predictedActiveCells)
        ]
        predictedActiveCellsOutput[predictedActiveCells] = 1.0

        outputs["activeCells"][:] = activeCellsOutput[:]
        outputs["predictedActiveCells"][:] = predictedActiveCellsOutput[:]
Example #7
def plotPermanences(network=None,
                    savedNetworkFile="mnist_net.nta",
                    columnList=None,
                    iteration=0):
    """
  Plots the permanences of the top columns into a single master image
  If columnList is specified, uses those columns otherwise extracts the
  most active columns from the spatial pooler using duty cycle.
  """
    # Get the spatial pooler from the network, otherwise read it from checkpoint.
    if network is None:
        network = Network(savedNetworkFile)
    spRegion = network.regions["SP"]
    spSelf = spRegion.getSelf()
    sp = spSelf._sfdr

    # If we are not given a column list, retrieve columns with highest duty cycles
    dutyCycles = numpy.zeros(sp.getNumColumns(), dtype=GetNTAReal())
    sp.getActiveDutyCycles(dutyCycles)
    if columnList is None:
        mostActiveColumns = list(dutyCycles.argsort())
        mostActiveColumns.reverse()
        columnList = mostActiveColumns[0:400]
        #print columnList

    # Create empty master image with the top 400 columns (a 20x20 grid). We
    # will paste individual column images into this image
    numImagesPerRowInMaster = 20
    masterImage = Image.new("L", ((32 + 2) * numImagesPerRowInMaster,
                                  (32 + 2) * numImagesPerRowInMaster), 255)

    for rank, col in enumerate(columnList):
        #print "Col=",col,"rank=",rank,"dutyCycle=",dutyCycles[col]
        pyPerm = numpy.zeros(sp.getNumInputs(), dtype=GetNTAReal())
        sp.getPermanence(col, pyPerm)

        # Create small image for each column
        pyPerm = pyPerm / pyPerm.max()
        pyPerm = (pyPerm * 255.0)
        pyPerm = pyPerm.reshape((32, 32))
        pyPerm = (pyPerm).astype('uint8')
        img = Image.fromarray(pyPerm)

        # Paste it into master image
        if rank < numImagesPerRowInMaster * numImagesPerRowInMaster:
            x = (rank % numImagesPerRowInMaster) * (32 + 2)
            y = (rank // numImagesPerRowInMaster) * (32 + 2)
            masterImage.paste(img, (x, y))

    # Save master image
    masterImage.save("master_%05d.png" % (iteration))
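
A usage sketch for the function above (file names assumed): regenerate the master image from a saved checkpoint, letting the function pick the 400 highest-duty-cycle columns.

# Usage sketch: load the network from its checkpoint and plot permanences.
plotPermanences(savedNetworkFile="mnist_net.nta", iteration=0)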
Example #8
    def boostTestPhase4(self):
        boostAtBeg = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
        self.sp.getBoostFactors(boostAtBeg)

        # Do one more iteration through the input patterns with learning OFF
        y = numpy.zeros(self.columnDimensions, dtype=uintType)
        for v in self.x:
            y.fill(0)
            self.sp.compute(v, False, y)

            boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
            self.sp.getBoostFactors(boost)
            self.assertEqual(boost.sum(), boostAtBeg.sum(),
                             "Boost factors changed when learning is off")
Example #9
File: sp_region.py Project: emmaai/nupic
  def initialize(self):
    """
    Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.initialize`.
    """
    # Zero out the spatial output in case it is requested
    self._spatialPoolerOutput = numpy.zeros(self.columnCount,
                                            dtype=GetNTAReal())

    # Zero out the rfInput in case it is requested
    self._spatialPoolerInput = numpy.zeros((1, self.inputWidth),
                                           dtype=GetNTAReal())

    # Allocate the spatial pooler
    self._allocateSpatialFDR(None)
Example #10
def proximalSynapses(model, result):
    """"
    Returns the permanences of the spatial pooler synapses.
    """
    from nupic.bindings.math import GetNTAReal
    realType = GetNTAReal()

    sp = model._getSPRegion().getSelf()._sfdr
    numColumns = sp.getNumColumns()
    numInputs = sp.getNumInputs()
    proximalSynapses = []

    for column in xrange(numColumns):

        #Get permanence values
        permanences = np.zeros(numInputs).astype(realType)
        sp.getPermanence(column, permanences)
        permanences = permanences.astype(float)

        #Get potential synapse map
        potentials = np.zeros(numInputs).astype('uint32')
        sp.getPotential(column, potentials)
        potentials = potentials.astype(bool)

        #Put everything into a list of indexes because numpy is rubbish
        for i in xrange(len(permanences)):
            if potentials[i]:
                proximalSynapses.append([column, i, permanences[i]])
    return proximalSynapses
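
The per-input-bit Python loop above can be avoided by indexing with the potential mask; a sketch under the same assumptions about the sp accessors:

import numpy as np

def proximalSynapsesVectorized(sp, realType):
    # Same extraction as above, but only iterating over the bits selected by
    # potentials.nonzero() instead of testing every input bit in Python.
    numInputs = sp.getNumInputs()
    permanences = np.zeros(numInputs, dtype=realType)
    potentials = np.zeros(numInputs, dtype='uint32')
    proximalSynapses = []
    for column in range(sp.getNumColumns()):
        sp.getPermanence(column, permanences)
        sp.getPotential(column, potentials)
        for i in potentials.nonzero()[0]:
            proximalSynapses.append([column, int(i), float(permanences[i])])
    return proximalSynapses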
Example #11
    def compute(self, inputs, outputs):
        """
    Run one iteration of TemporalPoolerRegion's compute.

    The guts of the compute are contained in the self._poolerClass compute() call
    """
        activeCells = inputs["activeCells"]
        predictedActiveCells = inputs["predictedActiveCells"] if (
            "predictedActiveCells" in inputs) else numpy.zeros(
                self._inputWidth, dtype=uintDType)

        if 'resetIn' in inputs:
            if len(inputs['resetIn']) != 1:
                raise Exception("resetIn has invalid length")

            if inputs['resetIn'][0] != 0:
                self.reset()

        mostActiveCellsIndices = self._pooler.compute(activeCells,
                                                      predictedActiveCells,
                                                      self.learningMode)

        # Convert to SDR
        outputs["mostActiveCells"][:] = numpy.zeros(self._columnCount,
                                                    dtype=GetNTAReal())
        outputs["mostActiveCells"][mostActiveCellsIndices] = 1
Example #12
File: scalar.py Project: tusharp/nupic
  def _getTopDownMapping(self):
    """ Return the interal _topDownMappingM matrix used for handling the
    bucketInfo() and topDownCompute() methods. This is a matrix, one row per
    category (bucket) where each row contains the encoded output for that
    category.
    """

    # Do we need to build up our reverse mapping table?
    if self._topDownMappingM is None:

      # The input scalar value corresponding to each possible output encoding
      if self.periodic:
        self._topDownValues = numpy.arange(self.minval + self.resolution / 2.0,
                                           self.maxval,
                                           self.resolution)
      else:
        #Number of values is (max-min)/resolution
        self._topDownValues = numpy.arange(self.minval,
                                           self.maxval + self.resolution / 2.0,
                                           self.resolution)

      # Each row represents an encoded output pattern
      numCategories = len(self._topDownValues)
      self._topDownMappingM = SM32(numCategories, self.n)

      outputSpace = numpy.zeros(self.n, dtype=GetNTAReal())
      for i in xrange(numCategories):
        value = self._topDownValues[i]
        value = max(value, self.minval)
        value = min(value, self.maxval)
        self.encodeIntoArray(value, outputSpace, learn=False)
        self._topDownMappingM.setRowFromDense(i, outputSpace)

    return self._topDownMappingM
Example #13
    def boostTestPhase3(self):

        # Do two more training batches through the input patterns
        y = numpy.zeros(self.columnDimensions, dtype=uintType)
        for _ in range(2):
            for idx, v in enumerate(self.x):
                y.fill(0)
                self.sp.compute(v, True, y)
                self.winningIteration[y.nonzero()
                                      [0]] = self.sp.getIterationLearnNum()
                self.lastSDR[idx] = y.copy()

                # The boost factor for all columns that just won should be at 1.
                boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
                self.sp.getBoostFactors(boost)
                self.assertEqual(((boost[y.nonzero()[0]]) != 1).sum(), 0,
                                 "Boost factors of winning columns not 1")

        # By now, every column should have been sufficiently boosted to win at least
        # once. The number of columns that have never won should now be 0
        numLosersAfter = (self.winningIteration == 0).sum()
        self.assertEqual(numLosersAfter, 0)

        # Because of the artificially induced thrashing, even the first two patterns
        # should have low overlap. Verify that the first two SDR's now have little
        # overlap
        self.assertLess(
            _computeOverlap(self.lastSDR[0], self.lastSDR[1]), 7,
            "First two SDR's overlap significantly when they "
            "shouldn't")
Example #14
def trainNetwork(net):
  # Some stuff we will need later
  sensor = net.regions['sensor']
  sp = net.regions["SP"]
  pysp = sp.getSelf()
  classifier = net.regions['classifier']
  dutyCycles = numpy.zeros(DEFAULT_SP_PARAMS['columnCount'], dtype=GetNTAReal())

  print "============= Loading training images ================="
  t1 = time.time()
  sensor.executeCommand(["loadMultipleImages", "mnist/training"])
  numTrainingImages = sensor.getParameter('numImages')
  start = time.time()
  print "Load time for training images:",start-t1
  print "Number of training images",numTrainingImages

  # First train just the SP
  print "============= SP training ================="
  classifier.setParameter('inferenceMode', 0)
  classifier.setParameter('learningMode', 0)
  sp.setParameter('learningMode', 1)
  sp.setParameter('inferenceMode', 0)
  nTrainingIterations = numTrainingImages
  for i in range(nTrainingIterations):
    net.run(1)
    dutyCycles += pysp._spatialPoolerOutput
    if i%(nTrainingIterations/100)== 0:
      print "Iteration",i,"Category:",sensor.getOutputData('categoryOut')

  # Now train just the classifier sequentially on all training images
  print "============= Classifier training ================="
  sensor.setParameter('explorer','Flash')
  classifier.setParameter('inferenceMode', 0)
  classifier.setParameter('learningMode', 1)
  sp.setParameter('learningMode', 0)
  sp.setParameter('inferenceMode', 1)
  for i in range(numTrainingImages):
    net.run(1)
    if i%(numTrainingImages/100)== 0:
      print "Iteration",i,"Category:",sensor.getOutputData('categoryOut')

  # Save the trained network
  net.save("mnist_net.nta")

  # Print various statistics
  print "============= Training statistics ================="
  print "Training time:",time.time() - start
  tenPct= nTrainingIterations/10
  print "My duty cycles:",fdrutilities.numpyStr(dutyCycles, format="%g")
  print "Number of nonzero duty cycles:",len(dutyCycles.nonzero()[0])
  print "Mean/Max duty cycles:",dutyCycles.mean(), dutyCycles.max()
  print "Number of columns that won for > 10% patterns",\
            (dutyCycles>tenPct).sum()
  print "Number of columns that won for > 20% patterns",\
            (dutyCycles>2*tenPct).sum()
  print "Num categories learned",classifier.getParameter('categoryCount')
  print "Number of patterns stored",classifier.getParameter('patternCount')

  return net
Example #15
def trainNetwork(net, dataDir, networkFile="mnist_net.nta"):
  # Some stuff we will need later
  sensor = net.regions["sensor"]
  sp = net.regions["SP"]
  pysp = sp.getSelf()
  classifier = net.regions["classifier"]
  dutyCycles = numpy.zeros(DEFAULT_SP_PARAMS["columnCount"], dtype=GetNTAReal())

  print "============= Loading training images ================="
  t1 = time.time()
  sensor.executeCommand(["loadMultipleImages", os.path.join(dataDir, "training")])
  numTrainingImages = sensor.getParameter("numImages")
  start = time.time()
  print "Load time for training images:",start-t1
  print "Number of training images",numTrainingImages

  # First train just the SP
  print "============= SP training ================="
  classifier.setParameter("inferenceMode", 0)
  classifier.setParameter("learningMode", 0)
  sp.setParameter("learningMode", 1)
  sp.setParameter("inferenceMode", 0)
  nTrainingIterations = numTrainingImages
  for i in range(nTrainingIterations):
    net.run(1)
    dutyCycles += pysp._spatialPoolerOutput
    if i%(nTrainingIterations/100)== 0:
      print "Iteration",i,"Category:",sensor.getOutputData("categoryOut")

  # Now train just the classifier sequentially on all training images
  print "============= Classifier training ================="
  sensor.setParameter("explorer",yaml.dump(["Flash"]))
  classifier.setParameter("inferenceMode", 0)
  classifier.setParameter("learningMode", 1)
  sp.setParameter("learningMode", 0)
  sp.setParameter("inferenceMode", 1)
  for i in range(numTrainingImages):
    net.run(1)
    if i%(numTrainingImages/100)== 0:
      print "Iteration",i,"Category:",sensor.getOutputData("categoryOut")

  # Save the trained network
  net.save(networkFile)

  # Print various statistics
  print "============= Training statistics ================="
  print "Training time:",time.time() - start
  tenPct= nTrainingIterations/10
  print "My duty cycles:", numpy.array_str(dutyCycles)
  print "Number of nonzero duty cycles:",len(dutyCycles.nonzero()[0])
  print "Mean/Max duty cycles:",dutyCycles.mean(), dutyCycles.max()
  print "Number of columns that won for > 10% patterns",\
            (dutyCycles>tenPct).sum()
  print "Number of columns that won for > 20% patterns",\
            (dutyCycles>2*tenPct).sum()
  print "Num categories learned",classifier.getParameter("categoryCount")
  print "Number of patterns stored",classifier.getParameter("patternCount")

  return net
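
A usage sketch (createNetwork is a hypothetical factory that wires the sensor, SP and classifier regions this function expects; it is not shown in these excerpts):

net = createNetwork()  # hypothetical helper, assumed to exist elsewhere
net = trainNetwork(net, dataDir="mnist", networkFile="mnist_net.nta")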
Example #16
    def debugPrint(self, sp, name):
        """
    Helpful debug print statements while debugging this test.
    """
        minDutyCycle = numpy.zeros(sp.getNumColumns(), dtype=GetNTAReal())
        sp.getMinActiveDutyCycles(minDutyCycle)

        activeDutyCycle = numpy.zeros(sp.getNumColumns(), dtype=GetNTAReal())
        sp.getActiveDutyCycles(activeDutyCycle)

        boost = numpy.zeros(sp.getNumColumns(), dtype=GetNTAReal())
        sp.getBoostFactors(boost)
        print "====================\n", name
        print "Learning iteration:", sp.getIterationNum()
        print "Min duty cycles:", minDutyCycle[0]
        print "Active duty cycle", activeDutyCycle
        print
        print "Boost factor for sp:", boost
Example #17
    def boostTestPhase2(self):

        y = numpy.zeros(self.columnDimensions, dtype=uintType)
        boost = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())

        # Do 9 training batches through the input patterns
        for _ in range(9):
            for idx, v in enumerate(self.x):
                y.fill(0)
                self.sp.compute(v, True, y)
                self.winningIteration[y.nonzero()
                                      [0]] = self.sp.getIterationLearnNum()
                self.lastSDR[idx] = y.copy()

                # The boost factor for all columns should be at 1.
                self.sp.getBoostFactors(boost)
                self.assertEqual((boost == 1).sum(), self.columnDimensions,
                                 "Boost factors are not all 1")

        # Roughly half of the columns should have never been active.
        self.assertGreaterEqual(
            (self.winningIteration == 0).sum(), 0.4 * self.columnDimensions,
            "More than 60% of the columns have been active")

        # All the never-active columns should have duty cycle of 0
        dutyCycles = numpy.zeros(self.columnDimensions, dtype=GetNTAReal())
        self.sp.getActiveDutyCycles(dutyCycles)
        self.assertEqual(dutyCycles[self.winningIteration == 0].sum(), 0,
                         "Inactive columns have positive duty cycle.")

        # The average duty cycle of the at-least-once-active columns should be
        # between 0.15 and 0.30
        avg = dutyCycles[dutyCycles > 0].mean()
        self.assertGreaterEqual(avg, 0.15,
                                "Average on-columns duty cycle is too low.")
        self.assertLessEqual(avg, 0.30,
                             "Average on-columns duty cycle is too high.")

        self.verifySDRProperties()
Example #18
def proximalSegmentsFromSP(sp, activeBits, onlyActiveSynapses,
                           onlyConnectedSynapses, sourcePath):
    segsByColCell = {}
    synPermConnected = sp.getSynPermConnected()
    synapsePotentials = np.zeros(sp.getNumInputs()).astype('uint32')
    synapsePermanences = np.zeros(sp.getNumInputs()).astype(GetNTAReal())
    activeMask = np.zeros(sp.getNumInputs(), dtype=bool)
    activeMask[list(activeBits)] = True
    for column in range(sp.getNumColumns()):
        segsByColCell[column] = {}

        sp.getPotential(column, synapsePotentials)
        potentialMask = synapsePotentials == 1

        sp.getPermanence(column, synapsePermanences)
        connectedMask = synapsePermanences >= synPermConnected

        activeConnectedMask = activeMask & potentialMask & connectedMask
        activeSyns = [(inputBit, synapsePermanences[inputBit])
                      for inputBit in activeConnectedMask.nonzero()[0]]

        inactiveSyns = []
        if not onlyActiveSynapses:
            inactiveConnectedMask = ~activeMask & potentialMask & connectedMask
            inactiveSyns = [(inputBit, synapsePermanences[inputBit])
                            for inputBit in inactiveConnectedMask.nonzero()[0]]

        disconnectedSyns = []
        if not onlyConnectedSynapses:
            disconnectedMask = potentialMask & ~connectedMask
            if onlyActiveSynapses:
                disconnectedMask = disconnectedMask & activeMask
            disconnectedSyns = [
                (inputBit, synapsePermanences[inputBit])
                for inputBit in disconnectedMask.nonzero()[0]
            ]

        segsByColCell[column][-1] = [{
            'synapses': {
                sourcePath: {
                    'active': activeSyns,
                    'inactive': inactiveSyns,
                    'disconnectedSyns': disconnectedSyns,
                }
            },
        }]

    return segsByColCell
Example #19
def fastProximalPermanences(model, result):
    """"
    Returns the permanences of the spatial pooler synapses.
    """
    from nupic.bindings.math import GetNTAReal
    realType = GetNTAReal()

    sp = model._getSPRegion().getSelf()._sfdr
    numInputs = sp.getNumInputs()
    numColumns = sp.getNumColumns()

    allPermanences = []
    for column in xrange(numColumns):
        permanences = np.zeros(numInputs).astype(realType)
        sp.getPermanence(column, permanences)
        allPermanences.append(permanences)
    return np.concatenate(allPermanences)
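
Since each column contributes a numInputs-long vector, the flat result can be viewed as a matrix; a hypothetical helper built on the function above:

def proximalPermanenceMatrix(model):
    # Hypothetical wrapper: reshape the concatenated permanences into a
    # (numColumns, numInputs) matrix, one row per column.
    sp = model._getSPRegion().getSelf()._sfdr
    flat = fastProximalPermanences(model, None)
    return flat.reshape(sp.getNumColumns(), sp.getNumInputs())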
Example #20
    def compute(self, inputs, outputs):
        """
    Run one iteration of PoolingRegion's compute.

    The guts of the compute are contained in the self._poolerClass compute() call
    """
        activeCells = inputs["activeCells"]
        predictedActiveCells = inputs["predictedActiveCells"]

        mostActiveCellsIndices = self._pooler.compute(activeCells,
                                                      predictedActiveCells,
                                                      self.learningMode)

        # Convert to SDR
        outputs["mostActiveCells"][:] = numpy.zeros(self._columnCount,
                                                    dtype=GetNTAReal())
        outputs["mostActiveCells"][mostActiveCellsIndices] = 1
Example #21
    def _getTopDownMapping(self):
        """ Return the interal _topDownMappingM matrix used for handling the
    bucketInfo() and topDownCompute() methods. This is a matrix, one row per
    category (bucket) where each row contains the encoded output for that
    category.
    """

        if self._topDownMappingM is None:
            self._topDownMappingM = SM32(self._numBins, self.n)

            outputSpace = numpy.zeros(self.n, dtype=GetNTAReal())

            for i in xrange(self._numBins):
                outputSpace[:] = 0.0
                outputSpace[i:i + self.w] = 1.0
                self._topDownMappingM.setRowFromDense(i, outputSpace)

        return self._topDownMappingM
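
For illustration, row i of the matrix built above is a dense vector of length n with a block of w ones starting at index i; a small sketch of the equivalent dense rows:

# Dense equivalent of the rows stored in _topDownMappingM above (toy sizes).
import numpy

n, w, numBins = 10, 3, 4
rows = numpy.zeros((numBins, n))
for i in range(numBins):
    rows[i, i:i + w] = 1.0
# rows[1] is [0, 1, 1, 1, 0, 0, 0, 0, 0, 0]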
Example #22
File: SPRegion.py Project: smg1/nupic
    def _initEphemerals(self):
        """
    Initialize all ephemerals used by derived classes.
    """

        if hasattr(self, '_sfdr') and self._sfdr:
            self._spatialPoolerOutput = numpy.zeros(self.columnCount,
                                                    dtype=GetNTAReal())
        else:
            self._spatialPoolerOutput = None  # Will be filled in initInNetwork

        # Direct logging support (faster than node watch)
        self._fpLogSPInput = None
        self._fpLogSP = None
        self._fpLogSPDense = None
        self.logPathInput = ""
        self.logPathOutput = ""
        self.logPathOutputDense = ""
Example #23
  def _getTopDownMapping(self):
    """ Return the interal _topDownMappingM matrix used for handling the
    bucketInfo() and topDownCompute() methods. This is a matrix, one row per
    category (bucket) where each row contains the encoded output for that
    category.
    """

    # -------------------------------------------------------------------------
    # Do we need to build up our reverse mapping table?
    if self._topDownMappingM is None:

      # Each row represents an encoded output pattern
      self._topDownMappingM = SM32(self.ncategories, self.n)

      outputSpace = numpy.zeros(self.n, dtype=GetNTAReal())
      for i in range(self.ncategories):
        self.encodeIntoArray(self.categories[i], outputSpace)
        self._topDownMappingM.setRowFromDense(i, outputSpace)

    return self._topDownMappingM
Example #24
  def boostTestPhase1(self):
    
    y = numpy.zeros(self.columnDimensions, dtype = uintType)
    # Do one batch through the input patterns while learning is off
    for idx, v in enumerate(self.x):
      y.fill(0)
      self.sp.compute(v, False, y)
      self.winningIteration[y.nonzero()[0]] = self.sp.getIterationLearnNum()
      self.lastSDR[idx] = y.copy()

    # The boost factor for all columns should be at 1.
    boost = numpy.zeros(self.columnDimensions, dtype = GetNTAReal())
    self.sp.getBoostFactors(boost)
    self.assertEqual((boost==1).sum(), self.columnDimensions,
      "Boost factors are not all 1")
    
    # At least half of the columns should have never been active.
    self.assertGreaterEqual((self.winningIteration==0).sum(),
      self.columnDimensions/2, "More than half of the columns have been active")

    self.verifySDRProperties()
Example #25
  def compute(self, inputs, outputs):
    """
    Run one iteration of TemporalPoolerRegion's compute.

    Note that if the reset signal is True (1) we assume this iteration
    represents the *end* of a sequence. The output will contain the pooled
    representation to this point and any history will then be reset. The output
    at the next compute will start fresh.
    """

    resetSignal = False
    if 'resetIn' in inputs:
      if len(inputs['resetIn']) != 1:
        raise Exception("resetIn has invalid length")

      if inputs['resetIn'][0] != 0:
        resetSignal = True

    outputs["mostActiveCells"][:] = numpy.zeros(
                                      self._columnCount, dtype=GetNTAReal())

    if self._poolerType == "simpleUnion":
      self._pooler.unionIntoArray(inputs["activeCells"],
                                  outputs["mostActiveCells"],
                                  forceOutput = resetSignal)
    else:
      predictedActiveCells = inputs["predictedActiveCells"] if (
        "predictedActiveCells" in inputs) else numpy.zeros(self._inputWidth,
                                                           dtype=uintDType)

      mostActiveCellsIndices = self._pooler.compute(inputs["activeCells"],
                                                    predictedActiveCells,
                                                    self.learningMode)

      outputs["mostActiveCells"][mostActiveCellsIndices] = 1

    if resetSignal:
      self.reset()
Example #26
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

import random as rand

from nupic.encoders import adaptivescalar, sdrcategory, date
from nupic.bindings.math import GetNTAReal
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.generators.distributions import *

realDType = GetNTAReal()


class DataGenerator():
    """The DataGenerator provides a framework for generating, encoding, saving
  and exporting records. Each column of the output contains records with a
  specific set of parameters such as encoderType, n, w, etc. This interface
  is intended to be used for testing the spatial pooler, temporal pooler and
  for generating artificial datasets.
  """
    def __init__(self, name='testDataset', seed=42, verbosity=0):
        """Initialize the dataset generator with a random seed and a name"""

        self.name = name
        self.verbosity = verbosity
        self.setSeed(seed)
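
A minimal usage sketch based on the constructor shown above (the rest of the class's API is not included in this excerpt):

# Fixed seed so generated datasets are reproducible across runs.
dg = DataGenerator(name='testDataset', seed=42, verbosity=0)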
Example #27
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

import cPickle as pickle
import numpy
import unittest2 as unittest

from nupic.support.unittesthelpers.algorithm_test_helpers import (
    getNumpyRandomGenerator, convertPermanences)

from nupic.bindings.algorithms import FlatSpatialPooler as CPPFlatSpatialPooler
from nupic.bindings.math import GetNTAReal, Random
from nupic.research.flat_spatial_pooler import (FlatSpatialPooler as
                                                PyFlatSpatialPooler)

realType = GetNTAReal()
uintType = "uint32"
NUM_RECORDS = 100


class FlatSpatialPoolerCompatabilityTest(unittest.TestCase):
    def setUp(self):
        # Set to 1 for more verbose debugging output
        self.verbosity = 0

    def assertListAlmostEqual(self, alist, blist):
        self.assertEqual(len(alist), len(blist), "Lists have different length")
        for idx, val in enumerate(alist):
            self.assertAlmostEqual(
                val,
                blist[idx],
Example #28
# ----------------------------------------------------------------------

import random
import copy
import numpy
from nupic.bindings.algorithms import SpatialPooler
# Uncomment below line to use python SP
# from nupic.research.spatial_pooler import SpatialPooler
from nupic.bindings.math import GetNTAReal
from htmresearch.frameworks.union_temporal_pooling.activation.excite_functions.excite_functions_all import (
    LogisticExciteFunction, FixedExciteFunction)

from htmresearch.frameworks.union_temporal_pooling.activation.decay_functions.decay_functions_all import (
    ExponentialDecayFunction, NoDecayFunction)

REAL_DTYPE = GetNTAReal()
UINT_DTYPE = "uint32"
_TIE_BREAKER_FACTOR = 0.000001


class UnionTemporalPooler(SpatialPooler):
    """
  Experimental Union Temporal Pooler Python implementation. The Union Temporal
  Pooler builds a "union SDR" of the most recent sets of active columns. It is
  driven by active-cell input and, more strongly, by predictive-active cell
  input. The latter is more likely to produce active columns. Such winning
  columns will also tend to persist longer in the union SDR.
  """
    def __init__(
            self,
            # union_temporal_pooler.py parameters
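
A toy sketch of the union-pooling idea the docstring describes, not the class's actual update rule: pooled activation decays each step and is excited more strongly by predicted-active input, so predicted-active columns persist longer in the union SDR.

# Toy illustration only; the dynamics below are assumed for exposition.
import numpy as np

numColumns, unionSize, decay = 100, 20, 0.9
pooled = np.zeros(numColumns)
rng = np.random.RandomState(42)

for _ in range(50):
    activeCols = rng.choice(numColumns, 10, replace=False)
    predictedActiveCols = activeCols[:5]        # pretend half were predicted
    pooled *= decay                             # persistence decays over time
    pooled[activeCols] += 1.0                   # active-cell drive
    pooled[predictedActiveCols] += 2.0          # stronger predicted-active drive
    unionSDR = np.argsort(pooled)[-unionSize:]  # columns in the current union SDR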
Example #29
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Module of statistical data structures and functions used in learning algorithms
and for analysis of HTM network inputs and outputs.
"""

import random

import numpy

from nupic.bindings.math import GetNTAReal, SparseMatrix
from functools import reduce

dtype = GetNTAReal()


def pickByDistribution(distribution, r=None):
    """
  Pick a value according to the provided distribution.

  Example:

  ::

    pickByDistribution([.2, .1])

  Returns 0 two thirds of the time and 1 one third of the time.

  :param distribution: Probability distribution. Need not be normalized.
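
The excerpt cuts off before the function body; a minimal sketch of what pickByDistribution plausibly does, consistent with the docstring's example (stdlib random assumed for the default generator):

import random

import numpy

def pickByDistributionSketch(distribution, r=None):
    # Sample index i with probability distribution[i] / sum(distribution)
    # by inverting the cumulative sum.
    r = r if r is not None else random
    x = r.uniform(0, float(numpy.sum(distribution)))
    idx = int(numpy.searchsorted(numpy.cumsum(distribution), x, side="right"))
    return min(idx, len(distribution) - 1)

# pickByDistributionSketch([.2, .1]) returns 0 about two thirds of the time.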