示例#1
0
 def testCompute(self):
     """ Verify a single compute() call runs cleanly and activates output. """
     input_sdr = SDR(100).randomize(.05)
     output_sdr = SDR(100)
     pooler = SP(input_sdr.dimensions, output_sdr.dimensions, stimulusThreshold=1)
     pooler.compute(input_sdr, True, output_sdr)
     # At least one column should win with stimulusThreshold=1.
     assert (output_sdr.getSum() > 0)
示例#2
0
    def _runGetPermanenceTrial(self, float_type):
        """Verify that getPermanence() round-trips values for ``float_type``.

        These checks are sensitive to the numpy dtype: when the array dtype
        matches the C++ argument type, PyBind11 passes the buffer straight
        through and the C++ side can write into it in place.  With a
        mismatched dtype, PyBind11 hands C++ a converted copy, so any
        in-place changes are lost on return.
        """
        input_sdr = SDR(100).randomize(.05)
        output_sdr = SDR(100)
        pooler = SP(input_sdr.dimensions, output_sdr.dimensions, stimulusThreshold=1)

        # Zero out column 0's permanences, read them back, and confirm the
        # round-trip really produced all zeros.
        zeros = np.zeros(pooler.getNumInputs(), dtype=float_type)
        pooler.setPermanence(0, zeros)
        readback = np.zeros(pooler.getNumInputs(), dtype=float_type)
        pooler.getPermanence(0, readback)
        assert (readback.sum() == 0.0)

        for _ in range(10):
            pooler.compute(input_sdr, True, output_sdr)

        # After learning, at least one permanence somewhere must be non-zero.
        accumulated = np.zeros(pooler.getNumInputs(), dtype=float_type)
        for column in range(100):
            buf = np.zeros(pooler.getNumInputs(), dtype=float_type)
            pooler.getPermanence(column, buf)
            accumulated = accumulated + buf
        assert (accumulated.sum() > 0.0)
示例#3
0
    def testNupicSpatialPoolerPickling(self):
        """Test pickling / unpickling of HTM SpatialPooler.

        Round-trips the pooler both through an in-memory bytes object and
        through a temporary file, comparing str() forms before and after.
        """
        inputs = SDR(100).randomize(.05)
        active = SDR(100)
        sp = SP(inputs.dimensions, active.dimensions, stimulusThreshold=1)

        # Run a few learning steps so the pooler carries non-trivial state.
        for _ in range(10):
            sp.compute(inputs, True, active)

        # Pickle protocol 3 requires Python 3; fall back to 2 otherwise.
        if sys.version_info[0] >= 3:
            proto = 3
        else:
            proto = 2

        # Simple test: make sure that dumping / loading works...
        pickledSp = pickle.dumps(sp, proto)
        sp2 = pickle.loads(pickledSp)
        self.assertEqual(str(sp), str(sp2),
                         "Simple SpatialPooler pickle/unpickle failed.")

        # or using File I/O.  The context manager guarantees the temporary
        # file is closed (and deleted) even if dump/load raises, which the
        # original manual close() did not.
        with tempfile.TemporaryFile() as f:  # simulates opening a file ('wb')
            pickle.dump(sp, f, proto)
            f.seek(0)
            sp3 = pickle.load(f)
        self.assertEqual(str(sp), str(sp3),
                         "File I/O SpatialPooler pickle/unpickle failed.")
示例#4
0
    def _runGetConnectedCounts(self, uint_type):
        """Verify getConnectedCounts() fills a ``uint_type`` buffer with data."""
        pattern = SDR(100).randomize(.05)
        columns = SDR(100)
        pooler = SP(pattern.dimensions, columns.dimensions, stimulusThreshold=1)

        # A handful of learning iterations so some synapses become connected.
        for _ in range(10):
            pooler.compute(pattern, True, columns)

        # At least one column must report a non-zero connected count.
        counts = np.zeros(pooler.getNumColumns(), dtype=uint_type)
        pooler.getConnectedCounts(counts)
        assert (counts.sum() > 0)
示例#5
0
    def _runGetConnectedSynapses(self, uint_type):
        """Verify getConnectedSynapses() fills a ``uint_type`` buffer with data."""
        pattern = SDR(100).randomize(.05)
        columns = SDR(100)
        pooler = SP(pattern.dimensions, columns.dimensions, stimulusThreshold=1)

        # Learn for a few steps so connections form.
        for _ in range(10):
            pooler.compute(pattern, True, columns)

        # Summed over every column, at least one synapse must be connected.
        accumulated = np.zeros(pooler.getNumInputs(), dtype=uint_type)
        for column in range(100):
            row = np.zeros(pooler.getNumInputs(), dtype=uint_type)
            pooler.getConnectedSynapses(column, row)
            accumulated = accumulated + row
        assert (accumulated.sum() > 0)
示例#6
0
  def testSpatialPoolerSerialization(self):
     """Test serializing with saveToFile() and loadFromFile().

     Writes the pooler to a scratch file in PORTABLE format, loads it into
     a fresh SP, and checks the str() forms match.
     """
     inputs = SDR( 100 ).randomize( .05 )
     active = SDR( 100 )
     sp = SP( inputs.dimensions, active.dimensions, stimulusThreshold = 1 )

     # Run a few learning steps so the pooler has non-trivial state.
     for _ in range(10):
       sp.compute( inputs, True, active )

     # The SP now has some data in it, try serialization.
     file = "spatial_pooler_test_save2.bin"
     try:
       sp.saveToFile(file, "PORTABLE")
       sp3 = SP()
       sp3.loadFromFile(file, "PORTABLE")
       self.assertEqual(str(sp), str(sp3), "HTM SpatialPooler serialization (using saveToFile/loadFromFile) failed.")
     finally:
       # Always delete the scratch file, even when the test fails, so
       # repeated runs don't find stale state on disk.  (The original
       # skipped the remove() on any exception.)
       if os.path.exists(file):
         os.remove(file)
示例#7
0
  def testNupicSpatialPoolerSavingToString(self):
    """Round-trip an HTM SpatialPooler through its string serialization."""
    pattern = SDR( 100 ).randomize( .05 )
    columns = SDR( 100 )
    pooler = SP( pattern.dimensions, columns.dimensions, stimulusThreshold = 1 )

    # Give the pooler some learned state worth serializing.
    for _ in range(10):
      pooler.compute( pattern, True, columns )

    # Simple test: make sure that writing/reading works...
    serialized = pooler.writeToString()

    # Deliberately construct the target with different dimensions; loading
    # must replace them with the serialized configuration.
    restored = SP(columnDimensions=[32, 32])
    restored.loadFromString(serialized)

    self.assertEqual(pooler.getNumColumns(), restored.getNumColumns(),
                     "NuPIC SpatialPooler write to/read from string failed.")
    self.assertEqual(str(pooler), str(restored),
                     "HTM SpatialPooler write to/read from string failed.")
示例#8
0
# -------
# Each column connects to a subset of the input vector (controlled by both
# the potentialRadius and potentialPct parameters).  A column's overlap
# score is the number of its connections to input bits that are active in
# the presented vector.  With learning 'on' in the SP, active connections
# are reinforced while inactive ones are depressed (per the
# synPermActiveInc and synPermInactiveDec parameters).
# To create a sparse representation of the input, the SP keeps only a small
# percentage (usually 2%) of its most active columns -- i.e. the columns
# with the largest overlap scores.
# This first part builds a histogram of the overlap scores across the
# Spatial Pooler (SP) after it is fed a random binary input, and highlights
# the scores of the columns chosen to form the sparse representation.

# compute() with learning off; it returns the per-column overlap scores.
overlaps = sp.compute(inputSDR, False, outputSDR)
# Scores of the winning (active) columns only.
activeColsScores = [overlaps[winner] for winner in outputSDR.sparse]

print("")
print("---------------------------------")
print("Figure 1 shows a histogram of the overlap scores")
print("from all the columns in the spatial pooler, as well as the")
print("overlap scores of those columns that were selected to build a")
print("sparse representation of the input (shown in green).")
print("The SP chooses 2% of the columns with the largest overlap score")
print("to make such sparse representation.")
print("---------------------------------")
print("")
        # need this for predictions later
        pred_actCells = []
        # anamoly_forall = []
        for count, i in enumerate(range(len(train_set))):

            # encode the current integer
            rdseSDR = rdseEncoder.encode(train_set[i])
            # create an SDR for SP output
            activeColumns = SDR( dimensions = tm.getColumnDimensions()[0] )

            # convert the SDR to SP
            # this is optional if the output from the encoder is
            # already a sparse binary representation
            # otherwise this step may be skipped as seen in
            # tutorials online
            sp.compute(rdseSDR, True, activeColumns)
            tm.compute(activeColumns, learn=True)
            tm.activateDendrites(True)
            tm_actCells = tm.getActiveCells()
            pred_actCells.append(tm_actCells)
            # anamoly_forall.append(tm.anomaly)

            label = int(train_set[i])
            # this is a neural network being trained to
            # know which SDR corresponds to which integer
            predict.learn(count, tm_actCells, label)

    predict.reset()
    next_elem = []
    # need to interpret the predictions made by TM (these predictions
    # are in sparse representations and the brain does not need this added