def testPredictiveCells(self):
    """ This tests that we don't get empty predictive cells """
    tm = TM(
        columnDimensions=(parameters1["sp"]["columnCount"], ),
        cellsPerColumn=parameters1["tm"]["cellsPerColumn"],
        activationThreshold=parameters1["tm"]["activationThreshold"],
        initialPermanence=parameters1["tm"]["initialPerm"],
        connectedPermanence=parameters1["sp"]["synPermConnected"],
        minThreshold=parameters1["tm"]["minThreshold"],
        maxNewSynapseCount=parameters1["tm"]["newSynapseCount"],
        permanenceIncrement=parameters1["tm"]["permanenceInc"],
        permanenceDecrement=parameters1["tm"]["permanenceDec"],
        predictedSegmentDecrement=0.0,
        maxSegmentsPerCell=parameters1["tm"]["maxSegmentsPerCell"],
        maxSynapsesPerSegment=parameters1["tm"]["maxSynapsesPerSegment"],
    )

    activeColumnsA = SDR(parameters1["sp"]["columnCount"])
    activeColumnsB = SDR(parameters1["sp"]["columnCount"])
    activeColumnsA.randomize(sparsity=0.4, seed=1)
    activeColumnsB.randomize(sparsity=0.4, seed=1)

    # give pattern A - bursting
    # give pattern B - bursting
    # give pattern A - should be predicting

    tm.activateDendrites(True)
    self.assertTrue(tm.getPredictiveCells().getSum() == 0)
    predictiveCellsSDR = tm.getPredictiveCells()
    tm.activateCells(activeColumnsA, True)
    _print("\nColumnsA")
    _print("activeCols:" + str(len(activeColumnsA.sparse)))
    _print("activeCells:" + str(len(tm.getActiveCells().sparse)))
    _print("predictiveCells:" + str(len(predictiveCellsSDR.sparse)))

    tm.activateDendrites(True)
    self.assertTrue(tm.getPredictiveCells().getSum() == 0)
    predictiveCellsSDR = tm.getPredictiveCells()
    tm.activateCells(activeColumnsB, True)
    _print("\nColumnsB")
    _print("activeCols:" + str(len(activeColumnsB.sparse)))
    _print("activeCells:" + str(len(tm.getActiveCells().sparse)))
    _print("predictiveCells:" + str(len(predictiveCellsSDR.sparse)))

    tm.activateDendrites(True)
    self.assertTrue(tm.getPredictiveCells().getSum() > 0)
    predictiveCellsSDR = tm.getPredictiveCells()
    tm.activateCells(activeColumnsA, True)
    _print("\nColumnsA")
    _print("activeCols:" + str(len(activeColumnsA.sparse)))
    _print("activeCells:" + str(len(tm.getActiveCells().sparse)))
    _print("predictiveCells:" + str(len(predictiveCellsSDR.sparse)))
def testCompute(self):
    """ Check that there are no errors in call to compute. """
    inputs = SDR( 100 ).randomize( .05 )
    tm = TM( inputs.dimensions )
    tm.compute( inputs, True )

    active = tm.getActiveCells()
    self.assertTrue( active.getSum() > 0 )
def testPerformanceLarge(self):
    LARGE = 9000
    ITERS = 100  # This is lowered for unittest. Try 1000, 5000, ...
    from htm.bindings.engine_internal import Timer
    t = Timer()

    inputs = SDR( LARGE ).randomize( .10 )
    tm = TM( inputs.dimensions )

    for i in range(ITERS):
        inputs = inputs.randomize( .10 )
        t.start()
        tm.compute( inputs, True )
        active = tm.getActiveCells()
        t.stop()
        self.assertTrue( active.getSum() > 0 )

    t_total = t.elapsed()
    speed = t_total * 1000 / ITERS  # time in ms per iteration
    self.assertTrue(speed < 40.0)
for count, i in enumerate(range(len(train_set))):
    # Encode the current integer.
    rdseSDR = rdseEncoder.encode(train_set[i])

    # Create an SDR to hold the Spatial Pooler output.
    activeColumns = SDR( dimensions = tm.getColumnDimensions()[0] )

    # Run the Spatial Pooler. This step is optional: if the encoder output
    # is already a suitable sparse binary representation, it can be skipped,
    # as some online tutorials do.
    sp.compute(rdseSDR, True, activeColumns)

    tm.compute(activeColumns, learn=True)
    tm.activateDendrites(True)
    tm_actCells = tm.getActiveCells()
    pred_actCells.append(tm_actCells)
    # anomaly_forall.append(tm.anomaly)

    label = int(train_set[i])
    # Train the classifier (a small neural network) to learn which
    # active-cell SDR corresponds to which integer.
    predict.learn(count, tm_actCells, label)

predict.reset()
next_elem = []
# The TM's predictions are sparse representations, so an extra step is
# needed to interpret them. The brain needs no such step because it works
# directly with its own SDRs, but here the SDRs must be decoded back into
# integers.
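# The decoding pass below is a minimal sketch, not part of the original
# script: it assumes `predict` is an htm.bindings.algorithms.Predictor
# created with steps=[1], and `test_set` is a hypothetical list of integers
# to run inference on. It shows one way to turn the TM's active-cell SDRs
# back into integer predictions via the classifier's probability output.
import numpy as np

for value in test_set:
    # Encode and run the same SP -> TM pipeline, but without learning.
    rdseSDR = rdseEncoder.encode(value)
    activeColumns = SDR( dimensions = tm.getColumnDimensions()[0] )
    sp.compute(rdseSDR, False, activeColumns)
    tm.compute(activeColumns, learn=False)

    # Predictor.infer() returns one probability list per configured step,
    # indexed by label; the argmax is the most likely next integer.
    pdf = predict.infer(tm.getActiveCells())
    if len(pdf[1]) > 0:
        next_elem.append(int(np.argmax(pdf[1])))
    else:
        next_elem.append(None)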