Example #1
    def basicTest(self):
        """Basic test (creation, pickling, basic run of learning and inference)"""
        # Create TP object
        tp = TP10X2(
            numberOfCols=10,
            cellsPerColumn=3,
            initialPerm=0.2,
            connectedPerm=0.8,
            minThreshold=2,
            newSynapseCount=5,
            permanenceInc=0.1,
            permanenceDec=0.05,
            permanenceMax=1,
            globalDecay=0.05,
            activationThreshold=4,
            doPooling=False,
            segUpdateValidDuration=5,
            seed=SEED,
            verbosity=VERBOSITY,
        )
        tp.retrieveLearningStates = True

        # Save and reload
        tp.makeCells4Ephemeral = False
        pickle.dump(tp, open("test_tp10x.pkl", "wb"))
        tp2 = pickle.load(open("test_tp10x.pkl", "rb"))

        self.assertTrue(fdrutils.tpDiff2(tp, tp2, VERBOSITY, checkStates=False))

        # Learn
        for i in xrange(5):
            x = numpy.zeros(tp.numberOfCols, dtype="uint32")
            _RGEN.initializeUInt32Array(x, 2)
            tp.learn(x)

        # Save and reload after learning
        tp.reset()
        tp.makeCells4Ephemeral = False
        pickle.dump(tp, open("test_tp10x.pkl", "wb"))
        tp2 = pickle.load(open("test_tp10x.pkl", "rb"))
        self.assertTrue(fdrutils.tpDiff2(tp, tp2, VERBOSITY))

        # Infer
        patterns = numpy.zeros((4, tp.numberOfCols), dtype="uint32")
        for i in xrange(4):
            _RGEN.initializeUInt32Array(patterns[i], 2)

        for i in xrange(10):
            x = numpy.zeros(tp.numberOfCols, dtype="uint32")
            _RGEN.initializeUInt32Array(x, 2)
            tp.infer(x)
            if i > 0:
                tp.checkPrediction2(patterns)
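The save/reload step above is the core pattern under test: setting makeCells4Ephemeral = False keeps the C++ cell state in the pickle, and tpDiff2 verifies the round trip preserved it. A minimal sketch of that pattern as a standalone helper; the helper name is hypothetical, while fdrutils and the TP instance are assumed from the example:

import pickle

def pickleRoundTrip(tp, path="test_tp10x.pkl", verbosity=0):
    # Keep the C++ cell state in the pickle instead of rebuilding it
    tp.makeCells4Ephemeral = False
    with open(path, "wb") as f:
        pickle.dump(tp, f)
    with open(path, "rb") as f:
        tp2 = pickle.load(f)
    # tpDiff2 returns True when the two instances are equivalent
    assert fdrutils.tpDiff2(tp, tp2, verbosity, checkStates=False)
    return tp2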
Example #2
    def basicTest(self):
        """Basic test (creation, pickling, basic run of learning and inference)"""
        # Create TP object
        tp = TP10X2(numberOfCols=10,
                    cellsPerColumn=3,
                    initialPerm=.2,
                    connectedPerm=0.8,
                    minThreshold=2,
                    newSynapseCount=5,
                    permanenceInc=.1,
                    permanenceDec=.05,
                    permanenceMax=1,
                    globalDecay=.05,
                    activationThreshold=4,
                    doPooling=False,
                    segUpdateValidDuration=5,
                    seed=SEED,
                    verbosity=VERBOSITY)
        tp.retrieveLearningStates = True

        # Save and reload
        tp.makeCells4Ephemeral = False
        pickle.dump(tp, open("test_tp10x.pkl", "wb"))
        tp2 = pickle.load(open("test_tp10x.pkl", "rb"))

        self.assertTrue(fdrutils.tpDiff2(tp, tp2, VERBOSITY,
                                         checkStates=False))

        # Learn
        for i in xrange(5):
            x = numpy.zeros(tp.numberOfCols, dtype='uint32')
            _RGEN.initializeUInt32Array(x, 2)
            tp.learn(x)

        # Save and reload after learning
        tp.reset()
        tp.makeCells4Ephemeral = False
        pickle.dump(tp, open("test_tp10x.pkl", "wb"))
        tp2 = pickle.load(open("test_tp10x.pkl", "rb"))
        self.assertTrue(fdrutils.tpDiff2(tp, tp2, VERBOSITY))

        # Infer
        patterns = numpy.zeros((4, tp.numberOfCols), dtype='uint32')
        for i in xrange(4):
            _RGEN.initializeUInt32Array(patterns[i], 2)

        for i in xrange(10):
            x = numpy.zeros(tp.numberOfCols, dtype='uint32')
            _RGEN.initializeUInt32Array(x, 2)
            tp.infer(x)
            if i > 0:
                tp.checkPrediction2(patterns)
Example #3
    def assertTPsEqual(self, tp1, tp2):
        """Asserts that two TP instances are the same.

    This is temporarily disabled since it does not work with the C++
    implementation of the TP.
    """
        self.assertEqual(tp1, tp2, tp1.diff(tp2))
        self.assertTrue(fdrutilities.tpDiff2(tp1, tp2, 1, False))
Example #4
  def assertTPsEqual(self, tp1, tp2):
    """Asserts that two TP instances are the same.

    This is temporarily disabled since it does not work with the C++
    implementation of the TP.
    """
    self.assertEqual(tp1, tp2, tp1.diff(tp2))
    self.assertTrue(fdrutilities.tpDiff2(tp1, tp2, 1, False))
Example #5
def assertNoTPDiffs(tps):
  """
  Check for diffs among the TP instances in the passed-in tps dict and
  raise an AssertionError if any are detected

  Parameters:
  ---------------------------------------------------------------------
  tps:                  dict of TP instances
  """

  if len(tps) == 1:
    return
  if len(tps) > 2:
    raise "Not implemented for more than 2 TPs"

  same = fdrutils.tpDiff2(*tps.values(), verbosity=VERBOSITY)
  assert same
  return
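A hypothetical usage sketch for assertNoTPDiffs, assuming cppTp and pyTp are two TP implementations built with identical parameters (the names are illustrative):

tps = {"CPP": cppTp, "PY": pyTp}
assertNoTPDiffs(tps)  # raises AssertionError if the two instances diverge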
Example #6
def assertNoTPDiffs(tps):
    """
  Check for diffs among the TP instances in the passed in tps dict and
  raise an assert if any are detected

  Parameters:
  ---------------------------------------------------------------------
  tps:                  dict of TP instances
  """

    if len(tps) == 1:
        return
    if len(tps) > 2:
        raise "Not implemented for more than 2 TPs"

    same = fdrutils.tpDiff2(*tps.values(), verbosity=VERBOSITY)
    assert same
    return
Example #7
  def testIdenticalTps(self):
    self.assertTrue(fdrutils.tpDiff2(self.cppTp, self.pyTp))
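Example #7 presumes a setUp that builds the two implementations side by side. A hedged sketch of such a fixture, reusing the constructor parameters from Example #1; TP is the pure-Python class and TP10X2 the C++-backed one, as in the other examples:

  def setUp(self):
    kwargs = dict(numberOfCols=10, cellsPerColumn=3, initialPerm=0.2,
                  connectedPerm=0.8, minThreshold=2, newSynapseCount=5,
                  permanenceInc=0.1, permanenceDec=0.05, permanenceMax=1,
                  globalDecay=0.05, activationThreshold=4, doPooling=False,
                  segUpdateValidDuration=5, seed=SEED, verbosity=VERBOSITY)
    self.cppTp = TP10X2(**kwargs)  # C++ implementation
    self.pyTp = TP(**kwargs)       # pure-Python implementation
    # Needed so tpDiff2 can compare learning states
    self.cppTp.retrieveLearningStates = True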
Example #8
  def basicTest2(self, tp, numPatterns=100, numRepetitions=3, activity=15,
                 testTrimming=False, testRebuild=False):
    """Basic test (basic run of learning and inference)"""
    # Create PY TP object that mirrors the one sent in.
    tpPy = TP(numberOfCols=tp.numberOfCols, cellsPerColumn=tp.cellsPerColumn,
              initialPerm=tp.initialPerm, connectedPerm=tp.connectedPerm,
              minThreshold=tp.minThreshold, newSynapseCount=tp.newSynapseCount,
              permanenceInc=tp.permanenceInc, permanenceDec=tp.permanenceDec,
              permanenceMax=tp.permanenceMax, globalDecay=tp.globalDecay,
              activationThreshold=tp.activationThreshold,
              doPooling=tp.doPooling,
              segUpdateValidDuration=tp.segUpdateValidDuration,
              pamLength=tp.pamLength, maxAge=tp.maxAge,
              maxSeqLength=tp.maxSeqLength,
              maxSegmentsPerCell=tp.maxSegmentsPerCell,
              maxSynapsesPerSegment=tp.maxSynapsesPerSegment,
              seed=tp.seed, verbosity=tp.verbosity)

    # Ensure we are copying over learning states for TPDiff
    tp.retrieveLearningStates = True

    verbosity = VERBOSITY

    # Learn

    # Build up sequences
    sequence = fdrutils.generateCoincMatrix(nCoinc=numPatterns,
                                            length=tp.numberOfCols,
                                            activity=activity)
    for r in xrange(numRepetitions):
      for i in xrange(sequence.nRows()):

        #if i > 11:
        #  setVerbosity(6, tp, tpPy)

        if i % 10 == 0:
          tp.reset()
          tpPy.reset()

        if verbosity >= 2:
          print "\n\n    ===================================\nPattern:",
          print i, "Round:", r, "input:", sequence.getRow(i)

        y1 = tp.learn(sequence.getRow(i))
        y2 = tpPy.learn(sequence.getRow(i))

        # Ensure everything continues to work well even if we continuously
        # rebuild outSynapses structure
        if testRebuild:
          tp.cells4.rebuildOutSynapses()

        if testTrimming:
          tp.trimSegments()
          tpPy.trimSegments()

        if verbosity > 2:
          print "\n   ------  CPP states  ------ ",
          tp.printStates()
          print "\n   ------  PY states  ------ ",
          tpPy.printStates()
          if verbosity > 6:
            print "C++ cells: "
            tp.printCells()
            print "PY cells: "
            tpPy.printCells()

        if verbosity >= 3:
          print "Num segments in PY and C++", tpPy.getNumSegments(), \
              tp.getNumSegments()

        # Check that the two TPs are identical. This check is slow; it runs
        # every iteration here, but could be throttled to every other
        # iteration if needed.
        self.assertTrue(fdrutils.tpDiff2(tp, tpPy, verbosity, False))

        # Check that outputs are identical
        self.assertLess(abs((y1 - y2).sum()), 3)

    print "Learning completed"

    self.assertTrue(fdrutils.tpDiff2(tp, tpPy, verbosity))

    # TODO: Need to check - currently failing this
    #checkCell0(tpPy)

    # Remove unconnected synapses and check TP's again

    # Test rebuild out synapses
    print "Rebuilding outSynapses"
    tp.cells4.rebuildOutSynapses()
    self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY))

    print "Trimming segments"
    tp.trimSegments()
    tpPy.trimSegments()
    self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY))

    # Save and reload after learning
    print "Pickling and unpickling"
    tp.makeCells4Ephemeral = False
    pickle.dump(tp, open("test_tp10x.pkl", "wb"))
    tp2 = pickle.load(open("test_tp10x.pkl", "rb"))
    self.assertTrue(fdrutils.tpDiff2(tp, tp2, VERBOSITY, checkStates=False))

    # Infer
    print "Testing inference"

    # Setup for inference
    tp.reset()
    tpPy.reset()
    setVerbosity(INFERENCE_VERBOSITY, tp, tpPy)

    patterns = numpy.zeros((40, tp.numberOfCols), dtype='uint32')
    for i in xrange(4):
      _RGEN.initializeUInt32Array(patterns[i], 2)

    for i, x in enumerate(patterns):

      x = numpy.zeros(tp.numberOfCols, dtype='uint32')
      _RGEN.initializeUInt32Array(x, 2)
      y = tp.infer(x)
      yPy = tpPy.infer(x)

      self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY, checkLearn=False))
      if abs((y - yPy).sum()) > 0:
        print "C++ output", y
        print "Py output", yPy
        assert False

      if i > 0:
        tp.checkPrediction2(patterns)
        tpPy.checkPrediction2(patterns)

    print "Inference completed"
    print "===================================="

    return tp, tpPy
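The long mirrored constructor call at the top of this example can be condensed into a helper. A sketch (the helper and tuple names are hypothetical; the parameter list is exactly the one copied in the example):

MIRRORED_PARAMS = (
    "numberOfCols", "cellsPerColumn", "initialPerm", "connectedPerm",
    "minThreshold", "newSynapseCount", "permanenceInc", "permanenceDec",
    "permanenceMax", "globalDecay", "activationThreshold", "doPooling",
    "segUpdateValidDuration", "pamLength", "maxAge", "maxSeqLength",
    "maxSegmentsPerCell", "maxSynapsesPerSegment", "seed", "verbosity")

def mirrorPyTP(tp):
  # Build a pure-Python TP with the same parameters as an existing TP
  return TP(**dict((name, getattr(tp, name)) for name in MIRRORED_PARAMS))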
Example #9
    def basicTest2(self,
                   tp,
                   numPatterns=100,
                   numRepetitions=3,
                   activity=15,
                   testTrimming=False,
                   testRebuild=False):
        """Basic test (basic run of learning and inference)"""
        # Create PY TP object that mirrors the one sent in.
        tpPy = TP(numberOfCols=tp.numberOfCols,
                  cellsPerColumn=tp.cellsPerColumn,
                  initialPerm=tp.initialPerm,
                  connectedPerm=tp.connectedPerm,
                  minThreshold=tp.minThreshold,
                  newSynapseCount=tp.newSynapseCount,
                  permanenceInc=tp.permanenceInc,
                  permanenceDec=tp.permanenceDec,
                  permanenceMax=tp.permanenceMax,
                  globalDecay=tp.globalDecay,
                  activationThreshold=tp.activationThreshold,
                  doPooling=tp.doPooling,
                  segUpdateValidDuration=tp.segUpdateValidDuration,
                  pamLength=tp.pamLength,
                  maxAge=tp.maxAge,
                  maxSeqLength=tp.maxSeqLength,
                  maxSegmentsPerCell=tp.maxSegmentsPerCell,
                  maxSynapsesPerSegment=tp.maxSynapsesPerSegment,
                  seed=tp.seed,
                  verbosity=tp.verbosity)

        # Ensure we are copying over learning states for TPDiff
        tp.retrieveLearningStates = True

        verbosity = VERBOSITY

        # Learn

        # Build up sequences
        sequence = fdrutils.generateCoincMatrix(nCoinc=numPatterns,
                                                length=tp.numberOfCols,
                                                activity=activity)
        for r in xrange(numRepetitions):
            for i in xrange(sequence.nRows()):

                #if i > 11:
                #  setVerbosity(6, tp, tpPy)

                if i % 10 == 0:
                    tp.reset()
                    tpPy.reset()

                if verbosity >= 2:
                    print "\n\n    ===================================\nPattern:",
                    print i, "Round:", r, "input:", sequence.getRow(i)

                y1 = tp.learn(sequence.getRow(i))
                y2 = tpPy.learn(sequence.getRow(i))

                # Ensure everything continues to work well even if we continuously
                # rebuild outSynapses structure
                if testRebuild:
                    tp.cells4.rebuildOutSynapses()

                if testTrimming:
                    tp.trimSegments()
                    tpPy.trimSegments()

                if verbosity > 2:
                    print "\n   ------  CPP states  ------ ",
                    tp.printStates()
                    print "\n   ------  PY states  ------ ",
                    tpPy.printStates()
                    if verbosity > 6:
                        print "C++ cells: "
                        tp.printCells()
                        print "PY cells: "
                        tpPy.printCells()

                if verbosity >= 3:
                    print "Num segments in PY and C++", tpPy.getNumSegments(), \
                        tp.getNumSegments()

                # Check that the two TPs are identical. This check is slow; it
                # runs every iteration here, but could be throttled to every
                # other iteration if needed.
                self.assertTrue(fdrutils.tpDiff2(tp, tpPy, verbosity, False))

                # Check that outputs are identical
                self.assertLess(abs((y1 - y2).sum()), 3)

        print "Learning completed"

        self.assertTrue(fdrutils.tpDiff2(tp, tpPy, verbosity))

        # TODO: Need to check - currently failing this
        #checkCell0(tpPy)

        # Remove unconnected synapses and check TP's again

        # Test rebuild out synapses
        print "Rebuilding outSynapses"
        tp.cells4.rebuildOutSynapses()
        self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY))

        print "Trimming segments"
        tp.trimSegments()
        tpPy.trimSegments()
        self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY))

        # Save and reload after learning
        print "Pickling and unpickling"
        tp.makeCells4Ephemeral = False
        pickle.dump(tp, open("test_tp10x.pkl", "wb"))
        tp2 = pickle.load(open("test_tp10x.pkl", "rb"))
        self.assertTrue(fdrutils.tpDiff2(tp, tp2, VERBOSITY,
                                         checkStates=False))

        # Infer
        print "Testing inference"

        # Setup for inference
        tp.reset()
        tpPy.reset()
        setVerbosity(INFERENCE_VERBOSITY, tp, tpPy)

        patterns = numpy.zeros((40, tp.numberOfCols), dtype='uint32')
        for i in xrange(4):
            _RGEN.initializeUInt32Array(patterns[i], 2)

        for i, x in enumerate(patterns):

            x = numpy.zeros(tp.numberOfCols, dtype='uint32')
            _RGEN.initializeUInt32Array(x, 2)
            y = tp.infer(x)
            yPy = tpPy.infer(x)

            self.assertTrue(
                fdrutils.tpDiff2(tp, tpPy, VERBOSITY, checkLearn=False))
            if abs((y - yPy).sum()) > 0:
                print "C++ output", y
                print "Py output", yPy
                assert False

            if i > 0:
                tp.checkPrediction2(patterns)
                tpPy.checkPrediction2(patterns)

        print "Inference completed"
        print "===================================="

        return tp, tpPy
Example #10
    def testIdenticalTps(self):
        self.assertTrue(fdrutils.tpDiff2(self.cppTp, self.pyTp))
Example #11
    def testCLAAndSPTPFlow(self, trainIterations=500, testIterations=100):
        """
    The test creates two networks, trains their spatial and temporal poolers, and
    ensures they return identical results for bottom-up and top-down flows.
  
    The two networks are:
  
    a) an LPF-style network using the LPF regions RecordSensor and CLARegion.
  
    b) an OPF-style network using RecordSensor, SPRegion and TPRegion.
  
    The test trains each network for 500 iterations (on the gym.csv file) and
    runs inference for 100 iterations. During inference the outputs of both
    networks are tested to ensure they are identical.  The test also ensures
    the TP instances of CLARegion and TPRegion are identical after training.
    """

        print "Creating network..."

        netLPF = _createLPFNetwork(addSP=True, addTP=True)
        sensorLPF = netLPF.regions['sensor']
        pySensorLPF = netLPF.regions['sensor'].getSelf()
        encoderLPF = pySensorLPF.encoder
        level1LPF = netLPF.regions['level1']

        netOPF = _createOPFNetwork(addSP=True, addTP=True)
        sensorOPF = netOPF.regions['sensor']
        level1SP = netOPF.regions['level1SP']
        level1TP = netOPF.regions['level1TP']

        # ==========================================================================
        # Train the LPF network for trainIterations iterations
        print "Training the LPF network for %d iterations" % (
            trainIterations, )
        level1LPF.setParameter('learningMode', 1)
        level1LPF.setParameter('inferenceMode', 0)
        level1LPF.setParameter('trainingStep', 'spatial')
        netLPF.run(trainIterations)
        level1LPF.setParameter('trainingStep', 'temporal')
        netLPF.run(trainIterations)
        level1LPF.setParameter('learningMode', 0)
        level1LPF.setParameter('inferenceMode', 1)

        # ==========================================================================
        # Train the OPF network for trainIterations iterations
        print "Training the OPF network for %d iterations" % (
            trainIterations, )
        # Train SP for trainIterations iterations. Here we call compute on
        # regions explicitly. Note that prepareInputs must be called on a
        # region before its compute is called.
        netOPF.initialize()
        level1SP.setParameter('learningMode', 1)
        level1SP.setParameter('inferenceMode', 0)
        for i in range(trainIterations):
            netOPF.regions['sensor'].compute()
            level1SP.prepareInputs()
            level1SP.compute()
        level1SP.setParameter('learningMode', 0)
        level1SP.setParameter('inferenceMode', 1)

        # Train TP for trainIterations iterations
        print "Training TP"
        level1TP.setParameter('learningMode', 1)
        level1TP.setParameter('inferenceMode', 0)
        netOPF.run(trainIterations)
        level1TP.setParameter('learningMode', 0)
        level1TP.setParameter('inferenceMode', 1)

        # To match CLARegion we need to explicitly call finishLearning
        level1TP.executeCommand(['finishLearning'])

        # ==========================================================================
        # Get the TP instances from the two networks and compare them using tpDiff
        claSelf = netLPF.regions['level1'].getSelf()
        tp1 = claSelf._tfdr  # pylint: disable=W0212
        level1TPSelf = netOPF.regions['level1TP'].getSelf()
        tp2 = level1TPSelf._tfdr  # pylint: disable=W0212
        if not fdrutils.tpDiff2(tp1, tp2, _VERBOSITY, False):
            print "Trained temporal poolers are different!"
            self.assertTrue(False, "Trained temporal poolers are different!")
        print "Trained temporal poolers are identical\n"

        # ==========================================================================
        print "Running inference on the two networks for %d iterations" \
                                                            % (testIterations,)

        prevSensorTDOutputLPF = None
        prevSensorTDOutputOPF = None
        for i in xrange(testIterations):
            netLPF.run(1)
            outputLPF = level1LPF.getOutputData("bottomUpOut")

            # This is the current flow of top-down reconstruction in LPF:
            # 1) A callback function at the end of each iteration
            #    (logOutputsToFileIter) accesses the "topDownOut" of the
            #    CLARegion and logs it to a file on disk
            temporalTDOutputLPF = level1LPF.getOutputData("topDownOut")
            # 2) Encoder top-down compute on spReconstructedIn is run in
            # postprocess/inputpredictionstats.py. The encoder is accessed directly
            sensorTDOutputLPF = encoderLPF.topDownCompute(temporalTDOutputLPF)
            # 3) The bottom-up input is extracted from sensor
            if prevSensorTDOutputLPF is None:
                prevSensorTDOutputLPF = sensorTDOutputLPF

            sensorBUInputLPF = sensorLPF.getOutputData("sourceOut")
            #print sensorBUInputLPF, prevSensorTDOutputLPF, sensorTDOutputLPF

            # This is a single OPF iteration
            # netOPF.run(1)
            # Reconstruction is now done as part of the top-down flow

            # Bottom-up flow
            sensorOPF.setParameter('topDownMode', False)
            sensorOPF.prepareInputs()
            sensorOPF.compute()
            level1SP.setParameter('topDownMode', False)
            level1SP.prepareInputs()
            level1SP.compute()
            level1TP.setParameter('topDownMode', False)
            level1TP.prepareInputs()
            level1TP.compute()

            # Top-down flow
            level1TP.setParameter('topDownMode', True)
            level1TP.prepareInputs()
            level1TP.compute()
            level1SP.setParameter('topDownMode', True)
            level1SP.prepareInputs()
            level1SP.compute()
            sensorOPF.setParameter('topDownMode', True)
            sensorOPF.prepareInputs()
            sensorOPF.compute()

            outputOPF = level1TP.getOutputData("bottomUpOut")

            temporalTDOutputOPF = level1SP.getOutputData("temporalTopDownOut")
            sensorBUInputOPF = sensorOPF.getOutputData("sourceOut")
            sensorTDOutputOPF = sensorOPF.getOutputData("temporalTopDownOut")
            if prevSensorTDOutputOPF is None:
                prevSensorTDOutputOPF = sensorTDOutputOPF

            #print sensorBUInputLPF, prevSensorTDOutputLPF, sensorTDOutputLPF

            self.assertTrue(
                numpy.allclose(temporalTDOutputLPF, temporalTDOutputOPF)
                or numpy.allclose(sensorBUInputLPF, sensorBUInputOPF)
                or numpy.allclose(sensorTDOutputLPF, sensorTDOutputOPF)
                or numpy.allclose(outputLPF, outputOPF))

            lpfHash = outputLPF.nonzero()[0].sum()
            opfHash = outputOPF.nonzero()[0].sum()

            self.assertEqual(lpfHash, opfHash,
                             "Outputs for iteration %d unequal!" % i)

            prevSensorTDOutputLPF = sensorTDOutputLPF
            prevSensorTDOutputOPF = sensorTDOutputOPF
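The bottom-up and top-down passes above repeat the same three calls per region, with the region order reversed for the top-down pass. A hypothetical helper capturing that stepping pattern (prepareInputs must precede compute, as the example notes):

def stepRegions(regions, topDownMode):
    for region in regions:
        region.setParameter("topDownMode", topDownMode)
        region.prepareInputs()
        region.compute()

# Mirroring the loop body above:
#   stepRegions([sensorOPF, level1SP, level1TP], topDownMode=False)  # bottom-up
#   stepRegions([level1TP, level1SP, sensorOPF], topDownMode=True)   # top-down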
Example #12
    def testCLAAndSPTP(self):
        """
    The test creates two networks, trains their spatial and temporal poolers, and
    ensures they return identical results. The two networks are:
  
    a) an LPF-style network using the LPF regions RecordSensor and CLARegion.
  
    b) an OPF-style network using RecordSensor, SPRegion and TPRegion.
  
    The test trains each network for 500 iterations (on the gym.csv file) and
    runs inference for 100 iterations. During inference the outputs of both
    networks are tested to ensure they are identical.  The test also ensures
    the TP instances of CLARegion and TPRegion are identical after training.
    """

        print "Creating network..."

        netLPF = _createLPFNetwork(addSP=True, addTP=True)
        netOPF = _createOPFNetwork(addSP=True, addTP=True)

        # ==========================================================================
        # Train the LPF network for 500 iterations
        print "Training the LPF network for 500 iterations"
        level1LPF = netLPF.regions['level1']
        level1LPF.setParameter('learningMode', 1)
        level1LPF.setParameter('inferenceMode', 0)
        level1LPF.setParameter('trainingStep', 'spatial')
        netLPF.run(500)
        level1LPF.setParameter('trainingStep', 'temporal')
        netLPF.run(500)
        level1LPF.setParameter('learningMode', 0)
        level1LPF.setParameter('inferenceMode', 1)

        # ==========================================================================
        # Train the OPF network for 500 iterations
        print "Training the OPF network for 500 iterations"

        # Train SP for 500 iterations. Here we set the maxEnabledPhase to
        # exclude the TPRegion.
        netOPF.initialize()
        level1SP = netOPF.regions['level1SP']
        level1SP.setParameter('learningMode', 1)
        level1SP.setParameter('inferenceMode', 0)
        netOPF.setMaxEnabledPhase(1)
        netOPF.run(500)
        level1SP.setParameter('learningMode', 0)
        level1SP.setParameter('inferenceMode', 1)

        # Train TP for 500 iterations. Here we set the maxEnabledPhase to include
        # all regions
        print "Training TP"
        level1TP = netOPF.regions['level1TP']
        level1TP.setParameter('learningMode', 1)
        level1TP.setParameter('inferenceMode', 0)
        netOPF.setMaxEnabledPhase(netOPF.maxPhase)
        netOPF.run(500)
        level1TP.setParameter('learningMode', 0)
        level1TP.setParameter('inferenceMode', 1)

        # To match CLARegion we need to explicitly call finishLearning
        level1TP.executeCommand(['finishLearning'])

        # ==========================================================================
        # Get the TP instances from the two networks and compare them using tpDiff
        claSelf = netLPF.regions['level1'].getSelf()
        tp1 = claSelf._tfdr  # pylint: disable=W0212
        level1TPSelf = netOPF.regions['level1TP'].getSelf()
        tp2 = level1TPSelf._tfdr  # pylint: disable=W0212
        if not fdrutils.tpDiff2(tp1, tp2, _VERBOSITY, False):
            print "Trained temporal poolers are different!"
            self.assertTrue(False, "Trained temporal poolers are different!")
        else:
            print "Trained temporal poolers are identical\n"

        # ==========================================================================
        print "Running inference on the two networks for 100 iterations"

        for i in xrange(100):
            netLPF.run(1)
            netOPF.run(1)
            outputLPF = level1LPF.getOutputData("bottomUpOut")
            outputOPF = level1TP.getOutputData("bottomUpOut")

            lpfHash = outputLPF.nonzero()[0].sum()
            opfHash = outputOPF.nonzero()[0].sum()

            self.assertEqual(lpfHash, opfHash,
                             "Outputs for iteration %d unequal!" % i)
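The lpfHash/opfHash comparison fingerprints a sparse binary output by summing the indices of its active bits. Note this is a smoke check rather than strict equality: distinct index sets with equal sums collide (e.g. {0, 3} and {1, 2}). A minimal standalone illustration:

import numpy

def sparseFingerprint(output):
    # Sum of the indices of the nonzero entries
    return output.nonzero()[0].sum()

a = numpy.array([0, 1, 0, 1], dtype="uint32")
b = numpy.array([0, 1, 0, 1], dtype="uint32")
assert sparseFingerprint(a) == sparseFingerprint(b)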
Example #13
    def _testSegmentLearningSequence(self,
                                     tps,
                                     trainingSequences,
                                     testSequences,
                                     doResets=True):
        """Train the given TP once on the entire training set. on the Test a single
    set of sequences once and check that individual predictions reflect the true
    relative frequencies. Return a success code. Success code is 1 for pass, 0
    for fail."""

        # If no test sequences are specified, use the training sequences
        if testSequences is None:
            testSequences = trainingSequences

        cppTP, pyTP = tps[0], tps[1]

        if cppTP is not None:
            assert fdrutils.tpDiff2(cppTP, pyTP, g_options.verbosity) == True

        #--------------------------------------------------------------------------
        # Learn
        if g_options.verbosity > 0:
            print "============= Training ================="
            print "TP parameters:"
            print "CPP"
            if cppTP is not None:
                print cppTP.printParameters()
            print "\nPY"
            print pyTP.printParameters()

        for sequenceNum, trainingSequence in enumerate(trainingSequences):

            if g_options.verbosity > 1:
                print "============= New sequence ================="

            if doResets:
                if cppTP is not None:
                    cppTP.reset()
                pyTP.reset()

            for t, x in enumerate(trainingSequence):

                if g_options.verbosity > 1:
                    print "Time step", t, "sequence number", sequenceNum
                    print "Input: ", pyTP.printInput(x)
                    print "NNZ:", x.nonzero()

                x = numpy.array(x).astype('float32')
                if cppTP is not None:
                    cppTP.learn(x)
                pyTP.learn(x)

                if cppTP is not None:
                    assert fdrutils.tpDiff2(cppTP,
                                            pyTP,
                                            g_options.verbosity,
                                            relaxSegmentTests=False) == True

                if g_options.verbosity > 2:
                    if cppTP is not None:
                        print "CPP"
                        cppTP.printStates(
                            printPrevious=(g_options.verbosity > 4))
                    print "\nPY"
                    pyTP.printStates(printPrevious=(g_options.verbosity > 4))
                    print

            if g_options.verbosity > 4:
                print "Sequence finished. Complete state after sequence"
                if cppTP is not None:
                    print "CPP"
                    cppTP.printCells()
                print "\nPY"
                pyTP.printCells()
                print

        if g_options.verbosity > 2:
            print "Calling trim segments"

        if cppTP is not None:
            nSegsRemovedCPP, nSynsRemovedCPP = cppTP.trimSegments()
        nSegsRemoved, nSynsRemoved = pyTP.trimSegments()
        if cppTP is not None:
            assert nSegsRemovedCPP == nSegsRemoved
            assert nSynsRemovedCPP == nSynsRemoved

        if cppTP is not None:
            assert fdrutils.tpDiff2(cppTP, pyTP, g_options.verbosity) == True

        print "Training completed. Stats:"
        info = pyTP.getSegmentInfo()
        print "  nSegments:", info[0]
        print "  nSynapses:", info[1]
        if g_options.verbosity > 3:
            print "Complete state:"
            if cppTP is not None:
                print "CPP"
                cppTP.printCells()
            print "\nPY"
            pyTP.printCells()

        #---------------------------------------------------------------------------
        # Infer
        if g_options.verbosity > 1:
            print "============= Inference ================="

        if cppTP is not None:
            cppTP.collectStats = True
        pyTP.collectStats = True

        nPredictions = 0
        cppNumCorrect, pyNumCorrect = 0, 0

        for sequenceNum, testSequence in enumerate(testSequences):

            if g_options.verbosity > 1:
                print "============= New sequence ================="

            slen = len(testSequence)

            if doResets:
                if cppTP is not None:
                    cppTP.reset()
                pyTP.reset()

            for t, x in enumerate(testSequence):

                if g_options.verbosity >= 2:
                    print "Time step", t, '\nInput:'
                    pyTP.printInput(x)

                if cppTP is not None:
                    cppTP.infer(x)
                pyTP.infer(x)

                if cppTP is not None:
                    assert fdrutils.tpDiff2(cppTP, pyTP,
                                            g_options.verbosity) == True

                if g_options.verbosity > 2:
                    if cppTP is not None:
                        print "CPP"
                        cppTP.printStates(
                            printPrevious=(g_options.verbosity > 4),
                            printLearnState=False)
                    print "\nPY"
                    pyTP.printStates(printPrevious=(g_options.verbosity > 4),
                                     printLearnState=False)

                if cppTP is not None:
                    cppScores = cppTP.getStats()
                pyScores = pyTP.getStats()

                if g_options.verbosity >= 2:
                    if cppTP is not None:
                        print "CPP"
                        print cppScores
                    print "\nPY"
                    print pyScores

                if t < slen - 1 and t > pyTP.burnIn:
                    nPredictions += 1
                    if cppTP is not None:
                        if cppScores['curPredictionScore2'] > 0.3:
                            cppNumCorrect += 1
                    if pyScores['curPredictionScore2'] > 0.3:
                        pyNumCorrect += 1

        # Check that every inference was correct, excluding the very last inference
        if cppTP is not None:
            cppScores = cppTP.getStats()
        pyScores = pyTP.getStats()

        passTest = False
        if cppTP is not None:
            if cppNumCorrect == nPredictions and pyNumCorrect == nPredictions:
                passTest = True
        else:
            if pyNumCorrect == nPredictions:
                passTest = True

        if not passTest:
            print "CPP correct predictions:", cppNumCorrect
            print "PY correct predictions:", pyNumCorrect
            print "Total predictions:", nPredictions

        return passTest
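The pass criterion above counts a prediction as correct when 'curPredictionScore2' exceeds 0.3, and only for steps after the burn-in period and before the last element of a sequence. A condensed, hypothetical restatement for one sequence's per-step scores:

def allPredictionsCorrect(scoresPerStep, burnIn, threshold=0.3):
    # Eligible steps satisfy t > burnIn and t < len(sequence) - 1
    eligible = scoresPerStep[burnIn + 1:-1]
    return all(score > threshold for score in eligible)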
Example #14
    def _testSegmentLearningSequence(self, tps, trainingSequences, testSequences, doResets=True):
        """Train the given TP once on the entire training set. Then test on a
        single set of sequences and check that individual predictions reflect
        the true relative frequencies. Return a success code: 1 for pass, 0
        for fail."""

        # If no test sequences are specified, use the training sequences
        if testSequences is None:
            testSequences = trainingSequences

        cppTP, pyTP = tps[0], tps[1]

        if cppTP is not None:
            assert fdrutils.tpDiff2(cppTP, pyTP, g_options.verbosity) == True

        # --------------------------------------------------------------------------
        # Learn
        if g_options.verbosity > 0:
            print "============= Training ================="
            print "TP parameters:"
            print "CPP"
            if cppTP is not None:
                print cppTP.printParameters()
            print "\nPY"
            print pyTP.printParameters()

        for sequenceNum, trainingSequence in enumerate(trainingSequences):

            if g_options.verbosity > 1:
                print "============= New sequence ================="

            if doResets:
                if cppTP is not None:
                    cppTP.reset()
                pyTP.reset()

            for t, x in enumerate(trainingSequence):

                if g_options.verbosity > 1:
                    print "Time step", t, "sequence number", sequenceNum
                    print "Input: ", pyTP.printInput(x)
                    print "NNZ:", x.nonzero()

                x = numpy.array(x).astype("float32")
                if cppTP is not None:
                    cppTP.learn(x)
                pyTP.learn(x)

                if cppTP is not None:
                    assert fdrutils.tpDiff2(cppTP, pyTP, g_options.verbosity, relaxSegmentTests=False) == True

                if g_options.verbosity > 2:
                    if cppTP is not None:
                        print "CPP"
                        cppTP.printStates(printPrevious=(g_options.verbosity > 4))
                    print "\nPY"
                    pyTP.printStates(printPrevious=(g_options.verbosity > 4))
                    print

            if g_options.verbosity > 4:
                print "Sequence finished. Complete state after sequence"
                if cppTP is not None:
                    print "CPP"
                    cppTP.printCells()
                print "\nPY"
                pyTP.printCells()
                print

        if g_options.verbosity > 2:
            print "Calling trim segments"

        if cppTP is not None:
            nSegsRemovedCPP, nSynsRemovedCPP = cppTP.trimSegments()
        nSegsRemoved, nSynsRemoved = pyTP.trimSegments()
        if cppTP is not None:
            assert nSegsRemovedCPP == nSegsRemoved
            assert nSynsRemovedCPP == nSynsRemoved

        if cppTP is not None:
            assert fdrutils.tpDiff2(cppTP, pyTP, g_options.verbosity) == True

        print "Training completed. Stats:"
        info = pyTP.getSegmentInfo()
        print "  nSegments:", info[0]
        print "  nSynapses:", info[1]
        if g_options.verbosity > 3:
            print "Complete state:"
            if cppTP is not None:
                print "CPP"
                cppTP.printCells()
            print "\nPY"
            pyTP.printCells()

        # ---------------------------------------------------------------------------
        # Infer
        if g_options.verbosity > 1:
            print "============= Inference ================="

        if cppTP is not None:
            cppTP.collectStats = True
        pyTP.collectStats = True

        nPredictions = 0
        cppNumCorrect, pyNumCorrect = 0, 0

        for sequenceNum, testSequence in enumerate(testSequences):

            if g_options.verbosity > 1:
                print "============= New sequence ================="

            slen = len(testSequence)

            if doResets:
                if cppTP is not None:
                    cppTP.reset()
                pyTP.reset()

            for t, x in enumerate(testSequence):

                if g_options.verbosity >= 2:
                    print "Time step", t, "\nInput:"
                    pyTP.printInput(x)

                if cppTP is not None:
                    cppTP.infer(x)
                pyTP.infer(x)

                if cppTP is not None:
                    assert fdrutils.tpDiff2(cppTP, pyTP, g_options.verbosity) == True

                if g_options.verbosity > 2:
                    if cppTP is not None:
                        print "CPP"
                        cppTP.printStates(printPrevious=(g_options.verbosity > 4), printLearnState=False)
                    print "\nPY"
                    pyTP.printStates(printPrevious=(g_options.verbosity > 4), printLearnState=False)

                if cppTP is not None:
                    cppScores = cppTP.getStats()
                pyScores = pyTP.getStats()

                if g_options.verbosity >= 2:
                    if cppTP is not None:
                        print "CPP"
                        print cppScores
                    print "\nPY"
                    print pyScores

                if t < slen - 1 and t > pyTP.burnIn:
                    nPredictions += 1
                    if cppTP is not None:
                        if cppScores["curPredictionScore2"] > 0.3:
                            cppNumCorrect += 1
                    if pyScores["curPredictionScore2"] > 0.3:
                        pyNumCorrect += 1

        # Check that every inference was correct, excluding the very last inference
        if cppTP is not None:
            cppScores = cppTP.getStats()
        pyScores = pyTP.getStats()

        passTest = False
        if cppTP is not None:
            if cppNumCorrect == nPredictions and pyNumCorrect == nPredictions:
                passTest = True
        else:
            if pyNumCorrect == nPredictions:
                passTest = True

        if not passTest:
            print "CPP correct predictions:", cppNumCorrect
            print "PY correct predictions:", pyNumCorrect
            print "Total predictions:", nPredictions

        return passTest
Example #15
    def testCLAAndSPTPFlow(self, trainIterations=500, testIterations=100):
        """
    The test creates two networks, trains their spatial and temporal poolers, and
    ensures they return identical results for bottom-up and top-down flows.
  
    The two networks are:
  
    a) an LPF-style network using the LPF regions RecordSensor and CLARegion.
  
    b) an OPF-style network using RecordSensor, SPRegion and TPRegion.
  
    The test trains each network for 500 iterations (on the gym.csv file) and
    runs inference for 100 iterations. During inference the outputs of both
    networks are tested to ensure they are identical.  The test also ensures
    the TP instances of CLARegion and TPRegion are identical after training.
    """

        print "Creating network..."

        netLPF = _createLPFNetwork(addSP=True, addTP=True)
        sensorLPF = netLPF.regions["sensor"]
        pySensorLPF = netLPF.regions["sensor"].getSelf()
        encoderLPF = pySensorLPF.encoder
        level1LPF = netLPF.regions["level1"]

        netOPF = _createOPFNetwork(addSP=True, addTP=True)
        sensorOPF = netOPF.regions["sensor"]
        level1SP = netOPF.regions["level1SP"]
        level1TP = netOPF.regions["level1TP"]

        # ==========================================================================
        # Train the LPF network for trainIterations iterations
        print "Training the LPF network for %d iterations" % (trainIterations,)
        level1LPF.setParameter("learningMode", 1)
        level1LPF.setParameter("inferenceMode", 0)
        level1LPF.setParameter("trainingStep", "spatial")
        netLPF.run(trainIterations)
        level1LPF.setParameter("trainingStep", "temporal")
        netLPF.run(trainIterations)
        level1LPF.setParameter("learningMode", 0)
        level1LPF.setParameter("inferenceMode", 1)

        # ==========================================================================
        # Train the OPF network for trainIterations iterations
        print "Training the OPF network for %d iterations" % (trainIterations,)
        # Train SP for trainIterations iterations. Here we call compute on
        # regions explicitly. Note that prepareInputs must be called on a
        # region before its compute is called.
        netOPF.initialize()
        level1SP.setParameter("learningMode", 1)
        level1SP.setParameter("inferenceMode", 0)
        for i in range(trainIterations):
            netOPF.regions["sensor"].compute()
            level1SP.prepareInputs()
            level1SP.compute()
        level1SP.setParameter("learningMode", 0)
        level1SP.setParameter("inferenceMode", 1)

        # Train TP for trainIterations iterations
        print "Training TP"
        level1TP.setParameter("learningMode", 1)
        level1TP.setParameter("inferenceMode", 0)
        netOPF.run(trainIterations)
        level1TP.setParameter("learningMode", 0)
        level1TP.setParameter("inferenceMode", 1)

        # To match CLARegion we need to explicitly call finishLearning
        level1TP.executeCommand(["finishLearning"])

        # ==========================================================================
        # Get the TP instances from the two networks and compare them using tpDiff
        claSelf = netLPF.regions["level1"].getSelf()
        tp1 = claSelf._tfdr  # pylint: disable=W0212
        level1TPSelf = netOPF.regions["level1TP"].getSelf()
        tp2 = level1TPSelf._tfdr  # pylint: disable=W0212
        if not fdrutils.tpDiff2(tp1, tp2, _VERBOSITY, False):
            print "Trained temporal poolers are different!"
            self.assertTrue(False, "Trained temporal poolers are different!")
        print "Trained temporal poolers are identical\n"

        # ==========================================================================
        print "Running inference on the two networks for %d iterations" % (testIterations,)

        prevSensorTDOutputLPF = None
        prevSensorTDOutputOPF = None
        for i in xrange(testIterations):
            netLPF.run(1)
            outputLPF = level1LPF.getOutputData("bottomUpOut")

            # This is the current flow of top-down reconstruction in LPF:
            # 1) A callback function at the end of each iteration
            #    (logOutputsToFileIter) accesses the "topDownOut" of the
            #    CLARegion and logs it to a file on disk
            temporalTDOutputLPF = level1LPF.getOutputData("topDownOut")
            # 2) Encoder top-down compute on spReconstructedIn is run in
            # postprocess/inputpredictionstats.py. The encoder is accessed directly
            sensorTDOutputLPF = encoderLPF.topDownCompute(temporalTDOutputLPF)
            # 3) The bottom-up input is extracted from sensor
            if prevSensorTDOutputLPF is None:
                prevSensorTDOutputLPF = sensorTDOutputLPF

            sensorBUInputLPF = sensorLPF.getOutputData("sourceOut")
            # print sensorBUInputLPF, prevSensorTDOutputLPF, sensorTDOutputLPF

            # This is a single OPF iteration
            # netOPF.run(1)
            # Reconstruction is now done as part of the top-down flow

            # Bottom-up flow
            sensorOPF.setParameter("topDownMode", False)
            sensorOPF.prepareInputs()
            sensorOPF.compute()
            level1SP.setParameter("topDownMode", False)
            level1SP.prepareInputs()
            level1SP.compute()
            level1TP.setParameter("topDownMode", False)
            level1TP.prepareInputs()
            level1TP.compute()

            # Top-down flow
            level1TP.setParameter("topDownMode", True)
            level1TP.prepareInputs()
            level1TP.compute()
            level1SP.setParameter("topDownMode", True)
            level1SP.prepareInputs()
            level1SP.compute()
            sensorOPF.setParameter("topDownMode", True)
            sensorOPF.prepareInputs()
            sensorOPF.compute()

            outputOPF = level1TP.getOutputData("bottomUpOut")

            temporalTDOutputOPF = level1SP.getOutputData("temporalTopDownOut")
            sensorBUInputOPF = sensorOPF.getOutputData("sourceOut")
            sensorTDOutputOPF = sensorOPF.getOutputData("temporalTopDownOut")
            if prevSensorTDOutputOPF is None:
                prevSensorTDOutputOPF = sensorTDOutputOPF

            # print sensorBUInputLPF, prevSensorTDOutputLPF, sensorTDOutputLPF

            self.assertTrue(
                numpy.allclose(temporalTDOutputLPF, temporalTDOutputOPF)
                or numpy.allclose(sensorBUInputLPF, sensorBUInputOPF)
                or numpy.allclose(sensorTDOutputLPF, sensorTDOutputOPF)
                or numpy.allclose(outputLPF, outputOPF)
            )

            lpfHash = outputLPF.nonzero()[0].sum()
            opfHash = outputOPF.nonzero()[0].sum()

            self.assertEqual(lpfHash, opfHash, "Outputs for iteration %d unequal!" % (i))

            prevSensorTDOutputLPF = sensorTDOutputLPF
            prevSensorTDOutputOPF = sensorTDOutputOPF
Example #16
    def testCLAAndSPTP(self):
        """
    The test creates two networks, trains their spatial and temporal poolers, and
    ensures they return identical results. The two networks are:
  
    a) an LPF-style network using the LPF regions RecordSensor and CLARegion.
  
    b) an OPF-style network using RecordSensor, SPRegion and TPRegion.
  
    The test trains each network for 500 iterations (on the gym.csv file) and
    runs inference for 100 iterations. During inference the outputs of both
    networks are tested to ensure they are identical.  The test also ensures
    the TP instances of CLARegion and TPRegion are identical after training.
    """

        print "Creating network..."

        netLPF = _createLPFNetwork(addSP=True, addTP=True)
        netOPF = _createOPFNetwork(addSP=True, addTP=True)

        # ==========================================================================
        # Train the LPF network for 500 iterations
        print "Training the LPF network for 500 iterations"
        level1LPF = netLPF.regions["level1"]
        level1LPF.setParameter("learningMode", 1)
        level1LPF.setParameter("inferenceMode", 0)
        level1LPF.setParameter("trainingStep", "spatial")
        netLPF.run(500)
        level1LPF.setParameter("trainingStep", "temporal")
        netLPF.run(500)
        level1LPF.setParameter("learningMode", 0)
        level1LPF.setParameter("inferenceMode", 1)

        # ==========================================================================
        # Train the OPF network for 500 iterations
        print "Training the OPF network for 500 iterations"

        # Train SP for 500 iterations. Here we set the maxEnabledPhase to
        # exclude the TPRegion.
        netOPF.initialize()
        level1SP = netOPF.regions["level1SP"]
        level1SP.setParameter("learningMode", 1)
        level1SP.setParameter("inferenceMode", 0)
        netOPF.setMaxEnabledPhase(1)
        netOPF.run(500)
        level1SP.setParameter("learningMode", 0)
        level1SP.setParameter("inferenceMode", 1)

        # Train TP for 500 iterations. Here we set the maxEnabledPhase to include
        # all regions
        print "Training TP"
        level1TP = netOPF.regions["level1TP"]
        level1TP.setParameter("learningMode", 1)
        level1TP.setParameter("inferenceMode", 0)
        netOPF.setMaxEnabledPhase(netOPF.maxPhase)
        netOPF.run(500)
        level1TP.setParameter("learningMode", 0)
        level1TP.setParameter("inferenceMode", 1)

        # To match CLARegion we need to explicitly call finishLearning
        level1TP.executeCommand(["finishLearning"])

        # ==========================================================================
        # Get the TP instances from the two networks and compare them using tpDiff
        claSelf = netLPF.regions["level1"].getSelf()
        tp1 = claSelf._tfdr  # pylint: disable=W0212
        level1TPSelf = netOPF.regions["level1TP"].getSelf()
        tp2 = level1TPSelf._tfdr  # pylint: disable=W0212
        if not fdrutils.tpDiff2(tp1, tp2, _VERBOSITY, False):
            print "Trained temporal poolers are different!"
            self.assertTrue(False, "Trained temporal poolers are different!")
        else:
            print "Trained temporal poolers are identical\n"

        # ==========================================================================
        print "Running inference on the two networks for 100 iterations"

        for i in xrange(100):
            netLPF.run(1)
            netOPF.run(1)
            outputLPF = level1LPF.getOutputData("bottomUpOut")
            outputOPF = level1TP.getOutputData("bottomUpOut")

            lpfHash = outputLPF.nonzero()[0].sum()
            opfHash = outputOPF.nonzero()[0].sum()

            self.assertEqual(lpfHash, opfHash, "Outputs for iteration %d unequal!" % (i))