def testNetwork(testPath="mnist/testing", savedNetworkFile="mnist_net.nta"):
    net = Network(savedNetworkFile)
    sensor = net.regions['sensor']
    sp = net.regions["SP"]
    classifier = net.regions['classifier']

    print "Reading test images"
    sensor.executeCommand(["loadMultipleImages", testPath])
    numTestImages = sensor.getParameter('numImages')
    print "Number of test images", numTestImages

    start = time.time()

    # Various region parameters
    sensor.setParameter('explorer', 'Flash')
    classifier.setParameter('inferenceMode', 1)
    classifier.setParameter('learningMode', 0)
    sp.setParameter('inferenceMode', 1)
    sp.setParameter('learningMode', 0)

    numCorrect = 0
    for i in range(numTestImages):
        net.run(1)
        inferredCategory = classifier.getOutputData('categoriesOut').argmax()
        if sensor.getOutputData('categoryOut') == inferredCategory:
            numCorrect += 1
        if i % (numTestImages / 100) == 0:
            print "Iteration", i, "numCorrect=", numCorrect

    # Some interesting statistics
    print "Testing time:", time.time() - start
    print "Number of test images", numTestImages
    print "num correct=", numCorrect
    print "pct correct=", (100.0 * numCorrect) / numTestImages
# Example 2
  def testRunPCANode(self):
    from nupic.engine import *

    numpy.random.RandomState(37)

    inputSize = 8

    net = Network()
    Network.registerRegion(ImageSensor)
    net.addRegion('sensor', 'py.ImageSensor' ,
          '{ width: %d, height: %d }' % (inputSize, inputSize))

    params = """{bottomUpCount: %d,
              SVDSampleCount: 5,
              SVDDimCount: 2}""" % inputSize

    pca = net.addRegion('pca', 'py.PCANode', params)

    #nodeAbove = CreateNode("py.ImageSensor", phase=0, categoryOut=1, dataOut=3,
    #                       width=3, height=1)
    #net.addElement('nodeAbove', nodeAbove)

    linkParams = '{ mapping: in, rfSize: [%d, %d] }' % (inputSize, inputSize)
    net.link('sensor', 'pca', 'UniformLink', linkParams, 'dataOut', 'bottomUpIn')

    net.initialize()

    for i in range(10):
      pca.getSelf()._testInputs = numpy.random.random([inputSize])
      net.run(1)
# Example 3
    def testRunPCANode(self):
        from nupic.engine import *

        numpy.random.RandomState(37)

        inputSize = 8

        net = Network()
        Network.registerRegion(ImageSensor)
        net.addRegion('sensor', 'py.ImageSensor',
                      '{ width: %d, height: %d }' % (inputSize, inputSize))

        params = """{bottomUpCount: %d,
              SVDSampleCount: 5,
              SVDDimCount: 2}""" % inputSize

        pca = net.addRegion('pca', 'py.PCANode', params)

        #nodeAbove = CreateNode("py.ImageSensor", phase=0, categoryOut=1, dataOut=3,
        #                       width=3, height=1)
        #net.addElement('nodeAbove', nodeAbove)

        linkParams = '{ mapping: in, rfSize: [%d, %d] }' % (inputSize,
                                                            inputSize)
        net.link('sensor', 'pca', 'UniformLink', linkParams, 'dataOut',
                 'bottomUpIn')

        net.initialize()

        for i in range(10):
            pca.getSelf()._testInputs = numpy.random.random([inputSize])
            net.run(1)
# Example 4
def testNetwork(testPath, savedNetworkFile="mnist_net.nta"):
  net = Network(savedNetworkFile)
  sensor = net.regions["sensor"]
  sp = net.regions["SP"]
  classifier = net.regions["classifier"]

  print "Reading test images"
  sensor.executeCommand(["loadMultipleImages",testPath])
  numTestImages = sensor.getParameter("numImages")
  print "Number of test images",numTestImages

  start = time.time()

  # Various region parameters
  sensor.setParameter("explorer", yaml.dump(["RandomFlash",
                                             {"replacement": False}]))
  classifier.setParameter("inferenceMode", 1)
  classifier.setParameter("learningMode", 0)
  sp.setParameter("inferenceMode", 1)
  sp.setParameter("learningMode", 0)

  numCorrect = 0
  for i in range(numTestImages):
    net.run(1)
    inferredCategory = classifier.getOutputData("categoriesOut").argmax()
    if sensor.getOutputData("categoryOut") == inferredCategory:
      numCorrect += 1
    if i%(numTestImages/100)== 0:
      print "Iteration",i,"numCorrect=",numCorrect

  # Some interesting statistics
  print "Testing time:",time.time()-start
  print "Number of test images",numTestImages
  print "num correct=",numCorrect
  print "pct correct=",(100.0*numCorrect) / numTestImages
# Example 5
def runExperiment():
    Network.unregisterRegion("ImageSensor")
    Network.registerRegion(ImageSensor)
    Network.registerRegion(PCANode)
    inputSize = 8

    net = Network()
    sensor = net.addRegion(
        "sensor", "py.ImageSensor",
        "{ width: %d, height: %d }" % (inputSize, inputSize))

    params = ("{bottomUpCount: %s, "
              " SVDSampleCount: 5, "
              " SVDDimCount: 2}" % inputSize)

    pca = net.addRegion("pca", "py.PCANode", params)

    linkParams = "{ mapping: in, rfSize: [%d, %d] }" % (inputSize, inputSize)
    net.link("sensor", "pca", "UniformLink", linkParams, "dataOut",
             "bottomUpIn")

    net.initialize()

    for i in range(10):
        pca.getSelf()._testInputs = numpy.random.random([inputSize])
        net.run(1)
        print s.sendRequest("nodeOPrint pca_node")
def testNetwork(testPath="test_images/testing", savedNetworkFile="imageNet_net.nta"):
    net = Network(savedNetworkFile)
    sensor = net.regions["sensor"]
    sp = net.regions["SP"]
    classifier = net.regions["classifier"]

    print "Reading test images"
    sensor.executeCommand(["loadMultipleImages", testPath])
    numTestImages = sensor.getParameter("numImages")
    print "Number of test images", numTestImages

    start = time.time()

    # Various region parameters
    sensor.setParameter("explorer", "ExhaustiveSweep")
    classifier.setParameter("inferenceMode", 1)
    classifier.setParameter("learningMode", 0)
    sp.setParameter("inferenceMode", 1)
    sp.setParameter("learningMode", 0)

    numCorrect = 0
    for i in range(numTestImages):
        net.run(1)
        inferredCategory = classifier.getOutputData("categoriesOut").argmax()
        if sensor.getOutputData("categoryOut") == inferredCategory:
            numCorrect += 1
        if i % (numTestImages / 100) == 0:
            print "Iteration", i, "numCorrect=", numCorrect

    # Some interesting statistics
    print "Testing time:", time.time() - start
    print "Number of test images", numTestImages
    print "num correct=", numCorrect
    print "pct correct=", (100.0 * numCorrect) / numTestImages
# Example 7
def runExperiment():
  Network.unregisterRegion("ImageSensor")
  Network.registerRegion(ImageSensor)
  Network.registerRegion(PCANode)
  inputSize = 8

  net = Network()
  sensor = net.addRegion(
      "sensor", "py.ImageSensor" ,
      "{ width: %d, height: %d }" % (inputSize, inputSize))

  params = ("{bottomUpCount: %s, "
            " SVDSampleCount: 5, "
            " SVDDimCount: 2}" % inputSize)

  pca = net.addRegion("pca", "py.PCANode", params)

  linkParams = "{ mapping: in, rfSize: [%d, %d] }" % (inputSize, inputSize)
  net.link("sensor", "pca", "UniformLink", linkParams, "dataOut", "bottomUpIn")

  net.initialize()

  for i in range(10):
    pca.getSelf()._testInputs = numpy.random.random([inputSize])
    net.run(1)
    print s.sendRequest("nodeOPrint pca_node")
# Example 8
def testNetwork(testPath="mnist/testing", savedNetworkFile="mnist_net.nta"):
  net = Network(savedNetworkFile)
  sensor = net.regions['sensor']
  sp = net.regions["SP"]
  classifier = net.regions['classifier']

  print "Reading test images"
  sensor.executeCommand(["loadMultipleImages",testPath])
  numTestImages = sensor.getParameter('numImages')
  print "Number of test images",numTestImages

  start = time.time()

  # Various region parameters
  sensor.setParameter('explorer','Flash')
  classifier.setParameter('inferenceMode', 1)
  classifier.setParameter('learningMode', 0)
  sp.setParameter('inferenceMode', 1)
  sp.setParameter('learningMode', 0)

  numCorrect = 0
  for i in range(numTestImages):
    net.run(1)
    inferredCategory = classifier.getOutputData('categoriesOut').argmax()
    if sensor.getOutputData('categoryOut') == inferredCategory:
      numCorrect += 1
    if i%(numTestImages/10)== 0:
      print "Iteration",i,"numCorrect=",numCorrect

  # Some interesting statistics
  print "Testing time:",time.time()-start
  print "Number of test images",numTestImages
  print "num correct=",numCorrect
  print "pct correct=",(100.0*numCorrect) / numTestImages
# Example 9
 def testSaveAndReload(self):
   """
   This function tests saving and loading. It will train a network for 500
   iterations, then save it and reload it as a second network instance. It will
   then run both networks for 100 iterations and ensure they return identical
   results.
   """

   print "Creating network..."

   netOPF = _createOPFNetwork()
   level1OPF = netOPF.regions['level1SP']

   # ==========================================================================
   print "Training network for 500 iterations"
   # Learning on / inference off for the training phase...
   level1OPF.setParameter('learningMode', 1)
   level1OPF.setParameter('inferenceMode', 0)
   netOPF.run(500)
   # ...then freeze learning and switch to inference before saving, so both
   # instances below behave identically.
   level1OPF.setParameter('learningMode', 0)
   level1OPF.setParameter('inferenceMode', 1)

   # ==========================================================================
   # Save network and reload as a second instance. We need to reset the data
   # source for the unsaved network so that both instances start at the same
   # place
   print "Saving and reload network"
   _, tmpNetworkFilename = _setupTempDirectory("trained.nta")
   netOPF.save(tmpNetworkFilename)
   netOPF2 = Network(tmpNetworkFilename)
   level1OPF2 = netOPF2.regions['level1SP']

   # Point the original network's sensor back at the start of the data file.
   sensor = netOPF.regions['sensor'].getSelf()
   trainFile = resource_filename("nupic.datafiles", "extra/gym/gym.csv")
   sensor.dataSource = FileRecordStream(streamID=trainFile)
   sensor.dataSource.setAutoRewind(True)

   # ==========================================================================
   print "Running inference on the two networks for 100 iterations"
   for _ in xrange(100):
     netOPF2.run(1)
     netOPF.run(1)
     l1outputOPF2 = level1OPF2.getOutputData("bottomUpOut")
     l1outputOPF  = level1OPF.getOutputData("bottomUpOut")
     # Reduce each output to a cheap hash (sum of nonzero indices); equal
     # hashes every iteration is the evidence the two networks agree.
     opfHash2 = l1outputOPF2.nonzero()[0].sum()
     opfHash  = l1outputOPF.nonzero()[0].sum()

     self.assertEqual(opfHash2, opfHash)
# Example 10
    def testSaveAndReload(self):
        """
    This function tests saving and loading. It will train a network for 500
    iterations, then save it and reload it as a second network instance. It will
    then run both networks for 100 iterations and ensure they return identical
    results.
    """

        print "Creating network..."

        netOPF = _createOPFNetwork()
        level1OPF = netOPF.regions['level1SP']

        # ==========================================================================
        print "Training network for 500 iterations"
        # Learning on / inference off for the training phase...
        level1OPF.setParameter('learningMode', 1)
        level1OPF.setParameter('inferenceMode', 0)
        netOPF.run(500)
        # ...then freeze learning and switch to inference before saving, so
        # both instances below behave identically.
        level1OPF.setParameter('learningMode', 0)
        level1OPF.setParameter('inferenceMode', 1)

        # ==========================================================================
        # Save network and reload as a second instance. We need to reset the data
        # source for the unsaved network so that both instances start at the same
        # place
        print "Saving and reload network"
        _, tmpNetworkFilename = _setupTempDirectory("trained.nta")
        netOPF.save(tmpNetworkFilename)
        netOPF2 = Network(tmpNetworkFilename)
        level1OPF2 = netOPF2.regions['level1SP']

        # Point the original network's sensor back at the start of the file.
        sensor = netOPF.regions['sensor'].getSelf()
        trainFile = resource_filename("nupic.datafiles", "extra/gym/gym.csv")
        sensor.dataSource = FileRecordStream(streamID=trainFile)
        sensor.dataSource.setAutoRewind(True)

        # ==========================================================================
        print "Running inference on the two networks for 100 iterations"
        for _ in xrange(100):
            netOPF2.run(1)
            netOPF.run(1)
            l1outputOPF2 = level1OPF2.getOutputData("bottomUpOut")
            l1outputOPF = level1OPF.getOutputData("bottomUpOut")
            # Reduce each output to a cheap hash (sum of nonzero indices);
            # equal hashes every iteration is the evidence the nets agree.
            opfHash2 = l1outputOPF2.nonzero()[0].sum()
            opfHash = l1outputOPF.nonzero()[0].sum()

            self.assertEqual(opfHash2, opfHash)
  def testOverlap(self):
    """Build a RawSensor -> ColumnPoolerRegion network, check that the
    sensor's output width matches the pooler's input width, then queue
    three records and run them through."""
    sensorParams = {"outputWidth": 8 * 2048}
    network = Network()
    sensorRegion = network.addRegion("raw", "py.RawSensor",
                                     json.dumps(sensorParams))
    poolerRegion = network.addRegion("L2", "py.ColumnPoolerRegion", "")
    network.link("raw", "L2", "UniformLink", "")

    self.assertEqual(sensorRegion.getParameter("outputWidth"),
                     poolerRegion.getParameter("inputWidth"),
                     "Incorrect outputWidth parameter")

    # Queue three records: (active indices, reset flag, sequence id).
    sensorImpl = sensorRegion.getSelf()
    for indices, reset, sequenceId in (([2, 4, 6], 0, 42),
                                       ([2, 42, 1023], 1, 43),
                                       ([18, 19, 20], 0, 44)):
      sensorImpl.addDataToQueue(indices, reset, sequenceId)

    # Run the network and check outputs are as expected
    network.run(3)
# Example 12
  def testNetworkCreate(self):
    """Build a RawSensor -> L2Column network, check that the sensor's
    output width matches the column's input width, then queue three
    records and run them through."""
    sensorParams = {"outputWidth": 16*2048}
    network = Network()
    sensorRegion = network.addRegion("raw", "py.RawSensor",
                                     json.dumps(sensorParams))
    columnRegion = network.addRegion("L2", "py.L2Column", "")
    network.link("raw", "L2", "UniformLink", "")

    self.assertEqual(sensorRegion.getParameter("outputWidth"),
                     columnRegion.getParameter("inputWidth"),
                     "Incorrect outputWidth parameter")

    # Queue three records: (active indices, reset flag, sequence id).
    sensorImpl = sensorRegion.getSelf()
    for indices, reset, sequenceId in (([2, 4, 6], 0, 42),
                                       ([2, 42, 1023], 1, 43),
                                       ([18, 19, 20], 0, 44)):
      sensorImpl.addDataToQueue(indices, reset, sequenceId)

    # Run the network and check outputs are as expected
    network.run(3)
# Example 13
    def testSensor(self):
        """Exercise CoordinateSensorRegion end to end: queue records (via
        command strings and direct calls), check the encoded data, reset and
        sequence-id outputs, then verify the queue survives save/load."""
        # Create a simple network to test the sensor
        params = {
            "activeBits": self.encoder.w,
            "outputWidth": self.encoder.n,
            "radius": 2,
            "verbosity": self.encoder.verbosity,
        }
        net = Network()
        region = net.addRegion("coordinate", "py.CoordinateSensorRegion", json.dumps(params))
        vfe = net.addRegion("output", "VectorFileEffector", "")
        net.link("coordinate", "output", "UniformLink", "")

        self.assertEqual(region.getParameter("outputWidth"), self.encoder.n, "Incorrect outputWidth parameter")

        # Add vectors to the queue using two different add methods. Later we
        # will check to ensure these are actually output properly.
        # executeCommand takes all arguments as strings.
        region.executeCommand(["addDataToQueue", "[2, 4, 6]", "0", "42"])
        regionPy = region.getSelf()
        regionPy.addDataToQueue([2, 42, 1023], 1, 43)
        regionPy.addDataToQueue([18, 19, 20], 0, 44)

        # Set an output file before we run anything
        vfe.setParameter("outputFile", os.path.join(self.tmpDir, "temp.csv"))

        # Run the network and check outputs are as expected
        # First queued record: coordinate [2, 4, 6], reset=0, sequenceId=42.
        net.run(1)
        expected = self.encoder.encode((numpy.array([2, 4, 6]), params["radius"]))
        actual = region.getOutputData("dataOut")
        self.assertEqual(actual.sum(), expected.sum(), "Value of dataOut incorrect")
        self.assertEqual(region.getOutputData("resetOut"), 0, "Value of resetOut incorrect")
        self.assertEqual(region.getOutputData("sequenceIdOut"), 42, "Value of sequenceIdOut incorrect")

        # Second queued record: coordinate [2, 42, 1023], reset=1, id=43.
        net.run(1)
        expected = self.encoder.encode((numpy.array([2, 42, 1023]), params["radius"]))
        actual = region.getOutputData("dataOut")
        self.assertEqual(actual.sum(), expected.sum(), "Value of dataOut incorrect")
        self.assertEqual(region.getOutputData("resetOut"), 1, "Value of resetOut incorrect")
        self.assertEqual(region.getOutputData("sequenceIdOut"), 43, "Value of sequenceIdOut incorrect")

        # Make sure we can save and load the network after running
        net.save(os.path.join(self.tmpDir, "coordinateNetwork.nta"))
        net2 = Network(os.path.join(self.tmpDir, "coordinateNetwork.nta"))
        region2 = net2.regions.get("coordinate")
        vfe2 = net2.regions.get("output")

        # Ensure parameters are preserved
        self.assertEqual(region2.getParameter("outputWidth"), self.encoder.n, "Incorrect outputWidth parameter")

        # Ensure the queue is preserved through save/load
        # (the third record was queued but not yet consumed before saving)
        vfe2.setParameter("outputFile", os.path.join(self.tmpDir, "temp.csv"))
        net2.run(1)
        expected = self.encoder.encode((numpy.array([18, 19, 20]), params["radius"]))
        actual = region2.getOutputData("dataOut")
        self.assertEqual(actual.sum(), expected.sum(), "Value of dataOut incorrect")
        self.assertEqual(region2.getOutputData("resetOut"), 0, "Value of resetOut incorrect")

        self.assertEqual(region2.getOutputData("sequenceIdOut"), 44, "Value of sequenceIdOut incorrect")
# Example 14
  def testSensor(self):
    """Exercise RawSensor end to end: queue records (via command strings and
    direct calls), check the data, reset and sequence-id outputs, then
    verify the queue survives save/load."""

    # Create a simple network to test the sensor
    rawParams = {"outputWidth": 1029}
    net = Network()
    rawSensor = net.addRegion("raw","py.RawSensor", json.dumps(rawParams))
    vfe = net.addRegion("output","VectorFileEffector","")
    net.link("raw", "output", "UniformLink", "")

    self.assertEqual(rawSensor.getParameter("outputWidth"),1029,
                     "Incorrect outputWidth parameter")

    # Add vectors to the queue using two different add methods. Later we
    # will check to ensure these are actually output properly.
    # executeCommand takes all arguments as strings.
    rawSensor.executeCommand(["addDataToQueue", "[2, 4, 6]", "0", "42"])
    rawSensorPy = rawSensor.getSelf()
    rawSensorPy.addDataToQueue([2, 42, 1023], 1, 43)
    rawSensorPy.addDataToQueue([18, 19, 20], 0, 44)

    # Set an output file before we run anything
    vfe.setParameter("outputFile",os.path.join(self.tmpDir,"temp.csv"))

    # Run the network and check outputs are as expected
    # dataOut is checked via the sum of its nonzero indices, which should
    # equal the sum of the queued active indices.
    net.run(1)
    self.assertEqual(rawSensor.getOutputData("dataOut").nonzero()[0].sum(),
                     sum([2, 4, 6]), "Value of dataOut incorrect")
    self.assertEqual(rawSensor.getOutputData("resetOut").sum(),0,
                     "Value of resetOut incorrect")
    self.assertEqual( rawSensor.getOutputData("sequenceIdOut").sum(),42,
                      "Value of sequenceIdOut incorrect")

    # Second queued record: indices [2, 42, 1023], reset=1, id=43.
    net.run(1)
    self.assertEqual(rawSensor.getOutputData("dataOut").nonzero()[0].sum(),
                     sum([2, 42, 1023]), "Value of dataOut incorrect")
    self.assertEqual(rawSensor.getOutputData("resetOut").sum(),1,
                     "Value of resetOut incorrect")
    self.assertEqual( rawSensor.getOutputData("sequenceIdOut").sum(),43,
                      "Value of sequenceIdOut incorrect")

    # Make sure we can save and load the network after running
    net.save(os.path.join(self.tmpDir,"rawNetwork.nta"))
    net2 = Network(os.path.join(self.tmpDir,"rawNetwork.nta"))
    rawSensor2 = net2.regions.get("raw")
    vfe2 = net2.regions.get("output")

    # Ensure parameters are preserved
    self.assertEqual(rawSensor2.getParameter("outputWidth"),1029,
                     "Incorrect outputWidth parameter")

    # Ensure the queue is preserved through save/load
    # (the third record was queued but not yet consumed before saving)
    vfe2.setParameter("outputFile",os.path.join(self.tmpDir,"temp.csv"))
    net2.run(1)
    self.assertEqual(rawSensor2.getOutputData("dataOut").nonzero()[0].sum(),
                     sum([18, 19, 20]), "Value of dataOut incorrect")
    self.assertEqual(rawSensor2.getOutputData("resetOut").sum(),0,
                     "Value of resetOut incorrect")
    self.assertEqual( rawSensor2.getOutputData("sequenceIdOut").sum(),44,
                      "Value of sequenceIdOut incorrect")
  def testCreateL4L6aLocationColumn(self):
    """
    Test 'createL4L6aLocationColumn' by inferring a set of hand crafted objects
    """
    scale = []
    orientation = []
    # Initialize L6a location region with 5 modules varying scale by sqrt(2) and
    # 4 different random orientations for each scale
    for i in xrange(5):
      for _ in xrange(4):
        angle = np.radians(random.gauss(7.5, 7.5))
        orientation.append(random.choice([angle, -angle]))
        scale.append(10.0 * (math.sqrt(2) ** i))

    net = Network()
    createL4L6aLocationColumn(net, {
      "inverseReadoutResolution": 8,
      "sensorInputSize": NUM_OF_CELLS,
      "L4Params": {
        "columnCount": NUM_OF_COLUMNS,
        "cellsPerColumn": CELLS_PER_COLUMN,
        "activationThreshold": 15,
        "minThreshold": 15,
        "initialPermanence": 1.0,
        "implementation": "ApicalTiebreak",
        "maxSynapsesPerSegment": -1
      },
      "L6aParams": {
        "moduleCount": len(scale),
        "scale": scale,
        "orientation": orientation,
        "anchorInputSize": NUM_OF_CELLS,
        "activationThreshold": 8,
        "initialPermanence": 1.0,
        "connectedPermanence": 0.5,
        "learningThreshold": 8,
        "sampleSize": 10,
        "permanenceIncrement": 0.1,
        "permanenceDecrement": 0.0,
        "bumpOverlapMethod": "probabilistic"
      }
    })
    net.initialize()

    # L4 handle is retained for reference; it is not used below.
    L4 = net.regions['L4']
    L6a = net.regions['L6a']
    sensor = net.regions['sensorInput'].getSelf()
    motor = net.regions['motorInput'].getSelf()

    # Keeps a list of learned objects
    learnedRepresentations = defaultdict(list)

    # Learn Objects
    self._setLearning(net, True)

    for objectDescription in OBJECTS:
      reset = True
      previousLocation = None
      L6a.executeCommand(["activateRandomLocation"])

      for iFeature, feature in enumerate(objectDescription["features"]):
        # Move the sensor to the center of the object
        locationOnObject = np.array([feature["top"] + feature["height"] / 2.,
                                     feature["left"] + feature["width"] / 2.])

        # Calculate displacement from previous location
        if previousLocation is not None:
          motor.addDataToQueue(locationOnObject - previousLocation)
        previousLocation = locationOnObject

        # Sense feature at location
        sensor.addDataToQueue(FEATURE_ACTIVE_COLUMNS[feature["name"]], reset, 0)
        net.run(1)
        reset = False

        # Save learned representations
        # (nonzero indices of the sensory-associated cells, keyed by
        # object name and feature index)
        representation = L6a.getOutputData("sensoryAssociatedCells")
        representation = representation.nonzero()[0]
        learnedRepresentations[
          (objectDescription["name"], iFeature)] = representation

    # Infer objects
    self._setLearning(net, False)

    for objectDescription in OBJECTS:
      reset = True
      previousLocation = None
      inferred = False

      # Touch the object's features in a random order.
      features = objectDescription["features"]
      touchSequence = range(len(features))
      random.shuffle(touchSequence)

      for iFeature in touchSequence:
        feature = features[iFeature]

        # Move the sensor to the center of the object
        locationOnObject = np.array([feature["top"] + feature["height"] / 2.,
                                     feature["left"] + feature["width"] / 2.])

        # Calculate displacement from previous location
        if previousLocation is not None:
          motor.addDataToQueue(locationOnObject - previousLocation)
        previousLocation = locationOnObject

        # Sense feature at location
        sensor.addDataToQueue(FEATURE_ACTIVE_COLUMNS[feature["name"]], reset, 0)
        net.run(1)
        reset = False

        representation = L6a.getOutputData("sensoryAssociatedCells")
        representation = representation.nonzero()[0]
        target_representations = set(
          learnedRepresentations[(objectDescription["name"], iFeature)])

        # Inference succeeds once the active cells are a subset of the
        # representation learned for this (object, feature) pair.
        inferred = (set(representation) <= target_representations)
        if inferred:
          break

      self.assertTrue(inferred)
    def runNodesTest(self, nodeType1, nodeType2):
        """Build a two-level network of test nodes linked with TestFanIn2,
        run it, and verify dimensions and per-node outputs over two
        iterations; finally demonstrate that linking can induce dimensions
        on the upstream region."""
        # =====================================================
        # Build and run the network
        # =====================================================
        LOGGER.info('test(level1: %s, level2: %s)', nodeType1, nodeType2)
        net = Network()
        level1 = net.addRegion("level1", nodeType1, "{int32Param: 15}")
        dims = Dimensions([6, 4])
        level1.setDimensions(dims)

        level2 = net.addRegion("level2", nodeType2, "{real64Param: 128.23}")

        net.link("level1", "level2", "TestFanIn2", "")

        # Could call initialize here, but not necessary as net.run()
        # initializes implicitly.
        # net.initialize()

        net.run(1)
        LOGGER.info("Successfully created network and ran for one iteration")

        # =====================================================
        # Check everything
        # =====================================================
        dims = level1.getDimensions()
        self.assertEqual(len(dims), 2)
        self.assertEqual(dims[0], 6)
        self.assertEqual(dims[1], 4)

        # TestFanIn2 halves each dimension: [6, 4] -> [3, 2].
        dims = level2.getDimensions()
        self.assertEqual(len(dims), 2)
        self.assertEqual(dims[0], 3)
        self.assertEqual(dims[1], 2)

        # Check L1 output. "False" means don't copy, i.e.
        # get a pointer to the actual output
        # Actual output values are determined by the TestNode
        # compute() behavior.
        l1output = level1.getOutputData("bottomUpOut")
        self.assertEqual(len(l1output), 48)  # 24 nodes; 2 values per node
        for i in xrange(24):
            self.assertEqual(l1output[2 * i],
                             0)  # size of input to each node is 0
            self.assertEqual(l1output[2 * i + 1], i)  # node number

        # check L2 output.
        l2output = level2.getOutputData("bottomUpOut")
        self.assertEqual(len(l2output), 12)  # 6 nodes; 2 values per node
        # Output val = node number + sum(inputs)
        # Can compute from knowing L1 layout
        #
        #  00 01 | 02 03 | 04 05
        #  06 07 | 08 09 | 10 11
        #  ---------------------
        #  12 13 | 14 15 | 16 17
        #  18 19 | 20 21 | 22 23
        outputVals = []
        outputVals.append(0 + (0 + 1 + 6 + 7))
        outputVals.append(1 + (2 + 3 + 8 + 9))
        outputVals.append(2 + (4 + 5 + 10 + 11))
        outputVals.append(3 + (12 + 13 + 18 + 19))
        outputVals.append(4 + (14 + 15 + 20 + 21))
        outputVals.append(5 + (16 + 17 + 22 + 23))
        for i in xrange(6):
            if l2output[2 * i] != 8:
                LOGGER.info(l2output[2 * i])
                # from dbgp.client import brk; brk(port=9019)

            self.assertEqual(l2output[2 * i],
                             8)  # size of input for each node is 8
            self.assertEqual(l2output[2 * i + 1], outputVals[i])

        # =====================================================
        # Run for one more iteration
        # =====================================================
        LOGGER.info("Running for a second iteration")
        net.run(1)

        # =====================================================
        # Check everything again
        # =====================================================

        # Outputs are all the same except that the first output is
        # incremented by the iteration number
        # NOTE(review): l1output/l2output are re-checked without re-fetching,
        # which relies on getOutputData returning a live view of the output
        # buffer (consistent with the "pointer" comment above) — confirm.
        for i in xrange(24):
            self.assertEqual(l1output[2 * i], 1)
            self.assertEqual(l1output[2 * i + 1], i)

        for i in xrange(6):
            self.assertEqual(l2output[2 * i], 9)
            self.assertEqual(l2output[2 * i + 1], outputVals[i] + 4)

        # =====================================================
        # Demonstrate a few other features
        # =====================================================

        #
        # Linking can induce dimensions downward
        #

        net = Network()
        level1 = net.addRegion("level1", nodeType1, "")
        level2 = net.addRegion("level2", nodeType2, "")
        dims = Dimensions([3, 2])
        level2.setDimensions(dims)
        net.link("level1", "level2", "TestFanIn2", "")
        net.initialize()

        # Level1 should now have dimensions [6, 4]
        self.assertEqual(level1.getDimensions()[0], 6)
        self.assertEqual(level1.getDimensions()[1], 4)
# Example 17
  def testSimpleMulticlassNetwork(self):
    """Train a RecordSensor -> KNNClassifierRegion network on 8 records with
    multi-category labels, then verify it reproduces the expected category
    probabilities on the same data."""

    # Setup data record stream of fake data (with three categories)
    filename = _getTempFileName()
    fields = [("timestamp", "datetime", "T"),
              ("value", "float", ""),
              ("reset", "int", "R"),
              ("sid", "int", "S"),
              ("categories", "list", "C")]
    records = (
      [datetime(day=1, month=3, year=2010), 0.0, 1, 0, ""],
      [datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1 2"],
      [datetime(day=3, month=3, year=2010), 1.0, 0, 0, "1 2"],
      [datetime(day=4, month=3, year=2010), 2.0, 0, 0, "0"],
      [datetime(day=5, month=3, year=2010), 3.0, 0, 0, "1 2"],
      [datetime(day=6, month=3, year=2010), 5.0, 0, 0, "1 2"],
      [datetime(day=7, month=3, year=2010), 8.0, 0, 0, "0"],
      [datetime(day=8, month=3, year=2010), 13.0, 0, 0, "1 2"])
    dataSource = FileRecordStream(streamID=filename, write=True, fields=fields)
    for r in records:
      dataSource.appendRecord(list(r))

    # Create the network and get region instances.
    # The sensor feeds both the encoded data and the category labels
    # into the classifier.
    net = Network()
    net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
    net.addRegion("classifier","py.KNNClassifierRegion",
                  "{'k': 2,'distThreshold': 0,'maxCategoryCount': 3}")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput = "dataOut", destInput = "bottomUpIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput = "categoryOut", destInput = "categoryIn")
    sensor = net.regions["sensor"]
    classifier = net.regions["classifier"]

    # Setup sensor region encoder and data stream.
    # Reopen the stream read-only after writing the fixture above.
    dataSource.close()
    dataSource = FileRecordStream(filename)
    sensorRegion = sensor.getSelf()
    sensorRegion.encoder = MultiEncoder()
    sensorRegion.encoder.addEncoder(
        "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
    sensorRegion.dataSource = dataSource

    # Get ready to run.
    net.initialize()

    # Train the network (by default learning is ON in the classifier, but assert
    # anyway) and then turn off learning and turn on inference mode.
    self.assertEqual(classifier.getParameter("learningMode"), 1)
    net.run(8)
    classifier.setParameter("inferenceMode", 1)
    classifier.setParameter("learningMode", 0)

    # Assert learning is OFF and that the classifier learned the dataset.
    self.assertEqual(classifier.getParameter("learningMode"), 0,
        "Learning mode is not turned off.")
    self.assertEqual(classifier.getParameter("inferenceMode"), 1,
        "Inference mode is not turned on.")
    self.assertEqual(classifier.getParameter("categoryCount"), 3,
        "The classifier should count three total categories.")
    # classifier learns 12 patterns b/c there are 12 categories amongst the
    # records:
    self.assertEqual(classifier.getParameter("patternCount"), 12,
        "The classifier should've learned 12 samples in total.")

    # Test the network on the same data as it trained on; should classify with
    # 100% accuracy.
    expectedCats = ([0.0, 0.5, 0.5],
                    [0.0, 0.5, 0.5],
                    [0.0, 0.5, 0.5],
                    [0.5, 0.5, 0.0],
                    [0.0, 0.5, 0.5],
                    [0.0, 0.5, 0.5],
                    [0.5, 0.5, 0.0],
                    [0.0, 0.5, 0.5])
    dataSource.rewind()
    for i in xrange(8):
      net.run(1)
      inferredCats = classifier.getOutputData("categoriesOut")
      self.assertSequenceEqual(expectedCats[i], inferredCats.tolist(),
          "Classififer did not infer expected category probabilites for record "
          "number {}.".format(i))

    # Close data stream, delete file.
    dataSource.close()
    os.remove(filename)
# Example 18
class ClaClassifier():
    """Multi-layer CLA (HTM) classifier network.

    Builds a NuPIC Network of RecordSensor -> SPRegion [-> TPRegion] ->
    CLAClassifierRegion chains according to ``net_structure``, pushes one
    record at a time through it with run(), and drives each classifier
    region manually via customCompute().

    NOTE(review): relies on names imported elsewhere in this file
    (Network, MultiEncoder, DataBuffer, OrderedDict, json, copy,
    defaultdict, computeAnomalyScore) -- confirm they are imported at the
    top of the file.
    """

    def __init__(self, net_structure, sensor_params, dest_region_params,
                 class_encoder_params):
        """Build and initialize the network.

        :param net_structure: OrderedDict mapping a source (sensor or
            region) name to the list of destination region names.
        :param sensor_params: dict of sensor name -> encoder parameters.
        :param dest_region_params: dict of region name -> parameter
            overrides merged onto ``default_params``.
        :param class_encoder_params: encoder parameters for the
            classifiers; its first key is the field that gets predicted.
        """

        # Number of records processed so far; passed to the classifier as
        # recordNum in _learn_classifier_multi.
        self.run_number = 0

        # for classifier
        self.classifier_encoder_list = {}  # class region name -> MultiEncoder
        self.classifier_input_list = {}    # class region name -> region feeding it
        self.prevPredictedColumns = {}     # region name -> previous top-down prediction

        # TODO: parameter we would like to get rid of
        # Field of input_data whose value is fed to the classifier.
        self.predict_value = class_encoder_params.keys()[0]
        self.predict_step = 0

        # default param
        self.default_params = {
            'SP_PARAMS': {
                "spVerbosity": 0,
                "spatialImp": "cpp",
                "globalInhibition": 1,
                "columnCount": 2024,
                "inputWidth": 0,  # set later
                "numActiveColumnsPerInhArea": 20,
                "seed": 1956,
                "potentialPct": 0.8,
                "synPermConnected": 0.1,
                "synPermActiveInc": 0.05,
                "synPermInactiveDec": 0.0005,
                "maxBoost": 2.0,
            },
            'TP_PARAMS': {
                "verbosity": 0,
                "columnCount": 2024,
                "cellsPerColumn": 32,
                "inputWidth": 2024,
                "seed": 1960,
                "temporalImp": "cpp",
                "newSynapseCount": 20,
                "maxSynapsesPerSegment": 32,
                "maxSegmentsPerCell": 128,
                "initialPerm": 0.21,
                "permanenceInc": 0.2,
                "permanenceDec": 0.1,
                "globalDecay": 0.0,
                "maxAge": 0,
                "minThreshold": 12,
                "activationThreshold": 16,
                "outputType": "normal",
                "pamLength": 1,
            },
            'CLASSIFIER_PARAMS': {
                "clVerbosity": 0,
                "alpha": 0.005,
                "steps": "0"
            }
        }

        # tp
        # When False, SP output is linked straight to the classifier and no
        # TPRegion is created.
        self.tp_enable = True

        # net structure
        # NOTE(review): these two entries are placeholder defaults; they are
        # immediately overwritten by the constructor argument below.
        self.net_structure = OrderedDict()
        self.net_structure['sensor3'] = ['region1']
        self.net_structure['region1'] = ['region2']

        self.net_structure = net_structure

        # region change params
        self.dest_region_params = dest_region_params

        # sensor change params
        self.sensor_params = sensor_params

        self.class_encoder_params = class_encoder_params

        self._createNetwork()

    def _makeRegion(self, name, params):
        """Add the SP, optional TP, and classifier regions for layer *name*."""
        sp_name = "sp_" + name
        if self.tp_enable:
            tp_name = "tp_" + name
        class_name = "class_" + name

        # addRegion
        self.network.addRegion(sp_name, "py.SPRegion",
                               json.dumps(params['SP_PARAMS']))
        if self.tp_enable:
            self.network.addRegion(tp_name, "py.TPRegion",
                                   json.dumps(params['TP_PARAMS']))
        self.network.addRegion(class_name, "py.CLAClassifierRegion",
                               json.dumps(params['CLASSIFIER_PARAMS']))

        # The classifier's encoder lives outside the network; it is used in
        # _learn_classifier_multi to bucket the actual values.
        encoder = MultiEncoder()
        encoder.addMultipleEncoders(self.class_encoder_params)
        self.classifier_encoder_list[class_name] = encoder
        if self.tp_enable:
            self.classifier_input_list[class_name] = tp_name
        else:
            self.classifier_input_list[class_name] = sp_name

    def _linkRegion(self, src_name, dest_name):
        """Link *src_name* into the SP [-> TP] -> classifier chain of *dest_name*."""
        sensor = src_name
        sp_name = "sp_" + dest_name
        tp_name = "tp_" + dest_name
        class_name = "class_" + dest_name

        if self.tp_enable:
            self.network.link(sensor, sp_name, "UniformLink", "")
            self.network.link(sp_name, tp_name, "UniformLink", "")
            self.network.link(tp_name, class_name, "UniformLink", "")
        else:
            self.network.link(sensor, sp_name, "UniformLink", "")
            self.network.link(sp_name, class_name, "UniformLink", "")

    def _initRegion(self, name):
        """Set the runtime parameters of layer *name*'s regions after initialize()."""
        sp_name = "sp_" + name
        tp_name = "tp_" + name
        class_name = "class_" + name

        # setting sp
        SP = self.network.regions[sp_name]
        SP.setParameter("learningMode", True)
        SP.setParameter("anomalyMode", True)

        # # setting tp
        if self.tp_enable:
            TP = self.network.regions[tp_name]
            TP.setParameter("topDownMode", False)
            TP.setParameter("learningMode", True)
            TP.setParameter("inferenceMode", True)
            TP.setParameter("anomalyMode", False)

        # configure the classifier region.
        classifier = self.network.regions[class_name]
        classifier.setParameter('inferenceMode', True)
        classifier.setParameter('learningMode', True)

    def _createNetwork(self):
        """Create sensors, regions, and links, then initialize the network."""
        def deepupdate(original, update):
            """
            Recursively update a dict.
            Subdict's won't be overwritten but also updated.
            """
            if update is None:
                return None
            for key, value in original.iteritems():
                if not key in update:
                    update[key] = value
                elif isinstance(value, dict):
                    deepupdate(value, update[key])
            return update

        self.network = Network()

        # check
        # if self.selectivity not in self.dest_region_params.keys():
        #     raise Exception, "There is no selected region : " + self.selectivity
        # NOTE(review): dict keys are unique by construction, so this check
        # can never fire; if it did, str + list in the message would raise
        # a TypeError.
        if not len(self.net_structure.keys()) == len(
                set(self.net_structure.keys())):
            raise Exception, "There is deplicated net_structure keys : " + self.net_structure.keys(
            )

        # sensor
        for sensor_name, params in self.sensor_params.items():
            self.network.addRegion(sensor_name, "py.RecordSensor",
                                   json.dumps({"verbosity": 0}))
            sensor = self.network.regions[sensor_name].getSelf()

            # set encoder
            #params = deepupdate(cn.SENSOR_PARAMS, params)
            encoder = MultiEncoder()
            encoder.addMultipleEncoders(params)
            sensor.encoder = encoder
            # DataBuffer lets run() push one record at a time into the sensor.
            sensor.dataSource = DataBuffer()

        # network
        print 'create element ...'
        for name in self.dest_region_params.keys():
            change_params = self.dest_region_params[name]
            params = deepupdate(self.default_params, change_params)
            # input width
            # Sum the widths of every source feeding this region: encoder
            # width for sensors, TP cell count for upstream regions.
            input_width = 0
            for source in [
                    s for s, d in self.net_structure.items() if name in d
            ]:
                if source in self.sensor_params.keys():
                    sensor = self.network.regions[source].getSelf()
                    input_width += sensor.encoder.getWidth()
                else:
                    # NOTE(review): assumes the upstream region has a TP
                    # layer; width would be wrong if tp_enable is False --
                    # confirm intended usage.
                    input_width += params['TP_PARAMS'][
                        'cellsPerColumn'] * params['TP_PARAMS']['columnCount']

            params['SP_PARAMS']['inputWidth'] = input_width
            self._makeRegion(name, params)

        # link
        print 'link network ...'
        for source, dest_list in self.net_structure.items():
            for dest in dest_list:
                if source in self.sensor_params.keys():
                    self._linkRegion(source, dest)
                else:
                    if self.tp_enable:
                        self._linkRegion("tp_" + source, dest)
                    else:
                        self._linkRegion("sp_" + source, dest)

        # initialize
        print 'initializing network ...'
        self.network.initialize()
        for name in self.dest_region_params.keys():
            self._initRegion(name)

        return

    #@profile
    def run(self, input_data, learn=True, class_learn=True, learn_layer=None):
        """
        Run the network for one input record.

        To train, pass learn=True (and supply the field to learn).
        To predict, pass learn=False.
        Note: prediction is performed while training as well.

        input_data = {'xy_value': [1.0, 2.0], 'ftype': 'sin'}
        """

        self.enable_learning_mode(learn, learn_layer)
        self.enable_class_learning_mode(class_learn)

        self.run_number += 1

        # calc encoder, SP, TP
        for sensor_name in self.sensor_params.keys():
            self.network.regions[sensor_name].getSelf().dataSource.push(
                input_data)
        self.network.run(1)
        #self.layer_output(input_data)
        #self.debug(input_data)

        # learn classifier
        inferences = {}
        for name in self.dest_region_params.keys():
            class_name = "class_" + name
            inferences['classifier_' + name] = self._learn_classifier_multi(
                class_name,
                actValue=input_data[self.predict_value],
                pstep=self.predict_step)

        # anomaly
        #inferences["anomaly"] = self._calc_anomaly()

        return inferences

    def _learn_classifier_multi(self, region_name, actValue=None, pstep=0):
        """
        Run the classifier computation for one region.

        We would rather do this inside network.run() instead of calling
        customCompute() directly, but it is unclear how to retrieve the
        computed results that way.
        """

        # TODO: we would like to fully separate the network from the
        #       classifier: the network computes sensor, SP and TP only,
        #       and evaluating/using those results happens outside.

        classifier = self.network.regions[region_name]
        encoder = self.classifier_encoder_list[region_name].getEncoderList()[0]
        class_input = self.classifier_input_list[region_name]
        # Indices of active bits in the feeding region's output.
        tp_bottomUpOut = self.network.regions[class_input].getOutputData(
            "bottomUpOut").nonzero()[0]
        #tp_bottomUpOut = self.network.regions["TP"].getSelf()._tfdr.infActiveState['t'].reshape(-1).nonzero()[0]

        if actValue is not None:
            bucketIdx = encoder.getBucketIndices(actValue)[0]
            classificationIn = {'bucketIdx': bucketIdx, 'actValue': actValue}
        else:
            # No ground truth available: feed a dummy classification.
            classificationIn = {'bucketIdx': 0, 'actValue': 'no'}
        clResults = classifier.getSelf().customCompute(
            recordNum=self.run_number,
            patternNZ=tp_bottomUpOut,
            classification=classificationIn)

        inferences = self._get_inferences(clResults, pstep, summary_tyep='sum')

        return inferences

    def _get_inferences(self, clResults, steps, summary_tyep='sum'):
        """
        Reshape the classifier results into an easier-to-use form.

        Returns {'likelihoodsDict': {actValue: probability},
                 'best': {'value': ..., 'prob': ...}}.
        'sum' accumulates probabilities per actual value; 'best' keeps only
        the single most likely bucket per value.
        """

        likelihoodsVec = clResults[steps]
        bucketValues = clResults['actualValues']

        likelihoodsDict = defaultdict(int)
        bestActValue = None
        bestProb = None

        if summary_tyep == 'sum':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                likelihoodsDict[actValue] += prob
                if bestProb is None or likelihoodsDict[actValue] > bestProb:
                    bestProb = likelihoodsDict[actValue]
                    bestActValue = actValue

        elif summary_tyep == 'best':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                if bestProb is None or prob > bestProb:
                    likelihoodsDict[actValue] = prob
                    bestProb = prob
                    bestActValue = actValue

        return {
            'likelihoodsDict': likelihoodsDict,
            'best': {
                'value': bestActValue,
                'prob': bestProb
            }
        }

    def _calc_anomaly(self):
        """
        Compute the anomaly score of each layer.

        Compares this step's active columns against the previous step's
        top-down prediction.

        NOTE(review): accesses "tp_" regions unconditionally; this would
        raise if tp_enable is False -- confirm it is only called with TP on.
        """

        score = 0
        anomalyScore = {}
        for name in self.dest_region_params.keys():
            #sp_bottomUpOut = self.network.regions["sp_"+name].getOutputData("bottomUpOut").nonzero()[0]
            sp_bottomUpOut = self.network.regions["tp_" + name].getInputData(
                "bottomUpIn").nonzero()[0]

            if self.prevPredictedColumns.has_key(name):
                score = computeAnomalyScore(sp_bottomUpOut,
                                            self.prevPredictedColumns[name])
            #topdown_predict = self.network.regions["TP"].getSelf()._tfdr.topDownCompute().copy().nonzero()[0]
            topdown_predict = self.network.regions[
                "tp_" + name].getSelf()._tfdr.topDownCompute().nonzero()[0]
            self.prevPredictedColumns[name] = copy.deepcopy(topdown_predict)

            anomalyScore[name] = score

        return anomalyScore

    def reset(self):
        """
        reset sequence

        NOTE(review): currently a no-op; the actual reset calls are
        commented out below.
        """
        # for name in self.dest_region_params.keys():
        #     self.network.regions["tp_"+name].getSelf().resetSequenceStates()
        return

        # for sensor_name in self.sensor_params.keys():
        #     sensor = self.network.regions[sensor_name].getSelf()
        #     sensor.dataSource = DataBuffer()

    def enable_class_learning_mode(self, enable):
        """Toggle learningMode on every classifier region."""
        for name in self.dest_region_params.keys():
            self.network.regions["class_" + name].setParameter(
                "learningMode", enable)

    def enable_learning_mode(self, enable, layer_name=None):
        """
        Toggle learningMode on each layer's SP, TP and Classifier.

        When layer_name is given, layers listed there get *enable* and all
        other layers get the opposite.
        """
        if layer_name is None:
            for name in self.dest_region_params.keys():
                self.network.regions["sp_" + name].setParameter(
                    "learningMode", enable)
                if self.tp_enable:
                    self.network.regions["tp_" + name].setParameter(
                        "learningMode", enable)
                self.network.regions["class_" + name].setParameter(
                    "learningMode", enable)
        else:
            for name in self.dest_region_params.keys():
                self.network.regions["sp_" + name].setParameter(
                    "learningMode", not enable)
                if self.tp_enable:
                    self.network.regions["tp_" + name].setParameter(
                        "learningMode", not enable)
                self.network.regions["class_" + name].setParameter(
                    "learningMode", not enable)
            for name in layer_name:
                self.network.regions["sp_" + name].setParameter(
                    "learningMode", enable)
                if self.tp_enable:
                    self.network.regions["tp_" + name].setParameter(
                        "learningMode", enable)
                self.network.regions["class_" + name].setParameter(
                    "learningMode", enable)

    def print_inferences(self, input_data, inferences):
        """
        Print the inference results.
        """

        # print "%10s, %10s, %1s" % (
        #         int(input_data['xy_value'][0]),
        #         int(input_data['xy_value'][1]),
        #         input_data['label'][:1]),
        print "%5s" % (input_data['label']),

        # NOTE(review): bare except silently swallows missing-key errors
        # (e.g. when the predicted value has no likelihood entry).
        try:
            for name in sorted(self.dest_region_params.keys()):
                print "%5s" % (inferences['classifier_' +
                                          name]['best']['value']),

            for name in sorted(self.dest_region_params.keys()):
                print "%6.4f," % (inferences['classifier_' +
                                             name]['likelihoodsDict']
                                  [input_data[self.predict_value]]),
        except:
            pass

        # for name in sorted(self.dest_region_params.keys()):
        #     print "%3.2f," % (inferences["anomaly"][name]),

        # for name in sorted(self.dest_region_params.keys()):
        #     print "%5s," % name,

        print

    def layer_output(self, input_data, region_name=None):
        """Debug dump of layer inputs/outputs (or a single region's output)."""
        if region_name is not None:
            Region = self.network.regions[region_name]
            print Region.getOutputData("bottomUpOut").nonzero()[0]
            return

        for name in self.dest_region_params.keys():
            SPRegion = self.network.regions["sp_" + name]
            if self.tp_enable:
                TPRegion = self.network.regions["tp_" + name]

            print "#################################### ", name
            print
            print "==== SP layer ===="
            print "input:  ", SPRegion.getInputData(
                "bottomUpIn").nonzero()[0][:20]
            print "output: ", SPRegion.getOutputData(
                "bottomUpOut").nonzero()[0][:20]
            print
            if self.tp_enable:
                print "==== TP layer ===="
                print "input:  ", TPRegion.getInputData(
                    "bottomUpIn").nonzero()[0][:20]
                print "output: ", TPRegion.getOutputData(
                    "bottomUpOut").nonzero()[0][:20]
                print
            print "==== Predict ===="
            # NOTE(review): TPRegion is only bound when tp_enable is True;
            # this line raises NameError when tp_enable is False.
            print TPRegion.getSelf()._tfdr.topDownCompute().copy().nonzero(
            )[0][:20]
            print

    def save(self, path):
        """Pickle the entire classifier (network included) to *path*."""
        import pickle
        with open(path, 'wb') as modelPickleFile:
            pickle.dump(self, modelPickleFile)
class FunctionRecogniter():
    """Function-type recognizer built on a multi-layer CLA (HTM) network.

    Wires RecordSensor -> SPRegion -> TPRegion -> CLAClassifierRegion
    chains according to ``net_structure`` and classifies the function type
    ('ftype') of incoming (x, y) samples.

    NOTE(review): depends on names defined elsewhere in this file
    (Network, MultiEncoder, OrderedDict, json, copy, itertools,
    defaultdict, computeAnomalyScore, NetworkEvaluation, and the module
    ``cn`` providing SENSOR_PARAMS, PARAMS and DataBuffer) -- confirm at
    file top.
    """

    def __init__(self):

        # Number of records processed so far (classifier recordNum).
        self.run_number = 0

        # for classifier
        self.classifier_encoder_list = {}  # class region name -> MultiEncoder
        self.classifier_input_list   = {}  # class region name -> feeding region
        self.prevPredictedColumns    = {}  # region name -> previous top-down prediction

        # Region used for selectivity evaluation; must exist in
        # dest_resgion_data (checked in _createNetwork).
        self.selectivity = "region2"

        # net structure
        self.net_structure = OrderedDict()
        self.net_structure['sensor3'] = ['region1']
        self.net_structure['region1'] = ['region2']

        # self.net_structure['sensor1'] = ['region1']
        # self.net_structure['sensor2'] = ['region2']
        # self.net_structure['region1'] = ['region3']
        # self.net_structure['region2'] = ['region3']



        # region change params
        self.dest_resgion_data = {
                'region1': {
                    'TP_PARAMS':{
                        "cellsPerColumn": 8,
                        "permanenceInc": 0.2,
                        "permanenceDec": 0.1,
                        #"permanenceDec": 0.0001,
                        },
                    },
                'region2': {
                    'SP_PARAMS':{
                        # region1 output: 2024 columns x 8 cells per column.
                        "inputWidth": 2024 * (8),
                        },
                    'TP_PARAMS':{
                        "cellsPerColumn": 8,
                        "permanenceInc": 0.2,
                        "permanenceDec": 0.1,
                        },
                    },
                # 'region3': {
                #     'SP_PARAMS':{
                #         "inputWidth": 2024 * (8),
                #         },
                #     'TP_PARAMS':{
                #         "cellsPerColumn": 8,
                #         },
                #     },
                 }

        # sensor change params
        self.sensor_params = {
                'sensor1': {
                    'xy_value': None,
                    'x_value': {
                        "fieldname": u"x_value",
                        "name": u"x_value",
                        "type": "ScalarEncoder",
                        'maxval': 100.0,
                        'minval':  0.0,
                        "n": 200,
                        "w": 21,
                        "clipInput": True
                        },
                    },
                'sensor2': {
                    'xy_value': None,
                    'y_value': {
                        "fieldname": u"y_value",
                        "name": u"y_value",
                        "type": "ScalarEncoder",
                        'maxval': 100.0,
                        'minval':  0.0,
                        "n": 200,
                        "w": 21,
                        "clipInput": True
                        },
                    },
                'sensor3': {
                    'xy_value': {
                        'maxval': 100.0,
                        'minval':   0.0
                        },
                    },
                # 'sensor3': {
                #     'xy_value': {
                #         'maxval': 100.0,
                #         'minval':  40.0
                #         },
                #     },
                }

        self._createNetwork()


        # for evaluate netwrok accuracy
        self.evaluation = {}
        for name in self.dest_resgion_data.keys():
            self.evaluation[name] = NetworkEvaluation()

        self.evaluation_2 = {}
        for name in self.dest_resgion_data.keys():
            self.evaluation_2[name] = NetworkEvaluation()


        # name -> {tp input tuple -> tp output}; used by _calc_output_differ.
        self.prev_layer_input  = defaultdict(lambda : defaultdict(list))

    def _addRegion(self, src_name, dest_name, params):
        """Create (or reuse) the SP/TP/classifier chain for *dest_name* and link *src_name* in."""

        sensor     =  src_name
        sp_name    = "sp_" + dest_name
        tp_name    = "tp_" + dest_name
        class_name = "class_" + dest_name

        # Probe whether the regions already exist; if so, only add the new
        # input link. Lookup failure falls through to full creation below.
        try:
            self.network.regions[sp_name]
            self.network.regions[tp_name]
            self.network.regions[class_name]
            self.network.link(sensor, sp_name, "UniformLink", "")

        except Exception as e:
            # sp
            self.network.addRegion(sp_name, "py.SPRegion", json.dumps(params['SP_PARAMS']))
            self.network.link(sensor, sp_name, "UniformLink", "")

            # tp
            self.network.addRegion(tp_name, "py.TPRegion", json.dumps(params['TP_PARAMS']))
            self.network.link(sp_name, tp_name, "UniformLink", "")

            # class
            self.network.addRegion( class_name, "py.CLAClassifierRegion", json.dumps(params['CLASSIFIER_PARAMS']))
            self.network.link(tp_name, class_name, "UniformLink", "")

            # The classifier's encoder lives outside the network; it is
            # used in _learn_classifier_multi to bucket actual values.
            encoder = MultiEncoder()
            encoder.addMultipleEncoders(params['CLASSIFIER_ENCODE_PARAMS'])
            self.classifier_encoder_list[class_name]  = encoder
            self.classifier_input_list[class_name]    = tp_name

    def _initRegion(self, name):
        """Set the runtime parameters of layer *name*'s regions after initialize()."""
        sp_name = "sp_"+ name
        tp_name = "tp_"+ name
        class_name = "class_"+ name

        # setting sp
        SP = self.network.regions[sp_name]
        SP.setParameter("learningMode", True)
        SP.setParameter("anomalyMode", True)

        # setting tp
        TP = self.network.regions[tp_name]
        TP.setParameter("topDownMode", False)
        TP.setParameter("learningMode", True)
        TP.setParameter("inferenceMode", True)
        TP.setParameter("anomalyMode", False)

        # configure the classifier region.
        classifier = self.network.regions[class_name]
        classifier.setParameter('inferenceMode', True)
        classifier.setParameter('learningMode', True)


    def _createNetwork(self):
        """Create sensors, regions, and links, then initialize the network."""

        def deepupdate(original, update):
            """
            Recursively update a dict.
            Subdict's won't be overwritten but also updated.
            """
            if update is None:
                return None
            for key, value in original.iteritems():
                if not key in update:
                    update[key] = value
                elif isinstance(value, dict):
                    deepupdate(value, update[key])
            return update




        self.network = Network()

        # check
        if self.selectivity not in self.dest_resgion_data.keys():
            raise Exception, "There is no selected region : " + self.selectivity
        # NOTE(review): dict keys are unique by construction, so this check
        # can never fire; if it did, str + list in the message would raise
        # a TypeError.
        if not len(self.net_structure.keys()) == len(set(self.net_structure.keys())):
            raise Exception, "There is deplicated net_structure keys : " + self.net_structure.keys()

        # sensor
        for sensor_name, change_params in self.sensor_params.items():
            self.network.addRegion(sensor_name, "py.RecordSensor", json.dumps({"verbosity": 0}))
            sensor = self.network.regions[sensor_name].getSelf()

            # set encoder
            params = deepupdate(cn.SENSOR_PARAMS, change_params)
            encoder = MultiEncoder()
            encoder.addMultipleEncoders( params )
            sensor.encoder         = encoder

            # set datasource
            # DataBuffer lets run() push one record at a time.
            sensor.dataSource      = cn.DataBuffer()


        # network
        print 'create network ...'
        for source, dest_list in self.net_structure.items():
            for dest in dest_list:
                change_params = self.dest_resgion_data[dest]
                params = deepupdate(cn.PARAMS, change_params)

                if source in self.sensor_params.keys():
                    # Sensor source: SP input width equals the encoder width.
                    sensor = self.network.regions[source].getSelf()
                    params['SP_PARAMS']['inputWidth'] = sensor.encoder.getWidth()
                    self._addRegion(source, dest, params)
                else:
                    #self._addRegion("sp_" + source, dest, params)
                    self._addRegion("tp_" + source, dest, params)

        # initialize
        print 'initializing network ...'
        self.network.initialize()
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            self._initRegion(name)


        # TODO: in a 1-3-1 structure, blindly increasing the TP cell count
        #       may be counterproductive?

        return


    def run(self, input_data, learn=True, learn_layer=None):
        """
        Run the network for one input record.

        To train, pass learn=True (and supply ftype).
        To predict, pass learn=False (ftype is None).
        Note: prediction is performed while training as well.

        input_data = {'xy_value': [1.0, 2.0], 'ftype': 'sin'}
        """

        self.enable_learning_mode(learn, learn_layer)
        self.run_number += 1

        # calc encoder, SP, TP
        for sensor_name in self.sensor_params.keys():
            self.network.regions[sensor_name].getSelf().dataSource.push(input_data)
        self.network.run(1)
        #self.layer_output(input_data)
        #self.debug(input_data)


        # learn classifier
        inferences = {}
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            class_name = "class_" + name
            inferences['classifier_'+name]   = self._learn_classifier_multi(class_name, actValue=input_data['ftype'], pstep=0)



        # anomaly
        inferences["anomaly"] = self._calc_anomaly()

        # output differ
        #inferences["output_differ"] = self._calc_output_differ()

        # # selectivity
        # if input_data['ftype'] is not None and input_data['xy_value'][0] >= 45 and input_data['xy_value'][0] <= 55:
        #     #self.layer_output(input_data)
        #     for name in self.dest_resgion_data.keys():
        #         tp_bottomUpOut = self.network.regions[ "tp_" + name ].getOutputData("bottomUpOut").nonzero()[0]
        #         self.evaluation[name].save_cell_activity(tp_bottomUpOut, input_data['ftype'])
        #
        # if input_data['ftype'] is not None and (input_data['xy_value'][0] <= 5 or input_data['xy_value'][0] >= 95):
        #     for name in self.dest_resgion_data.keys():
        #         tp_bottomUpOut = self.network.regions[ "tp_" + name ].getOutputData("bottomUpOut").nonzero()[0]
        #         self.evaluation_2[name].save_cell_activity(tp_bottomUpOut, input_data['ftype'])

        return inferences


    def _learn_classifier_multi(self, region_name, actValue=None, pstep=0):
        """
        Run the classifier computation for one region.

        We would rather do this inside network.run() instead of calling
        customCompute() directly, but it is unclear how to retrieve the
        computed results that way.
        """

        # TODO: we would like to fully separate the network from the
        #       classifier: the network computes sensor, SP and TP only,
        #       and evaluating/using those results happens outside.

        classifier     = self.network.regions[region_name]
        encoder        = self.classifier_encoder_list[region_name].getEncoderList()[0]
        class_input    = self.classifier_input_list[region_name]
        # Indices of active bits in the feeding TP region's output.
        tp_bottomUpOut = self.network.regions[class_input].getOutputData("bottomUpOut").nonzero()[0]
        #tp_bottomUpOut = self.network.regions["TP"].getSelf()._tfdr.infActiveState['t'].reshape(-1).nonzero()[0]

        if actValue is not None:
            bucketIdx = encoder.getBucketIndices(actValue)[0]
            classificationIn = {
                    'bucketIdx': bucketIdx,
                    'actValue': actValue
                    }
        else:
            # No ground truth available: feed a dummy classification.
            classificationIn = {'bucketIdx': 0,'actValue': 'no'}
        clResults = classifier.getSelf().customCompute(
                recordNum=self.run_number,
                patternNZ=tp_bottomUpOut,
                classification=classificationIn
                )

        inferences= self._get_inferences(clResults, pstep, summary_tyep='sum')

        return inferences

    def _get_inferences(self, clResults, steps, summary_tyep='sum'):
        """
        Reshape the classifier results into an easier-to-use form.

        Returns {'likelihoodsDict': {actValue: probability},
                 'best': {'value': ..., 'prob': ...}}.
        'sum' accumulates probabilities per actual value; 'best' keeps only
        the single most likely bucket per value.
        """

        likelihoodsVec = clResults[steps]
        bucketValues   = clResults['actualValues']

        likelihoodsDict = defaultdict(int)
        bestActValue = None
        bestProb = None

        if summary_tyep == 'sum':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                likelihoodsDict[actValue] += prob
                if bestProb is None or likelihoodsDict[actValue] > bestProb:
                    bestProb = likelihoodsDict[actValue]
                    bestActValue = actValue

        elif summary_tyep == 'best':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                if bestProb is None or prob > bestProb:
                    likelihoodsDict[actValue] = prob
                    bestProb = prob
                    bestActValue = actValue

        return {'likelihoodsDict': likelihoodsDict, 'best': {'value': bestActValue, 'prob':bestProb}}

    def _calc_output_differ(self):
        """
        Compute the difference from the previous output for a repeated input.

        The idea is to use this as an indicator of learning progress.

        identical output: 0
        completely different output: 1
        """

        score = 0
        #self.prev_layer_input  = defaultdict(lambda defaultdict(list))
        output_differ = {}

        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):

            tp_input = self.network.regions["tp_"+name].getInputData("bottomUpIn").nonzero()[0]
            tp_output = self.network.regions["tp_"+name].getOutputData("bottomUpOut").nonzero()[0]

            if self.prev_layer_input[name].has_key(tuple(tp_input)):
                prev_output = self.prev_layer_input[name][tuple(tp_input)]

                # Fraction of output cells shared with the previous response
                # to the same input.
                same_cell = (set(prev_output) & set(tp_output))
                output_differ[name] = 1 - float(len(same_cell) )/ len(tp_output)

            self.prev_layer_input[name][tuple(tp_input)] = tp_output

        return output_differ

    def _calc_anomaly(self):
        """
        Compute the anomaly score of each layer.

        Compares this step's active columns against the previous step's
        top-down prediction.
        """

        score = 0
        anomalyScore = {}
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            #sp_bottomUpOut = self.network.regions["sp_"+name].getOutputData("bottomUpOut").nonzero()[0]
            sp_bottomUpOut = self.network.regions["tp_"+name].getInputData("bottomUpIn").nonzero()[0]

            if self.prevPredictedColumns.has_key(name):
                score = computeAnomalyScore(sp_bottomUpOut, self.prevPredictedColumns[name])
            #topdown_predict = self.network.regions["TP"].getSelf()._tfdr.topDownCompute().copy().nonzero()[0]
            topdown_predict = self.network.regions["tp_"+name].getSelf()._tfdr.topDownCompute().nonzero()[0]
            self.prevPredictedColumns[name] = copy.deepcopy(topdown_predict)

            anomalyScore[name] = score

        return anomalyScore

    def reset(self):
        """
        reset sequence
        """
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            self.network.regions["tp_"+name].getSelf().resetSequenceStates()

    def enable_learning_mode(self, enable, layer_name = None):
        """
        Toggle learningMode on each layer's SP, TP and Classifier.

        When layer_name is given, layers listed there get *enable* and all
        other layers get the opposite.
        """
        if layer_name is None:
            for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
                self.network.regions["sp_"+name].setParameter("learningMode", enable)
                self.network.regions["tp_"+name].setParameter("learningMode", enable)
                self.network.regions["class_"+name].setParameter("learningMode", enable)
        else:
            for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
                self.network.regions["sp_"+name].setParameter("learningMode", not enable)
                self.network.regions["tp_"+name].setParameter("learningMode", not enable)
                self.network.regions["class_"+name].setParameter("learningMode", not enable)
            for name in layer_name:
                self.network.regions["sp_"+name].setParameter("learningMode", enable)
                self.network.regions["tp_"+name].setParameter("learningMode", enable)
                self.network.regions["class_"+name].setParameter("learningMode", enable)


    def print_inferences(self, input_data, inferences):
        """
        Print the inference results.
        """

        print "%10s, %10s, %1s" % (
                int(input_data['xy_value'][0]),
                int(input_data['xy_value'][1]),
                input_data['ftype'][:1]),


        for name in sorted(self.dest_resgion_data.keys()):
            print "%1s" % (inferences['classifier_'+name]['best']['value'][:1]),

        for name in sorted(self.dest_resgion_data.keys()):
            print "%6.4f," % (inferences['classifier_'+name]['likelihoodsDict'][input_data['ftype']]),

        for name in sorted(self.dest_resgion_data.keys()):
            print "%3.2f," % (inferences["anomaly"][name]),

        # for name in sorted(self.dest_resgion_data.keys()):
        #     print "%5s," % name,

        print

    def layer_output(self, input_data, region_name=None):
        """Debug dump of layer inputs/outputs (or a single region's output)."""
        if region_name is not None:
            Region = self.network.regions[region_name]
            print Region.getOutputData("bottomUpOut").nonzero()[0]
            return

        for name in self.dest_resgion_data.keys():
            SPRegion = self.network.regions["sp_"+name]
            TPRegion = self.network.regions["tp_"+name]

            print "#################################### ", name
            print
            print "==== SP layer ===="
            print "input:  ", SPRegion.getInputData("bottomUpIn").nonzero()[0]
            print "output: ", SPRegion.getOutputData("bottomUpOut").nonzero()[0]
            print
            print "==== TP layer ===="
            print "input:  ", TPRegion.getInputData("bottomUpIn").nonzero()[0]
            print "output: ", TPRegion.getOutputData("bottomUpOut").nonzero()[0]
            print
            print "==== Predict ===="
            print TPRegion.getSelf()._tfdr.topDownCompute().copy().nonzero()[0][:10]
            print
Ejemplo n.º 20
0
class SaccadeNetwork(object):
    """
  A HTM network structured as follows:
  SaccadeSensor (RandomSaccade) -> SP -> TM -> Classifier (KNN)
  """
    def __init__(self,
                 networkName,
                 trainingSet,
                 testingSet,
                 loggingDir=None,
                 validationSet=None,
                 detailedSaccadeWidth=IMAGE_WIDTH,
                 detailedSaccadeHeight=IMAGE_HEIGHT,
                 createNetwork=True):
        """
    :param str networkName: Where the network will be serialized/saved to
    :param str trainingSet: Path to set of images to train on
    :param str testingSet: Path to set of images to test
    :param str loggingDir: directory to store logged images in
      (note: no image logging if none)
    :param validationSet: (optional) Path to set of images to validate on
    :param int detailedSaccadeWidth: (optional) Width of detailed saccades to
      return from the runNetworkOneImage and testNetworkOneImage
    :param int detailedSaccadeHeight: (optional) Height of detailed saccades to
      return from the runNetworkOneImage and testNetworkOneImage
    :param bool createNetwork: If false, wait until createNet is manually
      called to create the network. Otherwise, create on __init__
    """
        self.loggingDir = loggingDir
        self.netFile = networkName
        self.trainingSet = trainingSet
        self.validationSet = validationSet
        self.testingSet = testingSet
        self.detailedSaccadeWidth = detailedSaccadeWidth
        self.detailedSaccadeHeight = detailedSaccadeHeight

        self.net = None
        self.trainingImageIndex = None
        self.networkDutyCycles = None
        self.networkSensor = None
        self.networkSP = None
        self.networkTM = None
        self.networkTP = None
        self.networkClassifier = None
        self.networkSensor = None
        self.numTrainingImages = 0
        self.numTestingImages = 0
        self.trainingImageIndex = 0
        self.testingImageIndex = 0
        self.numCorrect = 0

        if createNetwork:
            self.createNet()

    def createNet(self):
        """ Set up the structure of the network """
        net = Network()

        Network.unregisterRegion(SaccadeSensor.__name__)
        Network.registerRegion(SaccadeSensor)
        Network.unregisterRegion(ExtendedTMRegion.__name__)
        Network.registerRegion(ExtendedTMRegion)
        Network.unregisterRegion(ColumnPoolerRegion.__name__)
        Network.registerRegion(ColumnPoolerRegion)

        imageSensorParams = copy.deepcopy(DEFAULT_IMAGESENSOR_PARAMS)
        if self.loggingDir is not None:
            imageSensorParams["logDir"] = "sensorImages/" + self.loggingDir
            imageSensorParams["logOutputImages"] = 1
            imageSensorParams["logOriginalImages"] = 1
            imageSensorParams["logFilteredImages"] = 1
            imageSensorParams["logLocationImages"] = 1
            imageSensorParams["logLocationOnOriginalImage"] = 1

        net.addRegion("sensor", "py.SaccadeSensor",
                      yaml.dump(imageSensorParams))
        sensor = net.regions["sensor"].getSelf()

        DEFAULT_SP_PARAMS["columnCount"] = sensor.getOutputElementCount(
            "dataOut")
        net.addRegion("SP", "py.SPRegion", yaml.dump(DEFAULT_SP_PARAMS))
        sp = net.regions["SP"].getSelf()

        DEFAULT_TM_PARAMS["columnDimensions"] = (
            sp.getOutputElementCount("bottomUpOut"), )
        DEFAULT_TM_PARAMS["basalInputWidth"] = sensor.getOutputElementCount(
            "saccadeOut")
        net.addRegion("TM", "py.ExtendedTMRegion",
                      yaml.dump(DEFAULT_TM_PARAMS))

        net.addRegion("TP", "py.ColumnPoolerRegion",
                      yaml.dump(DEFAULT_TP_PARAMS))

        net.addRegion("classifier", "py.KNNClassifierRegion",
                      yaml.dump(DEFAULT_CLASSIFIER_PARAMS))

        net.link("sensor",
                 "SP",
                 "UniformLink",
                 "",
                 srcOutput="dataOut",
                 destInput="bottomUpIn")
        net.link("SP",
                 "TM",
                 "UniformLink",
                 "",
                 srcOutput="bottomUpOut",
                 destInput="activeColumns")
        net.link("sensor",
                 "TM",
                 "UniformLink",
                 "",
                 srcOutput="saccadeOut",
                 destInput="basalInput")
        net.link("TM",
                 "TP",
                 "UniformLink",
                 "",
                 srcOutput="predictedActiveCells",
                 destInput="feedforwardInput")
        net.link("TP",
                 "TM",
                 "UniformLink",
                 "",
                 srcOutput="feedForwardOutput",
                 destInput="apicalInput")
        net.link("TP",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="feedForwardOutput",
                 destInput="bottomUpIn")
        #net.link("TM", "classifier", "UniformLink", "",
        #         srcOutput="predictedActiveCells", destInput="bottomUpIn")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="categoryOut",
                 destInput="categoryIn")

        self.net = net
        self.networkSensor = self.net.regions["sensor"]
        self.networkSP = self.net.regions["SP"]
        self.networkTM = self.net.regions["TM"]
        self.networkTP = self.net.regions["TP"]
        self.networkClassifier = self.net.regions["classifier"]

    def loadFromFile(self, filename):
        """ Load a serialized network
    :param filename: Where the network should be loaded from
    """
        print "Loading network from {file}...".format(file=filename)
        Network.unregisterRegion(SaccadeSensor.__name__)
        Network.registerRegion(SaccadeSensor)

        Network.registerRegion(ExtendedTMRegion)

        self.net = Network(filename)

        self.networkSensor = self.net.regions["sensor"]
        self.networkSensor.setParameter("numSaccades",
                                        SACCADES_PER_IMAGE_TESTING)

        self.networkSP = self.net.regions["SP"]
        self.networkClassifier = self.net.regions["classifier"]

        self.numCorrect = 0

    def loadExperiment(self):
        """ Load images into ImageSensor and set the learning mode for the SP. """
        print "============= Loading training images ================="
        t1 = time.time()
        self.networkSensor.executeCommand(
            ["loadMultipleImages", self.trainingSet])
        numTrainingImages = self.networkSensor.getParameter("numImages")
        t2 = time.time()
        print "Load time for training images:", t2 - t1
        print "Number of training images", numTrainingImages

        self.numTrainingImages = numTrainingImages
        self.trainingImageIndex = 0

    def runNetworkOneImage(self, enableViz=False):
        """ Runs a single image through the network stepping through all saccades

    :param bool enableViz: If true, visualizations are generated and returned
    :return: If enableViz, return a tuple (saccadeImgsList, saccadeDetailList,
      saccadeHistList). saccadeImgsList is a list of images with the fovea
      highlighted. saccadeDetailList is a list of resized images showing the
      contents of the fovea. saccadeHistList shows the fovea history.
      If not enableViz, returns True
      Regardless of enableViz, if False is returned, all images have been
      saccaded over.
    """
        if self.trainingImageIndex < self.numTrainingImages:

            saccadeList = []
            saccadeImgsList = []
            saccadeHistList = []
            saccadeDetailList = []
            originalImage = None

            self.networkTM.executeCommand(["reset"])

            for i in range(SACCADES_PER_IMAGE_TRAINING):
                self.net.run(1)
                if originalImage is None:
                    originalImage = deserializeImage(
                        yaml.load(
                            self.networkSensor.getParameter("originalImage")))
                    imgCenter = (
                        originalImage.size[0] / 2,
                        originalImage.size[1] / 2,
                    )
                saccadeList.append({
                    "offset1": (yaml.load(
                        self.networkSensor.getParameter("prevSaccadeInfo"))
                                ["prevOffset"]),
                    "offset2": (yaml.load(
                        self.networkSensor.getParameter("prevSaccadeInfo"))
                                ["newOffset"])
                })

                if enableViz:
                    detailImage = deserializeImage(
                        yaml.load(
                            self.networkSensor.getParameter("outputImage")))
                    detailImage = detailImage.resize(
                        (self.detailedSaccadeWidth,
                         self.detailedSaccadeHeight), Image.ANTIALIAS)
                    saccadeDetailList.append(ImageTk.PhotoImage(detailImage))

                    imgWithSaccade = originalImage.convert("RGB")
                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset2"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset2"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2)),
                        fill=(255, 0, 0),
                        width=1)  # Left
                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset2"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset2"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2)),
                        fill=(255, 0, 0),
                        width=1)  # Right
                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset2"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset2"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2)),
                        fill=(255, 0, 0),
                        width=1)  # Top
                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset2"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset2"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2)),
                        fill=(255, 0, 0),
                        width=1)  # Bottom

                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset1"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset1"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2)),
                        fill=(0, 255, 0),
                        width=1)  # Left
                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset1"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset1"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2)),
                        fill=(0, 255, 0),
                        width=1)  # Right
                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset1"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset1"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2)),
                        fill=(0, 255, 0),
                        width=1)  # Top
                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset1"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset1"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2)),
                        fill=(0, 255, 0),
                        width=1)  # Bottom

                    saccadeImgsList.append(ImageTk.PhotoImage(imgWithSaccade))

                    saccadeHist = originalImage.convert("RGB")
                    for i, saccade in enumerate(saccadeList):
                        ImageDraw.Draw(saccadeHist).rectangle(
                            (imgCenter[0] + saccade["offset2"][0] -
                             _FOVEA_SIZE / 2, imgCenter[0] +
                             saccade["offset2"][1] - _FOVEA_SIZE / 2,
                             imgCenter[0] + saccade["offset2"][0] +
                             _FOVEA_SIZE / 2, imgCenter[0] +
                             saccade["offset2"][1] + _FOVEA_SIZE / 2),
                            fill=(0, (255 / SACCADES_PER_IMAGE_TRAINING *
                                      (SACCADES_PER_IMAGE_TRAINING - i)),
                                  (255 / SACCADES_PER_IMAGE_TRAINING * i)))
                    saccadeHist = saccadeHist.resize(
                        (self.detailedSaccadeWidth,
                         self.detailedSaccadeHeight), Image.ANTIALIAS)
                    saccadeHistList.append(ImageTk.PhotoImage(saccadeHist))

            self.trainingImageIndex += 1
            print("Iteration: {iter}; Category: {cat}".format(
                iter=self.trainingImageIndex,
                cat=self.networkSensor.getOutputData("categoryOut")))

            if enableViz:
                return (saccadeImgsList, saccadeDetailList, saccadeHistList,
                        self.networkSensor.getOutputData("categoryOut"))
            return True

        else:
            return False

    def runNetworkBatch(self, batchSize):
        """ Run the network in batches.

    :param batchSize: Number of images to show in this batch
    :return: True if there are more images left to be saccaded over.
      Otherwise False.
    """
        startTime = time.time()
        while self.trainingImageIndex < self.numTrainingImages:
            self.networkTM.executeCommand(["reset"])
            for i in range(SACCADES_PER_IMAGE_TRAINING):
                self.net.run(1)

            self.trainingImageIndex += 1
            if self.trainingImageIndex % batchSize == 0:
                print(
                    "Iteration: {iter}; Category: {cat}; Time per batch: {t}".
                    format(iter=self.trainingImageIndex,
                           cat=self.networkSensor.getOutputData("categoryOut"),
                           t=time.time() - startTime))
                return True
        return False

    def setupNetworkTest(self):
        self.networkSensor.executeCommand(
            ["loadMultipleImages", self.testingSet])
        self.numTestingImages = self.networkSensor.getParameter("numImages")
        self.testingImageIndex = 0

        print "NumTestingImages {test}".format(test=self.numTestingImages)

    def testNetworkOneImage(self, enableViz=False):
        if self.testingImageIndex < self.numTestingImages:
            saccadeList = []
            saccadeImgsList = []
            saccadeHistList = []
            saccadeDetailList = []
            inferredCategoryList = []
            originalImage = None

            self.networkTM.executeCommand(["reset"])
            for i in range(SACCADES_PER_IMAGE_TESTING):
                self.net.run(1)
                if originalImage is None:
                    originalImage = deserializeImage(
                        yaml.load(
                            self.networkSensor.getParameter("originalImage")))
                    imgCenter = (
                        originalImage.size[0] / 2,
                        originalImage.size[1] / 2,
                    )
                saccadeList.append({
                    "offset1": (yaml.load(
                        self.networkSensor.getParameter("prevSaccadeInfo"))
                                ["prevOffset"]),
                    "offset2": (yaml.load(
                        self.networkSensor.getParameter("prevSaccadeInfo"))
                                ["newOffset"])
                })
                inferredCategoryList.append(
                    self.networkClassifier.getOutputData(
                        "categoriesOut").argmax())

                if enableViz:
                    detailImage = deserializeImage(
                        yaml.load(
                            self.networkSensor.getParameter("outputImage")))
                    detailImage = detailImage.resize(
                        (self.detailedSaccadeWidth,
                         self.detailedSaccadeHeight), Image.ANTIALIAS)
                    saccadeDetailList.append(ImageTk.PhotoImage(detailImage))

                    imgWithSaccade = originalImage.convert("RGB")
                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset2"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset2"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2)),
                        fill=(255, 0, 0),
                        width=1)  # Left
                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset2"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset2"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2)),
                        fill=(255, 0, 0),
                        width=1)  # Right
                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset2"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset2"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2)),
                        fill=(255, 0, 0),
                        width=1)  # Top
                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset2"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset2"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2)),
                        fill=(255, 0, 0),
                        width=1)  # Bottom

                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset1"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset1"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2)),
                        fill=(0, 255, 0),
                        width=1)  # Left
                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset1"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset1"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2)),
                        fill=(0, 255, 0),
                        width=1)  # Right
                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset1"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset1"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2)),
                        fill=(0, 255, 0),
                        width=1)  # Top
                    ImageDraw.Draw(imgWithSaccade).line(
                        (imgCenter[0] + saccadeList[i]["offset1"][0] +
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2),
                         imgCenter[0] + saccadeList[i]["offset1"][0] -
                         (_FOVEA_SIZE / 2), imgCenter[1] +
                         saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2)),
                        fill=(0, 255, 0),
                        width=1)  # Bottom

                    saccadeImgsList.append(ImageTk.PhotoImage(imgWithSaccade))

                    saccadeHist = originalImage.convert("RGB")
                    for i, saccade in enumerate(saccadeList):
                        ImageDraw.Draw(saccadeHist).rectangle(
                            (imgCenter[0] + saccade["offset2"][0] -
                             _FOVEA_SIZE / 2, imgCenter[0] +
                             saccade["offset2"][1] - _FOVEA_SIZE / 2,
                             imgCenter[0] + saccade["offset2"][0] +
                             _FOVEA_SIZE / 2, imgCenter[0] +
                             saccade["offset2"][1] + _FOVEA_SIZE / 2),
                            fill=(0, (255 / SACCADES_PER_IMAGE_TESTING *
                                      (SACCADES_PER_IMAGE_TESTING - i)),
                                  (255 / SACCADES_PER_IMAGE_TESTING * i)))
                    saccadeHist = saccadeHist.resize(
                        (self.detailedSaccadeWidth,
                         self.detailedSaccadeHeight), Image.ANTIALIAS)
                    saccadeHistList.append(ImageTk.PhotoImage(saccadeHist))

            inferredCategory = self._getMostCommonCategory(
                inferredCategoryList)
            isCorrectClassification = False
            if self.networkSensor.getOutputData(
                    "categoryOut") == inferredCategory:
                isCorrectClassification = True
                self.numCorrect += 1
            print("Iteration: {iter}; Category: {cat}".format(
                iter=self.testingImageIndex,
                cat=self.networkSensor.getOutputData("categoryOut")))
            self.testingImageIndex += 1

            if enableViz:
                return (saccadeImgsList, saccadeDetailList, saccadeHistList,
                        inferredCategoryList,
                        self.networkSensor.getOutputData("categoryOut"),
                        isCorrectClassification)
            return (True, isCorrectClassification)

        else:
            return False

    def testNetworkBatch(self, batchSize):
        if self.testingImageIndex >= self.numTestingImages:
            return False

        while self.testingImageIndex < self.numTestingImages:
            inferredCategoryList = []
            self.networkTM.executeCommand(["reset"])
            for i in range(SACCADES_PER_IMAGE_TESTING):
                self.net.run(1)
                inferredCategoryList.append(
                    self.networkClassifier.getOutputData(
                        "categoriesOut").argmax())
            inferredCategory = self._getMostCommonCategory(
                inferredCategoryList)
            if self.networkSensor.getOutputData(
                    "categoryOut") == inferredCategory:
                self.numCorrect += 1

            self.testingImageIndex += 1

            if self.testingImageIndex % batchSize == 0:
                print("Testing iteration: {iter}".format(
                    iter=self.testingImageIndex))
                break

        return self.numCorrect

    @staticmethod
    def _getMostCommonCategory(categoryList):
        return collections.Counter(categoryList).most_common(1)[0][0]

    def setLearningMode(self,
                        learningSP=False,
                        learningTM=False,
                        learningTP=False,
                        learningClassifier=False):
        if learningSP:
            self.networkSP.setParameter("learningMode", 1)
            self.networkSP.setParameter("inferenceMode", 0)
        else:
            self.networkSP.setParameter("learningMode", 0)
            self.networkSP.setParameter("inferenceMode", 1)

        if learningTM:
            self.networkTM.setParameter("learn", 1)
        else:
            self.networkTM.setParameter("learn", 0)

        if learningTM:
            self.networkTP.setParameter("learningMode", 1)
        else:
            self.networkTP.setParameter("learningMode", 0)

        if learningClassifier:
            self.networkClassifier.setParameter("learningMode", 1)
            self.networkClassifier.setParameter("inferenceMode", 0)
        else:
            self.networkClassifier.setParameter("learningMode", 0)
            self.networkClassifier.setParameter("inferenceMode", 1)

    def saveNetwork(self):
        print "Saving network at {path}".format(path=self.netFile)
        self.net.save(self.netFile)

    def resetIndex(self):
        self.trainingImageIndex = 0
Ejemplo n.º 21
0
class HTMusicModel(object):
    def __init__(self, model_params):
        # Init an HTM network
        self.network = Network()

        # Getting parameters for network regions
        self.sensor_params = model_params['Sensor']
        self.spatial_pooler_params = model_params['SpatialPooler']
        self.temporal_memory_params = model_params['TemporalMemory']
        self.classifiers_params = model_params['Classifiers']
        self.encoders_params = model_params['Encoders']

        # Adding regions to HTM network
        self.network.addRegion('DurationEncoder', 'ScalarSensor',
                               json.dumps(self.encoders_params['duration']))
        self.network.addRegion('VelocityEncoder', 'ScalarSensor',
                               json.dumps(self.encoders_params['pitch']))
        self.network.addRegion('PitchEncoder', 'ScalarSensor',
                               json.dumps(self.encoders_params['velocity']))

        self.network.addRegion('SpatialPooler', 'py.SPRegion',
                               json.dumps(self.spatial_pooler_params))
        self.network.addRegion('TemporalMemory', 'py.TMRegion',
                               json.dumps(self.temporal_memory_params))

        # Creating outer classifiers for multifield prediction
        dclp = self.classifiers_params['duration']
        vclp = self.classifiers_params['pitch']
        pclp = self.classifiers_params['velocity']

        self.duration_classifier = SDRClassifier(
            steps=(1, ),
            verbosity=dclp['verbosity'],
            alpha=dclp['alpha'],
            actValueAlpha=dclp['actValueAlpha'])
        self.velocity_classifier = SDRClassifier(
            steps=(1, ),
            verbosity=vclp['verbosity'],
            alpha=vclp['alpha'],
            actValueAlpha=vclp['actValueAlpha'])
        self.pitch_classifier = SDRClassifier(
            steps=(1, ),
            verbosity=pclp['verbosity'],
            alpha=pclp['alpha'],
            actValueAlpha=pclp['actValueAlpha'])

        self._link_all_regions()
        self._enable_learning()
        self._enable_inference()

        self.network.initialize()

    def _link_all_regions(self):
        # Linking regions
        self.network.link('DurationEncoder', 'SpatialPooler', 'UniformLink',
                          '')
        self.network.link('VelocityEncoder', 'SpatialPooler', 'UniformLink',
                          '')
        self.network.link('PitchEncoder', 'SpatialPooler', 'UniformLink', '')
        self.network.link('SpatialPooler',
                          'TemporalMemory',
                          'UniformLink',
                          '',
                          srcOutput='bottomUpOut',
                          destInput='bottomUpIn')

    def _enable_learning(self):
        # Enable learning for all regions.
        self.network.regions["SpatialPooler"].setParameter("learningMode", 1)
        self.network.regions["TemporalMemory"].setParameter("learningMode", 1)

    def _enable_inference(self):
        # Enable inference for all regions.
        self.network.regions["SpatialPooler"].setParameter("inferenceMode", 1)
        self.network.regions["TemporalMemory"].setParameter("inferenceMode", 1)

    def train(self, duration, pitch, velocity):
        records_total = self.network.regions['SpatialPooler'].getSelf(
        ).getAlgorithmInstance().getIterationNum()

        self.network.regions['DurationEncoder'].setParameter(
            'sensedValue', duration)
        self.network.regions['PitchEncoder'].setParameter('sensedValue', pitch)
        self.network.regions['VelocityEncoder'].setParameter(
            'sensedValue', velocity)
        self.network.run(1)

        # Getting active cells of TM and bucket indicies of encoders to feed classifiers
        active_cells = numpy.array(
            self.network.regions['TemporalMemory'].getOutputData(
                'bottomUpOut')).nonzero()[0]
        duration_bucket = numpy.array(
            self.network.regions['DurationEncoder'].getOutputData('bucket'))
        pitch_bucket = numpy.array(
            self.network.regions['PitchEncoder'].getOutputData('bucket'))
        velocity_bucket = numpy.array(
            self.network.regions['VelocityEncoder'].getOutputData('bucket'))

        duration_classifier_result = self.duration_classifier.compute(
            recordNum=records_total,
            patternNZ=active_cells,
            classification={
                'bucketIdx': duration_bucket[0],
                'actValue': duration
            },
            learn=True,
            infer=False)

        pitch_classifier_result = self.pitch_classifier.compute(
            recordNum=records_total,
            patternNZ=active_cells,
            classification={
                'bucketIdx': pitch_bucket[0],
                'actValue': pitch
            },
            learn=True,
            infer=False)

        velocity_classifier_result = self.velocity_classifier.compute(
            recordNum=records_total,
            patternNZ=active_cells,
            classification={
                'bucketIdx': velocity_bucket[0],
                'actValue': velocity
            },
            learn=True,
            infer=False)

    def generate(self, seed, output_dir, event_amount):
        records_total = self.network.regions['SpatialPooler'].getSelf(
        ).getAlgorithmInstance().getIterationNum()

        seed = seed

        midi = pretty_midi.PrettyMIDI()
        midi_program = pretty_midi.instrument_name_to_program(
            'Acoustic Grand Piano')
        piano = pretty_midi.Instrument(program=midi_program)
        clock = 0
        for iters in tqdm(range(records_total, records_total + event_amount)):
            duration = seed[0]
            pitch = seed[1]
            velocity = seed[2]

            self.network.regions['DurationEncoder'].setParameter(
                'sensedValue', duration)
            self.network.regions['PitchEncoder'].setParameter(
                'sensedValue', pitch)
            self.network.regions['VelocityEncoder'].setParameter(
                'sensedValue', velocity)
            self.network.run(1)

            # Getting active cells of TM and bucket indicies of encoders to feed classifiers
            active_cells = numpy.array(
                self.network.regions['TemporalMemory'].getOutputData(
                    'bottomUpOut')).nonzero()[0]

            duration_bucket = numpy.array(
                self.network.regions['DurationEncoder'].getOutputData(
                    'bucket'))

            pitch_bucket = numpy.array(
                self.network.regions['PitchEncoder'].getOutputData('bucket'))

            velocity_bucket = numpy.array(
                self.network.regions['VelocityEncoder'].getOutputData(
                    'bucket'))

            # Getting up classifiers result

            duration_classifier_result = self.duration_classifier.compute(
                recordNum=records_total,
                patternNZ=active_cells,
                classification={
                    'bucketIdx': duration_bucket[0],
                    'actValue': duration
                },
                learn=False,
                infer=True)

            pitch_classifier_result = self.pitch_classifier.compute(
                recordNum=records_total,
                patternNZ=active_cells,
                classification={
                    'bucketIdx': pitch_bucket[0],
                    'actValue': pitch
                },
                learn=False,
                infer=True)

            velocity_classifier_result = self.velocity_classifier.compute(
                recordNum=records_total,
                patternNZ=active_cells,
                classification={
                    'bucketIdx': velocity_bucket[0],
                    'actValue': velocity
                },
                learn=False,
                infer=True)

            du = duration_classifier_result[1].argmax()
            pi = pitch_classifier_result[1].argmax()
            ve = velocity_classifier_result[1].argmax()

            duration_top_probs = duration_classifier_result[1][
                0:2] / duration_classifier_result[1][0:2].sum()

            predicted_duration = duration_classifier_result['actualValues'][du]

            # predicted_duration = duration_classifier_result['actualValues'][du]
            predicted_pitch = pitch_classifier_result['actualValues'][pi]
            predicted_velocity = velocity_classifier_result['actualValues'][ve]

            # print duration_classifier_result

            note = pretty_midi.Note(velocity=int(predicted_velocity),
                                    pitch=int(predicted_pitch),
                                    start=float(clock),
                                    end=float(clock + predicted_duration))

            piano.notes.append(note)

            clock = clock + 0.25

            seed[0] = predicted_duration
            seed[1] = predicted_pitch
            seed[2] = predicted_velocity

        midi.instruments.append(piano)
        midi.remove_invalid_notes()
        time = datetime.datetime.now().strftime('%Y-%m-%d %H:%m:%S')
        midi.write(output_dir + time + '.mid')

    def load_model(self, load_path):

        # Loading SpatialPooler
        print 'Loading SpatialPooler'
        with open(load_path + 'sp.bin', 'rb') as sp:
            sp_builder = SpatialPoolerProto.read(
                sp, traversal_limit_in_words=2**61)
        self.network.regions['SpatialPooler'].getSelf(
        )._sfdr = self.network.regions['SpatialPooler'].getSelf()._sfdr.read(
            sp_builder)

        # Loading TemporalMemory
        print 'Loading TemporalMemory'
        self.network.regions['TemporalMemory'].getSelf().getAlgorithmInstance(
        ).loadFromFile(load_path + 'tm.bin')

        # Loading end classifier
        print 'Loading duration classifier'
        with open(load_path + 'dcl.bin', 'rb') as dcl:
            dcl_builder = SdrClassifierProto.read(
                dcl, traversal_limit_in_words=2**61)
        self.duration_classifier = self.duration_classifier.read(dcl_builder)

        # Loading pitch classifier
        print 'Loading pitch classifier'
        with open(load_path + 'pcl.bin', 'rb') as pcl:
            pcl_builder = SdrClassifierProto.read(
                pcl, traversal_limit_in_words=2**61)
        self.pitch_classifier = self.pitch_classifier.read(pcl_builder)

        # Loading velocity classifier
        print 'Loading velocity classifier'
        with open(load_path + 'vcl.bin', 'rb') as vcl:
            vcl_builder = SdrClassifierProto.read(
                vcl, traversal_limit_in_words=2**61)
        self.velocity_classifier = self.velocity_classifier.read(vcl_builder)

    def save_model(self, save_path):

        # Saving SpatialPooler
        print 'Saving SpatialPooler'
        sp_builder = SpatialPoolerProto.new_message()
        self.network.regions['SpatialPooler'].getSelf().getAlgorithmInstance(
        ).write(sp_builder)
        with open(save_path + 'sp.bin', 'w+b') as sp:
            sp_builder.write(sp)

        # Saving TemporalMemory
        print 'Saving TemporalMemory'
        self.network.regions['TemporalMemory'].getSelf().getAlgorithmInstance(
        ).saveToFile(save_path + 'tm.bin')

        # Saving end classifier
        print 'Saving duration classifier'
        dcl_builder = SdrClassifierProto.new_message()
        self.duration_classifier.write(dcl_builder)
        with open(save_path + 'dcl.bin', 'w+b') as dcl:
            dcl_builder.write(dcl)

        # Saving pitch classifier
        print 'Saving pitch classifier'
        pcl_builder = SdrClassifierProto.new_message()
        self.pitch_classifier.write(pcl_builder)
        with open(save_path + 'pcl.bin', 'w+b') as pcl:
            pcl_builder.write(pcl)

        # Saving velocity classifier
        print 'Saving velocity classifier'
        vcl_builder = SdrClassifierProto.new_message()
        self.velocity_classifier.write(vcl_builder)
        with open(save_path + 'vcl.bin', 'w+b') as vcl:
            vcl_builder.write(vcl)
# Ejemplo n.º 22
# 0
    def testSimpleImageNetwork(self):
        """Train an ImageSensor -> KNNClassifierRegion network on a
        two-category, one-image-per-category dataset and verify both
        images are classified correctly at inference time.
        """
        # Local import so the file-level import block stays untouched.
        import shutil

        # Create the network and get region instances
        net = Network()
        net.addRegion("sensor", "py.ImageSensor", "{width: 32, height: 32}")
        net.addRegion("classifier", "py.KNNClassifierRegion",
                      "{distThreshold: 0.01, maxCategoryCount: 2}")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="dataOut",
                 destInput="bottomUpIn")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="categoryOut",
                 destInput="categoryIn")
        net.initialize()
        sensor = net.regions['sensor']
        classifier = net.regions['classifier']

        # Create a dataset with two categories, one image in each category
        # Each image consists of a unique rectangle
        tmpDir = tempfile.mkdtemp()
        try:
            os.makedirs(os.path.join(tmpDir, '0'))
            os.makedirs(os.path.join(tmpDir, '1'))

            im0 = Image.new("L", (32, 32))
            draw = ImageDraw.Draw(im0)
            draw.rectangle((10, 10, 20, 20), outline=255)
            im0.save(os.path.join(tmpDir, '0', 'im0.png'))

            im1 = Image.new("L", (32, 32))
            draw = ImageDraw.Draw(im1)
            draw.rectangle((15, 15, 25, 25), outline=255)
            im1.save(os.path.join(tmpDir, '1', 'im1.png'))

            # Load the dataset
            sensor.executeCommand(["loadMultipleImages", tmpDir])
            numImages = sensor.getParameter('numImages')
            self.assertEqual(numImages, 2)

            # Ensure learning is turned ON
            self.assertEqual(classifier.getParameter('learningMode'), 1)

            # Train the network (by default learning is ON in the classifier)
            # and then turn off learning and turn on inference mode
            net.run(2)
            classifier.setParameter('inferenceMode', 1)
            classifier.setParameter('learningMode', 0)

            # Check to make sure learning is turned OFF and that the
            # classifier learned something
            self.assertEqual(classifier.getParameter('learningMode'), 0)
            self.assertEqual(classifier.getParameter('inferenceMode'), 1)
            self.assertEqual(classifier.getParameter('categoryCount'), 2)
            self.assertEqual(classifier.getParameter('patternCount'), 2)

            # Now test the network to make sure it categorizes the images
            # correctly
            numCorrect = 0
            for i in range(2):
                net.run(1)
                inferredCategory = classifier.getOutputData(
                    'categoriesOut').argmax()
                if sensor.getOutputData('categoryOut') == inferredCategory:
                    numCorrect += 1

            self.assertEqual(numCorrect, 2)
        finally:
            # ROBUSTNESS FIX: previously the temp dataset was only removed
            # on full success, leaking files whenever an assertion failed.
            shutil.rmtree(tmpDir, ignore_errors=True)
# Ejemplo n.º 23
# 0
  def testTwoNode(self):
    """Build a two-level TestNode network joined by a TestFanIn2 link,
    run it for two iterations and verify region dimensions and the
    per-node output values after each iteration.
    """
    # =====================================================
    # Build and run the network
    # =====================================================

    net = Network()
    level1 = net.addRegion("level1", "TestNode", "{int32Param: 15}")
    dims = Dimensions([6, 4])
    level1.setDimensions(dims)

    level2 = net.addRegion("level2", "TestNode", "{real64Param: 128.23}")

    net.link("level1", "level2", "TestFanIn2", "")

    # Could call initialize here, but not necessary as net.run()
    # initializes implicitly.
    # net.initialize()

    net.run(1)
    LOGGER.info("Successfully created network and ran for one iteration")

    # =====================================================
    # Check everything
    # =====================================================
    # TestFanIn2 halves each dimension, so level2 is 3x2 given level1's 6x4.
    dims = level1.getDimensions()
    self.assertEquals(len(dims), 2)
    self.assertEquals(dims[0], 6)
    self.assertEquals(dims[1], 4)

    dims = level2.getDimensions()
    self.assertEquals(len(dims), 2)
    self.assertEquals(dims[0], 3)
    self.assertEquals(dims[1], 2)

    # Check L1 output. "False" means don't copy, i.e.
    # get a pointer to the actual output
    # Actual output values are determined by the TestNode
    # compute() behavior.
    l1output = level1.getOutputData("bottomUpOut")
    self.assertEquals(len(l1output), 48) # 24 nodes; 2 values per node
    for i in xrange(24):
      self.assertEquals(l1output[2*i], 0)      # size of input to each node is 0
      self.assertEquals(l1output[2*i+1], i)    # node number

    # check L2 output.
    l2output = level2.getOutputData("bottomUpOut", )
    self.assertEquals(len(l2output), 12) # 6 nodes; 2 values per node
    # Output val = node number + sum(inputs)
    # Can compute from knowing L1 layout
    #
    #  00 01 | 02 03 | 04 05
    #  06 07 | 08 09 | 10 11
    #  ---------------------
    #  12 13 | 14 15 | 16 17
    #  18 19 | 20 21 | 22 23
    outputVals = []
    outputVals.append(0 + (0 + 1 + 6 + 7))
    outputVals.append(1 + (2 + 3 + 8 + 9))
    outputVals.append(2 + (4 + 5 + 10 + 11))
    outputVals.append(3 + (12 + 13 + 18 + 19))
    outputVals.append(4 + (14 + 15 + 20 + 21))
    outputVals.append(5 + (16 + 17 + 22 + 23))
    for i in xrange(6):
      self.assertEquals(l2output[2*i], 8) # size of input for each node is 8
      self.assertEquals(l2output[2*i+1], outputVals[i])


    # =====================================================
    # Run for one more iteration
    # =====================================================
    LOGGER.info("Running for a second iteration")
    net.run(1)

    # =====================================================
    # Check everything again
    # =====================================================

    # Outputs are all the same except that the first output is
    # incremented by the iteration number
    # NOTE: l1output/l2output are views into the region outputs (obtained
    # without copying above), so re-checking them here observes the values
    # refreshed by the second net.run(1) — do not re-fetch or copy them.
    for i in xrange(24):
      self.assertEquals(l1output[2*i], 1)
      self.assertEquals(l1output[2*i+1], i)

    for i in xrange(6):
      self.assertEquals(l2output[2*i], 9)
      self.assertEquals(l2output[2*i+1], outputVals[i] + 4)
# Ejemplo n.º 24
# 0
  def testSimpleImageNetwork(self):
    """Train a sensor -> KNN classifier network on two one-image categories,
    then verify that both images are recognized at inference time.
    """
    # Build the network and fetch the region handles.
    network = Network()
    network.addRegion("sensor", "py.ImageSensor", "{width: 32, height: 32}")
    network.addRegion("classifier", "py.KNNClassifierRegion",
                      "{distThreshold: 0.01, maxCategoryCount: 2}")
    network.link("sensor", "classifier", "UniformLink", "",
                 srcOutput="dataOut", destInput="bottomUpIn")
    network.link("sensor", "classifier", "UniformLink", "",
                 srcOutput="categoryOut", destInput="categoryIn")
    network.initialize()
    sensorRegion = network.regions['sensor']
    classifierRegion = network.regions['classifier']

    # Build a dataset with two categories, one image apiece; each image
    # holds a distinct rectangle.
    datasetDir = tempfile.mkdtemp()
    for category in ('0', '1'):
      os.makedirs(os.path.join(datasetDir, category))

    image = Image.new("L", (32, 32))
    ImageDraw.Draw(image).rectangle((10, 10, 20, 20), outline=255)
    image.save(os.path.join(datasetDir, '0', 'im0.png'))

    image = Image.new("L", (32, 32))
    ImageDraw.Draw(image).rectangle((15, 15, 25, 25), outline=255)
    image.save(os.path.join(datasetDir, '1', 'im1.png'))

    # Point the sensor at the dataset.
    sensorRegion.executeCommand(["loadMultipleImages", datasetDir])
    self.assertEqual(sensorRegion.getParameter('numImages'), 2)

    # Learning should be ON in the classifier by default.
    self.assertEqual(classifierRegion.getParameter('learningMode'), 1)

    # Train on both images, then switch the classifier to inference.
    network.run(2)
    classifierRegion.setParameter('inferenceMode', 1)
    classifierRegion.setParameter('learningMode', 0)

    # Learning must now be OFF and the classifier must have stored both
    # categories and patterns.
    self.assertEqual(classifierRegion.getParameter('learningMode'), 0)
    self.assertEqual(classifierRegion.getParameter('inferenceMode'), 1)
    self.assertEqual(classifierRegion.getParameter('categoryCount'), 2)
    self.assertEqual(classifierRegion.getParameter('patternCount'), 2)

    # Each image should be classified as its own category.
    numCorrect = 0
    for _ in range(2):
      network.run(1)
      prediction = classifierRegion.getOutputData('categoriesOut').argmax()
      if sensorRegion.getOutputData('categoryOut') == prediction:
        numCorrect += 1

    self.assertEqual(numCorrect, 2)

    # Remove the temporary dataset.
    os.unlink(os.path.join(datasetDir, '0', 'im0.png'))
    os.unlink(os.path.join(datasetDir, '1', 'im1.png'))
    os.removedirs(os.path.join(datasetDir, '0'))
    os.removedirs(os.path.join(datasetDir, '1'))
# Ejemplo n.º 25
# 0
    def testSimpleMulticlassNetwork(self):
        """Train a RecordSensor -> KNNClassifierRegion network on a small
        multi-label dataset (records carry one or two category labels) and
        verify that re-running the training data reproduces the expected
        category probability vectors.
        """

        # Setup data record stream of fake data (with three categories)
        filename = _getTempFileName()
        fields = [("timestamp", "datetime", "T"), ("value", "float", ""),
                  ("reset", "int", "R"), ("sid", "int", "S"),
                  ("categories", "list", "C")]
        records = ([datetime(day=1, month=3, year=2010), 0.0, 1, 0, ""], [
            datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1 2"
        ], [datetime(day=3, month=3, year=2010), 1.0, 0, 0,
            "1 2"], [datetime(day=4, month=3, year=2010), 2.0, 0, 0, "0"], [
                datetime(day=5, month=3, year=2010), 3.0, 0, 0, "1 2"
            ], [datetime(day=6, month=3, year=2010), 5.0, 0, 0,
                "1 2"], [datetime(day=7, month=3, year=2010), 8.0, 0, 0, "0"],
                   [datetime(day=8, month=3, year=2010), 13.0, 0, 0, "1 2"])
        dataSource = FileRecordStream(streamID=filename,
                                      write=True,
                                      fields=fields)
        for r in records:
            dataSource.appendRecord(list(r))

        # Create the network and get region instances.
        net = Network()
        net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
        net.addRegion("classifier", "py.KNNClassifierRegion",
                      "{'k': 2,'distThreshold': 0,'maxCategoryCount': 3}")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="dataOut",
                 destInput="bottomUpIn")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="categoryOut",
                 destInput="categoryIn")
        sensor = net.regions["sensor"]
        classifier = net.regions["classifier"]

        # Setup sensor region encoder and data stream.
        # The stream is closed and reopened read-only before training.
        dataSource.close()
        dataSource = FileRecordStream(filename)
        sensorRegion = sensor.getSelf()
        sensorRegion.encoder = MultiEncoder()
        sensorRegion.encoder.addEncoder(
            "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
        sensorRegion.dataSource = dataSource

        # Get ready to run.
        net.initialize()

        # Train the network (by default learning is ON in the classifier, but assert
        # anyway) and then turn off learning and turn on inference mode.
        self.assertEqual(classifier.getParameter("learningMode"), 1)
        net.run(8)
        classifier.setParameter("inferenceMode", 1)
        classifier.setParameter("learningMode", 0)

        # Assert learning is OFF and that the classifier learned the dataset.
        self.assertEqual(classifier.getParameter("learningMode"), 0,
                         "Learning mode is not turned off.")
        self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                         "Inference mode is not turned on.")
        self.assertEqual(
            classifier.getParameter("categoryCount"), 3,
            "The classifier should count three total categories.")
        # classififer learns 12 patterns b/c there are 12 categories amongst the
        # records:
        self.assertEqual(
            classifier.getParameter("patternCount"), 12,
            "The classifier should've learned 12 samples in total.")

        # Test the network on the same data as it trained on; should classify with
        # 100% accuracy.
        # Records labeled "1 2" split probability mass 0.5/0.5 between
        # categories 1 and 2; "0" records split between 0 and 1.
        expectedCats = ([0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5],
                        [0.5, 0.5, 0.0], [0.0, 0.5,
                                          0.5], [0.0, 0.5,
                                                 0.5], [0.5, 0.5,
                                                        0.0], [0.0, 0.5, 0.5])
        dataSource.rewind()
        for i in xrange(8):
            net.run(1)
            inferredCats = classifier.getOutputData("categoriesOut")
            self.assertSequenceEqual(
                expectedCats[i], inferredCats.tolist(),
                "Classififer did not infer expected category probabilites for record "
                "number {}.".format(i))

        # Close data stream, delete file.
        dataSource.close()
        os.remove(filename)
  def testSensor(self):
    """Exercise CoordinateSensorRegion end-to-end: queue three coordinate
    records, check the encoded outputs, then verify the queue and
    parameters survive a network save/load round trip.

    NOTE(review): assumes the test fixture's setUp created self.encoder
    (a coordinate encoder with w/n/verbosity attributes) and self.tmpDir
    — confirm against the enclosing test class.
    """
    # Create a simple network to test the sensor
    params = {
      "activeBits": self.encoder.w,
      "outputWidth": self.encoder.n,
      "radius": 2,
      "verbosity": self.encoder.verbosity,
    }
    net = Network()
    region = net.addRegion("coordinate", "py.CoordinateSensorRegion",
                           json.dumps(params))
    vfe = net.addRegion("output", "VectorFileEffector", "")
    net.link("coordinate", "output", "UniformLink", "")

    self.assertEqual(region.getParameter("outputWidth"),
                     self.encoder.n, "Incorrect outputWidth parameter")

    # Add vectors to the queue using two different add methods. Later we
    # will check to ensure these are actually output properly.
    region.executeCommand(["addDataToQueue", "[2, 4, 6]", "0", "42"])
    regionPy = region.getSelf()
    regionPy.addDataToQueue([2, 42, 1023], 1, 43)
    regionPy.addDataToQueue([18, 19, 20], 0, 44)

    # Set an output file before we run anything
    vfe.setParameter("outputFile", os.path.join(self.tmpDir, "temp.csv"))

    # Run the network and check outputs are as expected.
    # Compare encodings via their sums rather than element-by-element.
    net.run(1)
    expected = self.encoder.encode((numpy.array([2, 4, 6]), params["radius"]))
    actual = region.getOutputData("dataOut")
    self.assertEqual(actual.sum(), expected.sum(), "Value of dataOut incorrect")
    self.assertEqual(region.getOutputData("resetOut"), 0,
                     "Value of resetOut incorrect")
    self.assertEqual(region.getOutputData("sequenceIdOut"), 42,
                     "Value of sequenceIdOut incorrect")

    net.run(1)
    expected = self.encoder.encode((numpy.array([2, 42, 1023]),
                                    params["radius"]))
    actual = region.getOutputData("dataOut")
    self.assertEqual(actual.sum(), expected.sum(), "Value of dataOut incorrect")
    self.assertEqual(region.getOutputData("resetOut"), 1,
                     "Value of resetOut incorrect")
    self.assertEqual(region.getOutputData("sequenceIdOut"), 43,
                     "Value of sequenceIdOut incorrect")

    # Make sure we can save and load the network after running
    net.save(os.path.join(self.tmpDir, "coordinateNetwork.nta"))
    net2 = Network(os.path.join(self.tmpDir, "coordinateNetwork.nta"))
    region2 = net2.regions.get("coordinate")
    vfe2 = net2.regions.get("output")

    # Ensure parameters are preserved
    self.assertEqual(region2.getParameter("outputWidth"), self.encoder.n,
                     "Incorrect outputWidth parameter")

    # Ensure the queue is preserved through save/load: the third queued
    # record ([18, 19, 20], seq id 44) should come out of the loaded net.
    vfe2.setParameter("outputFile", os.path.join(self.tmpDir, "temp.csv"))
    net2.run(1)
    expected = self.encoder.encode((numpy.array([18, 19, 20]),
                                    params["radius"]))
    actual = region2.getOutputData("dataOut")
    self.assertEqual(actual.sum(), expected.sum(), "Value of dataOut incorrect")
    self.assertEqual(region2.getOutputData("resetOut"), 0,
                     "Value of resetOut incorrect")

    self.assertEqual(region2.getOutputData("sequenceIdOut"), 44,
                     "Value of sequenceIdOut incorrect")
    def testSimpleMulticlassNetworkPY(self):
        """Train a RecordSensor -> SDRClassifierRegion (implementation='py')
        network on an alternating two-category dataset, verify classifier
        parameters are readable, and check 100% recall on the training data.
        """
        # Setup data record stream of fake data (with three categories)
        filename = _getTempFileName()
        fields = [("timestamp", "datetime", "T"), ("value", "float", ""),
                  ("reset", "int", "R"), ("sid", "int", "S"),
                  ("categories", "list", "C")]
        records = ([datetime(day=1, month=3, year=2010), 0.0, 1, 0, "0"], [
            datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1"
        ], [datetime(day=3, month=3, year=2010), 0.0, 0, 0,
            "0"], [datetime(day=4, month=3, year=2010), 1.0, 0, 0,
                   "1"], [datetime(day=5, month=3, year=2010), 0.0, 0, 0, "0"],
                   [datetime(day=6, month=3, year=2010), 1.0, 0, 0, "1"
                    ], [datetime(day=7, month=3, year=2010), 0.0, 0, 0, "0"],
                   [datetime(day=8, month=3, year=2010), 1.0, 0, 0, "1"])
        dataSource = FileRecordStream(streamID=filename,
                                      write=True,
                                      fields=fields)
        for r in records:
            dataSource.appendRecord(list(r))

        # Create the network and get region instances.
        net = Network()
        net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
        net.addRegion("classifier", "py.SDRClassifierRegion",
                      "{steps: '0', alpha: 0.001, implementation: 'py'}")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="dataOut",
                 destInput="bottomUpIn")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="categoryOut",
                 destInput="categoryIn")
        sensor = net.regions["sensor"]
        classifier = net.regions["classifier"]

        # Setup sensor region encoder and data stream.
        # The stream is closed and reopened read-only before training.
        dataSource.close()
        dataSource = FileRecordStream(filename)
        sensorRegion = sensor.getSelf()
        sensorRegion.encoder = MultiEncoder()
        sensorRegion.encoder.addEncoder(
            "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
        sensorRegion.dataSource = dataSource

        # Get ready to run.
        net.initialize()

        # Train the network (by default learning is ON in the classifier, but assert
        # anyway) and then turn off learning and turn on inference mode.
        self.assertEqual(classifier.getParameter("learningMode"), 1)
        net.run(8)

        # Test the network on the same data as it trained on; should classify with
        # 100% accuracy.
        classifier.setParameter("inferenceMode", 1)
        classifier.setParameter("learningMode", 0)

        # Assert learning is OFF and that the classifier learned the dataset.
        self.assertEqual(classifier.getParameter("learningMode"), 0,
                         "Learning mode is not turned off.")
        self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                         "Inference mode is not turned on.")

        # make sure we can access all the parameters with getParameter
        self.assertEqual(classifier.getParameter("maxCategoryCount"), 2000)
        self.assertAlmostEqual(float(classifier.getParameter("alpha")), 0.001)
        self.assertEqual(int(classifier.getParameter("steps")), 0)
        self.assertTrue(classifier.getParameter("implementation") == "py")
        self.assertEqual(classifier.getParameter("verbosity"), 0)

        # One expected category per record, matching the training labels.
        expectedCats = (
            [0.0],
            [1.0],
            [0.0],
            [1.0],
            [0.0],
            [1.0],
            [0.0],
            [1.0],
        )
        dataSource.rewind()
        for i in xrange(8):
            net.run(1)
            inferredCats = classifier.getOutputData("categoriesOut")
            self.assertSequenceEqual(
                expectedCats[i], inferredCats.tolist(),
                "Classififer did not infer expected category "
                "for record number {}.".format(i))
        # Close data stream, delete file.
        dataSource.close()
        os.remove(filename)
# Ejemplo n.º 28
# 0
class SaccadeNetwork(object):
  """
  A HTM network structured as follows:
  SaccadeSensor (RandomSaccade) -> SP -> TM -> Classifier (KNN)
  """
  def __init__(self,
               networkName,
               trainingSet,
               testingSet,
               loggingDir = None,
               validationSet=None,
               detailedSaccadeWidth=IMAGE_WIDTH,
               detailedSaccadeHeight=IMAGE_HEIGHT,
               createNetwork=True):
    """
    :param str networkName: Where the network will be serialized/saved to
    :param str trainingSet: Path to set of images to train on
    :param str testingSet: Path to set of images to test
    :param str loggingDir: directory to store logged images in
      (note: no image logging if none)
    :param validationSet: (optional) Path to set of images to validate on
    :param int detailedSaccadeWidth: (optional) Width of detailed saccades to
      return from the runNetworkOneImage and testNetworkOneImage
    :param int detailedSaccadeHeight: (optional) Height of detailed saccades to
      return from the runNetworkOneImage and testNetworkOneImage
    :param bool createNetwork: If false, wait until createNet is manually
      called to create the network. Otherwise, create on __init__
    """
    self.loggingDir = loggingDir
    self.netFile = networkName
    self.trainingSet = trainingSet
    self.validationSet = validationSet
    self.testingSet = testingSet
    self.detailedSaccadeWidth = detailedSaccadeWidth
    self.detailedSaccadeHeight = detailedSaccadeHeight

    self.net = None
    self.trainingImageIndex = None
    self.networkClassifier = None
    self.networkDutyCycles = None
    self.networkSP = None
    self.networkTM = None
    self.networkSensor = None
    self.numTrainingImages = 0
    self.numTestingImages = 0
    self.trainingImageIndex = 0
    self.testingImageIndex = 0
    self.numCorrect = 0

    if createNetwork:
      self.createNet()


  def createNet(self):
    """ Set up the structure of the network """
    net = Network()

    Network.unregisterRegion(SaccadeSensor.__name__)
    Network.registerRegion(SaccadeSensor)

    Network.registerRegion(TMRegion)

    imageSensorParams = copy.deepcopy(DEFAULT_IMAGESENSOR_PARAMS)
    if self.loggingDir is not None:
      imageSensorParams["logDir"] = "sensorImages/" + self.loggingDir
      imageSensorParams["logOutputImages"] = 1
      imageSensorParams["logOriginalImages"] = 1
      imageSensorParams["logFilteredImages"] = 1
      imageSensorParams["logLocationImages"] = 1
      imageSensorParams["logLocationOnOriginalImage"] = 1

    net.addRegion("sensor", "py.SaccadeSensor",
                  yaml.dump(imageSensorParams))
    sensor = net.regions["sensor"].getSelf()

    DEFAULT_SP_PARAMS["columnCount"] = sensor.getOutputElementCount("dataOut")
    net.addRegion("SP", "py.SPRegion", yaml.dump(DEFAULT_SP_PARAMS))
    sp = net.regions["SP"].getSelf()

    DEFAULT_TM_PARAMS["columnDimensions"] = (sp.getOutputElementCount("bottomUpOut"),)
    net.addRegion("TM", "py.TMRegion", yaml.dump(DEFAULT_TM_PARAMS))

    net.addRegion("classifier","py.KNNClassifierRegion",
                  yaml.dump(DEFAULT_CLASSIFIER_PARAMS))


    net.link("sensor", "SP", "UniformLink", "",
             srcOutput="dataOut", destInput="bottomUpIn")
    net.link("SP", "TM", "UniformLink", "",
             srcOutput="bottomUpOut", destInput="activeColumns")
    net.link("sensor", "TM", "UniformLink", "",
             srcOutput="saccadeOut", destInput="activeExternalCells")
    net.link("TM", "classifier", "UniformLink", "",
             srcOutput="predictedActiveCells", destInput="bottomUpIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="categoryOut", destInput="categoryIn")

    self.net = net
    self.networkSensor = self.net.regions["sensor"]
    self.networkSP = self.net.regions["SP"]
    self.networkTM = self.net.regions["TM"]
    self.networkClassifier = self.net.regions["classifier"]


  def loadFromFile(self, filename):
    """ Load a serialized network
    :param filename: Where the network should be loaded from
    """
    print "Loading network from {file}...".format(file=filename)
    Network.unregisterRegion(SaccadeSensor.__name__)
    Network.registerRegion(SaccadeSensor)

    Network.registerRegion(TMRegion)

    self.net = Network(filename)

    self.networkSensor = self.net.regions["sensor"]
    self.networkSensor.setParameter("numSaccades", SACCADES_PER_IMAGE_TESTING)

    self.networkSP = self.net.regions["SP"]
    self.networkClassifier = self.net.regions["classifier"]

    self.numCorrect = 0

  def loadExperiment(self):
    """ Load images into ImageSensor and set the learning mode for the SP. """
    print "============= Loading training images ================="
    t1 = time.time()
    self.networkSensor.executeCommand(
        ["loadMultipleImages", self.trainingSet])
    numTrainingImages = self.networkSensor.getParameter("numImages")
    t2 = time.time()
    print "Load time for training images:", t2-t1
    print "Number of training images", numTrainingImages

    self.numTrainingImages = numTrainingImages
    self.trainingImageIndex = 0


  def runNetworkOneImage(self, enableViz=False):
    """ Runs a single image through the network stepping through all saccades

    :param bool enableViz: If true, visualizations are generated and returned
    :return: If enableViz, return a tuple (saccadeImgsList, saccadeDetailList,
      saccadeHistList). saccadeImgsList is a list of images with the fovea
      highlighted. saccadeDetailList is a list of resized images showing the
      contents of the fovea. saccadeHistList shows the fovea history.
      If not enableViz, returns True
      Regardless of enableViz, if False is returned, all images have been
      saccaded over.
    """
    if self.trainingImageIndex < self.numTrainingImages:

      saccadeList = []
      saccadeImgsList = []
      saccadeHistList = []
      saccadeDetailList = []
      originalImage = None

      self.networkTM.executeCommand(["reset"])

      for i in range(SACCADES_PER_IMAGE_TRAINING):
        self.net.run(1)
        if originalImage is None:
          originalImage = deserializeImage(
              yaml.load(self.networkSensor.getParameter("originalImage")))
          imgCenter = (originalImage.size[0] / 2,
                       originalImage.size[1] / 2,)
        saccadeList.append({
            "offset1":
                (yaml.load(
                    self.networkSensor
                    .getParameter("prevSaccadeInfo"))
                 ["prevOffset"]),
            "offset2":
                (yaml.load(
                    self.networkSensor
                    .getParameter("prevSaccadeInfo"))
                 ["newOffset"])})

        if enableViz:
          detailImage = deserializeImage(
              yaml.load(self.networkSensor.getParameter("outputImage")))
          detailImage = detailImage.resize((self.detailedSaccadeWidth,
                                            self.detailedSaccadeHeight),
                                           Image.ANTIALIAS)
          saccadeDetailList.append(ImageTk.PhotoImage(detailImage))

          imgWithSaccade = originalImage.convert("RGB")
          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset2"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset2"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2)),
              fill=(255, 0, 0), width=1) # Left
          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset2"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset2"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2)),
              fill=(255, 0, 0), width=1) # Right
          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset2"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset2"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2)),
              fill=(255, 0, 0), width=1) # Top
          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset2"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset2"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2)),
              fill=(255, 0, 0), width=1) # Bottom

          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset1"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset1"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2)),
              fill=(0, 255, 0), width=1) # Left
          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset1"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset1"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2)),
              fill=(0, 255, 0), width=1) # Right
          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset1"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset1"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2)),
              fill=(0, 255, 0), width=1) # Top
          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset1"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset1"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2)),
              fill=(0, 255, 0), width=1) # Bottom

          saccadeImgsList.append(ImageTk.PhotoImage(imgWithSaccade))

          saccadeHist = originalImage.convert("RGB")
          for i, saccade in enumerate(saccadeList):
            ImageDraw.Draw(saccadeHist).rectangle(
                (imgCenter[0] + saccade["offset2"][0] - _FOVEA_SIZE/2,
                 imgCenter[0] + saccade["offset2"][1] - _FOVEA_SIZE/2,
                 imgCenter[0] + saccade["offset2"][0] + _FOVEA_SIZE/2,
                 imgCenter[0] + saccade["offset2"][1] + _FOVEA_SIZE/2),
                fill=(0,
                      (255/SACCADES_PER_IMAGE_TRAINING*(SACCADES_PER_IMAGE_TRAINING-i)),
                      (255/SACCADES_PER_IMAGE_TRAINING*i)))
          saccadeHist = saccadeHist.resize((self.detailedSaccadeWidth,
                                            self.detailedSaccadeHeight),
                                           Image.ANTIALIAS)
          saccadeHistList.append(ImageTk.PhotoImage(saccadeHist))

      self.trainingImageIndex += 1
      print ("Iteration: {iter}; Category: {cat}"
             .format(iter=self.trainingImageIndex,
                     cat=self.networkSensor.getOutputData("categoryOut")))

      if enableViz:
        return (saccadeImgsList, saccadeDetailList, saccadeHistList,
                self.networkSensor.getOutputData("categoryOut"))
      return True

    else:
      return False


  def runNetworkBatch(self, batchSize):
    """ Run the network in batches.

    :param batchSize: Number of images to show in this batch
    :return: True if there are more images left to be saccaded over.
      Otherwise False.
    """
    startTime = time.time()
    while self.trainingImageIndex < self.numTrainingImages:
      self.networkTM.executeCommand(["reset"])
      for i in range(SACCADES_PER_IMAGE_TRAINING):
        self.net.run(1)

      self.trainingImageIndex += 1
      if self.trainingImageIndex % batchSize == 0:
        print ("Iteration: {iter}; Category: {cat}; Time per batch: {t}"
               .format(iter=self.trainingImageIndex,
                       cat=self.networkSensor.getOutputData("categoryOut"),
                       t=time.time()-startTime))
        return True
    return False

  def setupNetworkTest(self):
    self.networkSensor.executeCommand(["loadMultipleImages", self.testingSet])
    self.numTestingImages = self.networkSensor.getParameter("numImages")
    self.testingImageIndex = 0

    print "NumTestingImages {test}".format(test=self.numTestingImages)


  def testNetworkOneImage(self, enableViz=False):
    if self.testingImageIndex < self.numTestingImages:
      saccadeList = []
      saccadeImgsList = []
      saccadeHistList = []
      saccadeDetailList = []
      inferredCategoryList = []
      originalImage = None

      self.networkTM.executeCommand(["reset"])
      for i in range(SACCADES_PER_IMAGE_TESTING):
        self.net.run(1)
        if originalImage is None:
          originalImage = deserializeImage(
              yaml.load(self.networkSensor.getParameter("originalImage")))
          imgCenter = (originalImage.size[0] / 2,
                       originalImage.size[1] / 2,)
        saccadeList.append({
            "offset1":
                (yaml.load(
                    self.networkSensor
                    .getParameter("prevSaccadeInfo"))
                 ["prevOffset"]),
            "offset2":
                (yaml.load(
                    self.networkSensor
                    .getParameter("prevSaccadeInfo"))
                 ["newOffset"])})
        inferredCategoryList.append(
            self.networkClassifier.getOutputData("categoriesOut").argmax())

        if enableViz:
          detailImage = deserializeImage(
              yaml.load(self.networkSensor.getParameter("outputImage")))
          detailImage = detailImage.resize((self.detailedSaccadeWidth,
                                            self.detailedSaccadeHeight),
                                           Image.ANTIALIAS)
          saccadeDetailList.append(ImageTk.PhotoImage(detailImage))

          imgWithSaccade = originalImage.convert("RGB")
          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset2"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset2"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2)),
              fill=(255, 0, 0), width=1) # Left
          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset2"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset2"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2)),
              fill=(255, 0, 0), width=1) # Right
          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset2"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset2"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] - (_FOVEA_SIZE / 2)),
              fill=(255, 0, 0), width=1) # Top
          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset2"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset2"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset2"][1] + (_FOVEA_SIZE / 2)),
              fill=(255, 0, 0), width=1) # Bottom

          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset1"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset1"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2)),
              fill=(0, 255, 0), width=1) # Left
          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset1"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset1"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2)),
              fill=(0, 255, 0), width=1) # Right
          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset1"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset1"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] - (_FOVEA_SIZE / 2)),
              fill=(0, 255, 0), width=1) # Top
          ImageDraw.Draw(imgWithSaccade).line(
              (imgCenter[0] + saccadeList[i]["offset1"][0] + (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2),
               imgCenter[0] + saccadeList[i]["offset1"][0] - (_FOVEA_SIZE / 2),
               imgCenter[1] + saccadeList[i]["offset1"][1] + (_FOVEA_SIZE / 2)),
              fill=(0, 255, 0), width=1) # Bottom

          saccadeImgsList.append(ImageTk.PhotoImage(imgWithSaccade))

          saccadeHist = originalImage.convert("RGB")
          for i, saccade in enumerate(saccadeList):
            ImageDraw.Draw(saccadeHist).rectangle(
                (imgCenter[0] + saccade["offset2"][0] - _FOVEA_SIZE/2,
                 imgCenter[0] + saccade["offset2"][1] - _FOVEA_SIZE/2,
                 imgCenter[0] + saccade["offset2"][0] + _FOVEA_SIZE/2,
                 imgCenter[0] + saccade["offset2"][1] + _FOVEA_SIZE/2),
                fill=(0,
                      (255/SACCADES_PER_IMAGE_TESTING*(SACCADES_PER_IMAGE_TESTING-i)),
                      (255/SACCADES_PER_IMAGE_TESTING*i)))
          saccadeHist = saccadeHist.resize((self.detailedSaccadeWidth,
                                            self.detailedSaccadeHeight),
                                           Image.ANTIALIAS)
          saccadeHistList.append(ImageTk.PhotoImage(saccadeHist))

      inferredCategory = self._getMostCommonCategory(inferredCategoryList)
      isCorrectClassification = False
      if self.networkSensor.getOutputData("categoryOut") == inferredCategory:
        isCorrectClassification = True
        self.numCorrect += 1
      print ("Iteration: {iter}; Category: {cat}"
             .format(iter=self.testingImageIndex,
                     cat=self.networkSensor.getOutputData("categoryOut")))
      self.testingImageIndex += 1

      if enableViz:
        return (saccadeImgsList, saccadeDetailList, saccadeHistList,
                inferredCategoryList,
                self.networkSensor.getOutputData("categoryOut"),
                isCorrectClassification)
      return (True, isCorrectClassification)

    else:
      return False


  def testNetworkBatch(self, batchSize):
    if self.testingImageIndex >= self.numTestingImages:
      return False

    while self.testingImageIndex < self.numTestingImages:
      inferredCategoryList = []
      self.networkTM.executeCommand(["reset"])
      for i in range(SACCADES_PER_IMAGE_TESTING):
        self.net.run(1)
        inferredCategoryList.append(
            self.networkClassifier.getOutputData("categoriesOut").argmax())
      inferredCategory = self._getMostCommonCategory(inferredCategoryList)
      if self.networkSensor.getOutputData("categoryOut") == inferredCategory:
        self.numCorrect += 1

      self.testingImageIndex += 1

      if self.testingImageIndex % batchSize == 0:
        print ("Testing iteration: {iter}"
               .format(iter=self.testingImageIndex))
        break

    return self.numCorrect


  @staticmethod
  def _getMostCommonCategory(categoryList):
    return collections.Counter(categoryList).most_common(1)[0][0]


  def setLearningMode(self,
                      learningSP=False,
                      learningTM=False,
                      learningClassifier=False):
    if learningSP:
      self.networkSP.setParameter("learningMode", 1)
      self.networkSP.setParameter("inferenceMode", 0)
    else:
      self.networkSP.setParameter("learningMode", 0)
      self.networkSP.setParameter("inferenceMode", 1)

    if learningTM:
      self.networkTM.setParameter("learningMode", 1)
    else:
      self.networkTM.setParameter("learningMode", 0)

    if learningClassifier:
      self.networkClassifier.setParameter("learningMode", 1)
      self.networkClassifier.setParameter("inferenceMode", 0)
    else:
      self.networkClassifier.setParameter("learningMode", 0)
      self.networkClassifier.setParameter("inferenceMode", 1)


  def saveNetwork(self):
    print "Saving network at {path}".format(path=self.netFile)
    self.net.save(self.netFile)


  def resetIndex(self):
    self.trainingImageIndex = 0
    def testCreateL4L6aLocationColumn(self):
        """
        Test 'createL4L6aLocationColumn' by learning a set of hand crafted
        objects and then verifying each can be inferred from a shuffled
        sequence of feature touches.
        """
        scale = []
        orientation = []
        # Initialize L6a location region with 5 modules varying scale by sqrt(2) and
        # 4 different random orientations for each scale
        for i in xrange(5):
            for _ in xrange(4):
                angle = np.radians(random.gauss(7.5, 7.5))
                orientation.append(random.choice([angle, -angle]))
                scale.append(10.0 * (math.sqrt(2)**i))

        # Build the L4 + L6a column; parameters follow the region defaults
        # except thresholds/permanences tuned for one-shot learning
        # (initialPermanence 1.0).
        net = Network()
        createL4L6aLocationColumn(
            net, {
                "inverseReadoutResolution": 8,
                "sensorInputSize": NUM_OF_CELLS,
                "L4Params": {
                    "columnCount": NUM_OF_COLUMNS,
                    "cellsPerColumn": CELLS_PER_COLUMN,
                    "activationThreshold": 15,
                    "minThreshold": 15,
                    "initialPermanence": 1.0,
                    "implementation": "ApicalTiebreak",
                    "maxSynapsesPerSegment": -1
                },
                "L6aParams": {
                    "moduleCount": len(scale),
                    "scale": scale,
                    "orientation": orientation,
                    "anchorInputSize": NUM_OF_CELLS,
                    "activationThreshold": 8,
                    "initialPermanence": 1.0,
                    "connectedPermanence": 0.5,
                    "learningThreshold": 8,
                    "sampleSize": 10,
                    "permanenceIncrement": 0.1,
                    "permanenceDecrement": 0.0,
                    "bumpOverlapMethod": "probabilistic"
                }
            })
        net.initialize()

        L4 = net.regions['L4']
        L6a = net.regions['L6a']
        sensor = net.regions['sensorInput'].getSelf()
        motor = net.regions['motorInput'].getSelf()

        # Keeps a list of learned objects
        learnedRepresentations = defaultdict(list)

        # Learn Objects
        self._setLearning(net, True)

        for objectDescription in OBJECTS:
            reset = True
            previousLocation = None
            L6a.executeCommand(["activateRandomLocation"])

            for iFeature, feature in enumerate(objectDescription["features"]):
                # Move the sensor to the center of the object
                locationOnObject = np.array([
                    feature["top"] + feature["height"] / 2.,
                    feature["left"] + feature["width"] / 2.
                ])

                # Calculate displacement from previous location
                if previousLocation is not None:
                    motor.addDataToQueue(locationOnObject - previousLocation)
                previousLocation = locationOnObject

                # Sense feature at location
                sensor.addDataToQueue(FEATURE_ACTIVE_COLUMNS[feature["name"]],
                                      reset, 0)
                net.run(1)
                reset = False

                # Save learned representations
                representation = L6a.getOutputData("sensoryAssociatedCells")
                representation = representation.nonzero()[0]
                learnedRepresentations[(objectDescription["name"],
                                        iFeature)] = representation

        # Infer objects
        self._setLearning(net, False)

        for objectDescription in OBJECTS:
            reset = True
            previousLocation = None
            inferred = False

            features = objectDescription["features"]
            # Touch the features in a random order to show inference does not
            # depend on the learned sequence
            touchSequence = range(len(features))
            random.shuffle(touchSequence)

            for iFeature in touchSequence:
                feature = features[iFeature]

                # Move the sensor to the center of the object
                locationOnObject = np.array([
                    feature["top"] + feature["height"] / 2.,
                    feature["left"] + feature["width"] / 2.
                ])

                # Calculate displacement from previous location
                if previousLocation is not None:
                    motor.addDataToQueue(locationOnObject - previousLocation)
                previousLocation = locationOnObject

                # Sense feature at location
                sensor.addDataToQueue(FEATURE_ACTIVE_COLUMNS[feature["name"]],
                                      reset, 0)
                net.run(1)
                reset = False

                # Inferred once the active representation is a subset of the
                # representation learned for this object/feature pair
                representation = L6a.getOutputData("sensoryAssociatedCells")
                representation = representation.nonzero()[0]
                target_representations = set(
                    learnedRepresentations[(objectDescription["name"],
                                            iFeature)])

                inferred = (set(representation) <= target_representations)
                if inferred:
                    break

            self.assertTrue(inferred)
  def testSimpleMulticlassNetworkPY(self):
    """Train the 'py' SDRClassifierRegion on a tiny two-category scalar
    stream, then verify 100% inference accuracy on the same records and
    that all classifier parameters are accessible via getParameter."""
    # Setup data record stream of fake data (with three categories)
    filename = _getTempFileName()
    fields = [("timestamp", "datetime", "T"),
              ("value", "float", ""),
              ("reset", "int", "R"),
              ("sid", "int", "S"),
              ("categories", "list", "C")]
    records = (
      [datetime(day=1, month=3, year=2010), 0.0, 1, 0, "0"],
      [datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=3, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=4, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=5, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=6, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=7, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=8, month=3, year=2010), 1.0, 0, 0, "1"])
    dataSource = FileRecordStream(streamID=filename, write=True, fields=fields)
    for r in records:
      dataSource.appendRecord(list(r))

    # Create the network and get region instances.
    net = Network()
    net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
    net.addRegion("classifier", "py.SDRClassifierRegion",
                  "{steps: '0', alpha: 0.001, implementation: 'py'}")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="dataOut", destInput="bottomUpIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="categoryOut", destInput="categoryIn")
    sensor = net.regions["sensor"]
    classifier = net.regions["classifier"]

    # Setup sensor region encoder and data stream (reopen the file for read).
    dataSource.close()
    dataSource = FileRecordStream(filename)
    sensorRegion = sensor.getSelf()
    sensorRegion.encoder = MultiEncoder()
    sensorRegion.encoder.addEncoder(
      "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
    sensorRegion.dataSource = dataSource

    # Get ready to run.
    net.initialize()

    # Train the network (by default learning is ON in the classifier, but assert
    # anyway) and then turn off learning and turn on inference mode.
    self.assertEqual(classifier.getParameter("learningMode"), 1)
    net.run(8)

    # Test the network on the same data as it trained on; should classify with
    # 100% accuracy.
    classifier.setParameter("inferenceMode", 1)
    classifier.setParameter("learningMode", 0)

    # Assert learning is OFF and that the classifier learned the dataset.
    self.assertEqual(classifier.getParameter("learningMode"), 0,
                     "Learning mode is not turned off.")
    self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                     "Inference mode is not turned on.")

    # make sure we can access all the parameters with getParameter
    self.assertEqual(classifier.getParameter("maxCategoryCount"), 2000)
    self.assertAlmostEqual(float(classifier.getParameter("alpha")), 0.001)
    self.assertEqual(int(classifier.getParameter("steps")), 0)
    self.assertTrue(classifier.getParameter("implementation") == "py")
    self.assertEqual(classifier.getParameter("verbosity"), 0)

    expectedCats = ([0.0], [1.0], [0.0], [1.0], [0.0], [1.0], [0.0], [1.0],)
    dataSource.rewind()
    for i in xrange(8):
      net.run(1)
      inferredCats = classifier.getOutputData("categoriesOut")
      # Typo fix in failure message: "Classififer" -> "Classifier".
      self.assertSequenceEqual(expectedCats[i], inferredCats.tolist(),
                               "Classifier did not infer expected category "
                               "for record number {}.".format(i))
    # Close data stream, delete file.
    dataSource.close()
    os.remove(filename)
class FunctionRecogniter():
    """HTM network that learns to recognise function types from (x, y) samples.

    Builds a NuPIC Network of RecordSensor -> SPRegion -> TPRegion chains (one
    chain per edge in ``net_structure``), with one CLAClassifierRegion per
    destination region.  ``run()`` pushes one record through the network and
    returns per-region classifier inferences plus per-region anomaly scores.

    NOTE(review): relies on module-level names being importable at call time
    (``NetworkEvaluation``, the ``create_network`` module, the ``nupic``
    package) — confirm against the rest of this file / the project.
    """

    def __init__(self):
        from collections import OrderedDict

        # Number of records pushed through the network so far; used as the
        # classifier's recordNum in _learn_classifier_multi().
        self.run_number = 0

        # for classifier
        self.classifier_encoder_list = {}   # classifier region name -> MultiEncoder
        self.classifier_input_list   = {}   # classifier region name -> name of the TP region feeding it
        self.prevPredictedColumns    = {}   # region name -> columns predicted on the previous step

        # Region whose anomaly score gates the cell-activity bookkeeping in run().
        self.selectivity = "region1"

        # net structure: source (sensor or region) name -> list of destination regions.
        # OrderedDict so regions are created/linked in a deterministic order.
        self.net_structure = OrderedDict()
        self.net_structure['sensor1'] = ['region1']
        # self.net_structure['sensor2'] = ['region2']
        # self.net_structure['sensor3'] = ['region3']
        # self.net_structure['region1'] = ['region4']
        # self.net_structure['region2'] = ['region4']

        # sensor change params: per-sensor overrides merged onto
        # create_network.SENSOR_PARAMS by deepupdate() in _createNetwork().
        self.sensor_params = {
                'sensor1': {
                    'xy_value': {
                        'maxval': 100.0,
                        'minval':  0.0
                        },
                    },
                # 'sensor2': {
                #     'xy_value': {
                #         'maxval': 80.0,
                #         'minval': 20.0
                #         },
                #     },
                # 'sensor3': {
                #     'xy_value': {
                #         'maxval': 100.0,
                #         'minval':  40.0
                #         },
                #     },
                }

        # region change params: per-region overrides merged onto
        # create_network.PARAMS.  (Name "dest_resgion_data" is a historical
        # typo kept for compatibility.)
        self.dest_resgion_data = {
                'region1': {
                    'SP_PARAMS':{
                        "columnCount": 2024,
                        "numActiveColumnsPerInhArea": 20,
                        },
                    'TP_PARAMS':{
                        "cellsPerColumn": 16
                        },
                    },
                # 'region2': {
                #     'TP_PARAMS':{
                #         "cellsPerColumn": 8
                #         },
                #     },
                # 'region3': {
                #     'TP_PARAMS':{
                #         "cellsPerColumn": 8
                #         },
                #     },
                # 'region4': {
                #     'SP_PARAMS':{
                #         "inputWidth": 2024 * (4 + 8)
                #         },
                #     'TP_PARAMS':{
                #         "cellsPerColumn": 16
                #         },
                #     },
                 }

        self._createNetwork()


        # for evaluating network accuracy
        self.evaluation = NetworkEvaluation()


    def _addRegion(self, src_name, dest_name, params):
        """Create (if needed) the SP/TP/classifier chain for ``dest_name`` and
        link ``src_name`` into it.

        If the regions already exist, only the sensor->SP link is added
        (supports fan-in: several sources feeding one destination region).

        :param src_name: name of the upstream region or sensor to link from
        :param dest_name: logical region name; actual regions are
            "sp_<dest>", "tp_<dest>" and "class_<dest>"
        :param params: dict with 'SP_PARAMS', 'TP_PARAMS',
            'CLASSIFIER_PARAMS' and 'CLASSIFIER_ENCODE_PARAMS'
        """
        import json
        from nupic.encoders import MultiEncoder

        sensor     =  src_name
        sp_name    = "sp_" + dest_name
        tp_name    = "tp_" + dest_name
        class_name = "class_" + dest_name

        try:
            # Existence check: regions[...] raises if any region is missing.
            self.network.regions[sp_name]
            self.network.regions[tp_name]
            self.network.regions[class_name]

            self.network.link(sensor, sp_name, "UniformLink", "")

        except Exception as e:
            # sp
            self.network.addRegion(sp_name, "py.SPRegion", json.dumps(params['SP_PARAMS']))
            self.network.link(sensor, sp_name, "UniformLink", "")

            # tp
            self.network.addRegion(tp_name, "py.TPRegion", json.dumps(params['TP_PARAMS']))
            self.network.link(sp_name, tp_name, "UniformLink", "")

            # class
            self.network.addRegion( class_name, "py.CLAClassifierRegion", json.dumps(params['CLASSIFIER_PARAMS']))
            self.network.link(tp_name, class_name, "UniformLink", "")

            encoder = MultiEncoder()
            encoder.addMultipleEncoders(params['CLASSIFIER_ENCODE_PARAMS'])
            self.classifier_encoder_list[class_name]  = encoder
            self.classifier_input_list[class_name]    = tp_name

    def _initRegion(self, name):
        """Set the runtime mode parameters of the SP/TP/classifier regions
        belonging to logical region ``name`` (learning on, anomaly mode on
        for SP)."""
        sp_name = "sp_"+ name
        tp_name = "tp_"+ name
        class_name = "class_"+ name

        # setting sp
        SP = self.network.regions[sp_name]
        SP.setParameter("learningMode", True)
        SP.setParameter("anomalyMode", True)

        # setting tp
        TP = self.network.regions[tp_name]
        TP.setParameter("topDownMode", False)
        TP.setParameter("learningMode", True)
        TP.setParameter("inferenceMode", True)
        TP.setParameter("anomalyMode", False)

        # configure the classifier region.
        classifier = self.network.regions[class_name]
        classifier.setParameter('inferenceMode', True)
        classifier.setParameter('learningMode', True)


    def _createNetwork(self):
        """Build the whole NuPIC network from ``net_structure``,
        ``sensor_params`` and ``dest_resgion_data``, then initialize it."""

        def deepupdate(original, update):
            """
            Recursively update a dict.
            Subdict's won't be overwritten but also updated.
            """
            for key, value in original.iteritems():
                if not key in update:
                    update[key] = value
                elif isinstance(value, dict):
                    deepupdate(value, update[key])
            return update


        from nupic.algorithms.anomaly import computeAnomalyScore
        from nupic.encoders import MultiEncoder
        from nupic.engine import Network
        import create_network as cn
        import json
        import itertools


        self.network = Network()

        # sensor: one RecordSensor per entry in sensor_params, each with its
        # own MultiEncoder and an in-memory DataBuffer as data source.
        for sensor_name, change_params in self.sensor_params.items():
            self.network.addRegion(sensor_name, "py.RecordSensor", json.dumps({"verbosity": 0}))
            sensor = self.network.regions[sensor_name].getSelf()

            # set encoder
            params = deepupdate(cn.SENSOR_PARAMS, change_params)
            encoder = MultiEncoder()
            encoder.addMultipleEncoders( params )
            sensor.encoder         = encoder

            # set datasource
            sensor.dataSource      = cn.DataBuffer()


        # network
        print 'create network ...'
        for source, dest_list in self.net_structure.items():
            for dest in dest_list:
                change_params = self.dest_resgion_data[dest]
                params = deepupdate(cn.PARAMS, change_params)

                if source in self.sensor_params.keys():
                    # Source is a sensor: SP input width comes from its encoder.
                    sensor = self.network.regions[source].getSelf()
                    params['SP_PARAMS']['inputWidth'] = sensor.encoder.getWidth()
                    self._addRegion(source, dest, params)
                else:
                    # Source is another region: feed from its TP output.
                    self._addRegion("tp_" + source, dest, params)

        # initialize
        print 'initializing network ...'
        self.network.initialize()
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            self._initRegion(name)


        # TODO: in a 1-3-1 structure, blindly increasing the number of TP
        #       cells may actually be counterproductive.

        return


    def run(self, input_data, learn=True):
        """
        Run the network for one record.

        To train, call with learn=True and an 'ftype' value.
        To predict, call with learn=False and 'ftype' set to None.
        (Prediction is performed while training as well.)

        input_data = {'xy_value': [1.0, 2.0], 'ftype': 'sin'}

        :returns: dict with one 'classifier_<region>' inference entry per
            region and an 'anomaly' entry mapping region name -> score
        """
        import itertools

        self.enable_learning_mode(learn)
        self.run_number += 1

        # calc encoder, SP, TP
        for sensor_name in self.sensor_params.keys():
            self.network.regions[sensor_name].getSelf().dataSource.push(input_data)
        self.network.run(1)
        #self.layer_output(input_data)
        #self.debug(input_data)


        # learn classifier
        inferences = {}
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            class_name = "class_" + name
            inferences['classifier_'+name]   = self._learn_classifier_multi(class_name, actValue=input_data['ftype'], pstep=0)

        # anomaly
        inferences["anomaly"] = self._calc_anomaly()

        # selectivity: only record cell activity while the selected region's
        # anomaly is low (i.e. the sequence is well-learned).
        if input_data['ftype'] is not None and inferences["anomaly"][self.selectivity] < 0.7:
        #if input_data['ftype'] is not None and input_data['xy_value'][0] > 40 and input_data['xy_value'][0] < 60:
            tp_bottomUpOut = self.network.regions[ "tp_" + self.selectivity ].getOutputData("bottomUpOut").nonzero()[0]
            self.evaluation.save_cell_activity(tp_bottomUpOut, input_data['ftype'])

        return inferences


    def _learn_classifier_multi(self, region_name, actValue=None, pstep=0):
        """
        Run one classifier step for ``region_name``.

        Ideally this would happen inside network.run() instead of calling
        customCompute() directly, but there is no known way to retrieve the
        classifier results in that case.

        :param region_name: classifier region name ("class_<region>")
        :param actValue: ground-truth category, or None for pure inference
        :param pstep: prediction step to read from the classifier output
        """

        # TODO: fully decouple the network from the classifier.
        #       The network should compute only sensor, SP and TP;
        #       evaluation/use of those results should live outside.

        classifier     = self.network.regions[region_name]
        encoder        = self.classifier_encoder_list[region_name].getEncoderList()[0]
        class_input    = self.classifier_input_list[region_name]
        tp_bottomUpOut = self.network.regions[class_input].getOutputData("bottomUpOut").nonzero()[0]
        #tp_bottomUpOut = self.network.regions["TP"].getSelf()._tfdr.infActiveState['t'].reshape(-1).nonzero()[0]

        if actValue is not None:
            bucketIdx = encoder.getBucketIndices(actValue)[0]
            classificationIn = {
                    'bucketIdx': bucketIdx,
                    'actValue': actValue
                    }
        else:
            # No ground truth: feed a dummy classification (learning is off).
            classificationIn = {'bucketIdx': 0,'actValue': 'no'}
        clResults = classifier.getSelf().customCompute(
                recordNum=self.run_number,
                patternNZ=tp_bottomUpOut,
                classification=classificationIn
                )

        inferences= self._get_inferences(clResults, pstep, summary_tyep='sum')

        return inferences

    def _get_inferences(self, clResults, steps, summary_tyep='sum'):
        """
        Reshape raw classifier results into an easier-to-use dict.

        :param clResults: result dict from CLAClassifier.customCompute()
        :param steps: prediction step whose likelihood vector to summarise
        :param summary_tyep: 'sum' accumulates probability per actual value;
            'best' keeps only each value's single best probability
            (parameter name is a historical typo kept for compatibility)
        :returns: {'likelihoodsDict': value -> prob,
                   'best': {'value': ..., 'prob': ...}}
        """
        from collections import defaultdict

        likelihoodsVec = clResults[steps]
        bucketValues   = clResults['actualValues']

        likelihoodsDict = defaultdict(int)
        bestActValue = None
        bestProb = None

        if summary_tyep == 'sum':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                likelihoodsDict[actValue] += prob
                if bestProb is None or likelihoodsDict[actValue] > bestProb:
                    bestProb = likelihoodsDict[actValue]
                    bestActValue = actValue

        elif summary_tyep == 'best':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                if bestProb is None or prob > bestProb:
                    likelihoodsDict[actValue] = prob
                    bestProb = prob
                    bestActValue = actValue

        return {'likelihoodsDict': likelihoodsDict, 'best': {'value': bestActValue, 'prob':bestProb}}


    def _calc_anomaly(self):
        """
        Compute the anomaly score of each region: active SP columns vs the
        columns the TP predicted on the previous step.

        :returns: dict region name -> anomaly score
        """
        import copy
        import itertools
        from nupic.algorithms.anomaly import computeAnomalyScore

        score = 0
        anomalyScore = {}
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            sp_bottomUpOut = self.network.regions["sp_"+name].getOutputData("bottomUpOut").nonzero()[0]
            if self.prevPredictedColumns.has_key(name):
                score = computeAnomalyScore(sp_bottomUpOut, self.prevPredictedColumns[name])
            #topdown_predict = self.network.regions["TP"].getSelf()._tfdr.topDownCompute().copy().nonzero()[0]
            # Save this step's TP prediction for the next call.
            topdown_predict = self.network.regions["tp_"+name].getSelf()._tfdr.topDownCompute().nonzero()[0]
            self.prevPredictedColumns[name] = copy.deepcopy(topdown_predict)

            anomalyScore[name] = score

        return anomalyScore

    def reset(self):
        """
        Reset the TP sequence state of every region (call between sequences).
        """
        import itertools
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            self.network.regions["tp_"+name].getSelf().resetSequenceStates()

    def enable_learning_mode(self, enable):
        """
        Set learningMode on every region's SP, TP and Classifier.
        """
        import itertools
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            self.network.regions["sp_"+name].setParameter("learningMode", enable)
            self.network.regions["tp_"+name].setParameter("learningMode", enable)
            self.network.regions["class_"+name].setParameter("learningMode", enable)


    def print_inferences(self, input_data, inferences):
        """
        Print one line summarising the input and the inference results:
        input values, per-region best category, per-region likelihood of the
        true category, and per-region anomaly score.
        """
        import itertools

        print "%10s, %10s, %5s" % (
                int(input_data['xy_value'][0]),
                int(input_data['xy_value'][1]),
                input_data['ftype']),

        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            print "%5s," % (inferences['classifier_'+name]['best']['value']),

        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            print "%10.6f," % (inferences['classifier_'+name]['likelihoodsDict'][input_data['ftype']]),

        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            print "%5s," % (str(inferences["anomaly"][name])),
        print
class ClaClassifier():
    """Configurable HTM classifier network.

    Like FunctionRecogniter but fully parameterised: the caller supplies the
    network topology, sensor encoder parameters, per-region parameter
    overrides and the classifier encoder parameters.  The TP layer can be
    disabled (``tp_enable``), in which case classifiers read directly from
    the SP output.

    NOTE(review): relies on module-level names being in scope at call time
    (OrderedDict, json, MultiEncoder, Network, DataBuffer, defaultdict,
    copy, computeAnomalyScore) — confirm against the file's imports.
    """

    def __init__(self, net_structure, sensor_params, dest_region_params, class_encoder_params):
        """
        :param net_structure: dict source name -> list of destination region
            names (sensors and regions may both appear as sources)
        :param sensor_params: dict sensor name -> encoder parameter dict
        :param dest_region_params: dict region name -> overrides merged onto
            ``default_params``
        :param class_encoder_params: encoder parameters for the classifier;
            its first key is used as the field to predict
        """

        # Number of records pushed through the network so far; used as the
        # classifier's recordNum.
        self.run_number = 0

        # for classifier
        self.classifier_encoder_list = {}   # classifier region name -> MultiEncoder
        self.classifier_input_list   = {}   # classifier region name -> feeding region name
        self.prevPredictedColumns    = {}   # region name -> predicted columns from previous step

        # TODO: parameters we would like to remove.
        self.predict_value = class_encoder_params.keys()[0]
        self.predict_step  = 0


        # default param
        self.default_params = {
            'SP_PARAMS':  {
                "spVerbosity": 0,
                "spatialImp": "cpp",
                "globalInhibition": 1,
                "columnCount": 2024,
                "inputWidth": 0,             # set later
                "numActiveColumnsPerInhArea": 20,
                "seed": 1956,
                "potentialPct": 0.8,
                "synPermConnected": 0.1,
                "synPermActiveInc": 0.05,
                "synPermInactiveDec": 0.0005,
                "maxBoost": 2.0,
                },
            'TP_PARAMS': {
                "verbosity": 0,
                "columnCount": 2024,
                "cellsPerColumn": 32,
                "inputWidth": 2024,
                "seed": 1960,
                "temporalImp": "cpp",
                "newSynapseCount": 20,
                "maxSynapsesPerSegment": 32,
                "maxSegmentsPerCell": 128,
                "initialPerm": 0.21,
                "permanenceInc": 0.2,
                "permanenceDec": 0.1,
                "globalDecay": 0.0,
                "maxAge": 0,
                "minThreshold": 12,
                "activationThreshold": 16,
                "outputType": "normal",
                "pamLength": 1,
                },
            'CLASSIFIER_PARAMS':  {
                "clVerbosity": 0,
                "alpha": 0.005,
                "steps": "0"
                }
            }

        # tp: when False, the TP layer is skipped entirely and classifiers
        # read from the SP output.
        self.tp_enable = True

        # net structure (example default; immediately overridden by the
        # net_structure argument below).
        self.net_structure = OrderedDict()
        self.net_structure['sensor3'] = ['region1']
        self.net_structure['region1'] = ['region2']

        self.net_structure = net_structure

        # region change params
        self.dest_region_params = dest_region_params

        # sensor change params
        self.sensor_params = sensor_params

        self.class_encoder_params = class_encoder_params

        self._createNetwork()


    def _makeRegion(self, name, params):
        """Add the SP (and optionally TP) and classifier regions for logical
        region ``name`` and register the classifier's encoder and input."""
        sp_name    = "sp_" + name
        if self.tp_enable:
            tp_name    = "tp_" + name
        class_name = "class_" + name

        # addRegion
        self.network.addRegion(sp_name, "py.SPRegion", json.dumps(params['SP_PARAMS']))
        if self.tp_enable:
            self.network.addRegion(tp_name, "py.TPRegion", json.dumps(params['TP_PARAMS']))
        self.network.addRegion( class_name, "py.CLAClassifierRegion", json.dumps(params['CLASSIFIER_PARAMS']))

        encoder = MultiEncoder()
        encoder.addMultipleEncoders(self.class_encoder_params)
        self.classifier_encoder_list[class_name]  = encoder
        if self.tp_enable:
            self.classifier_input_list[class_name]    = tp_name
        else:
            self.classifier_input_list[class_name]    = sp_name

    def _linkRegion(self, src_name, dest_name):
        """Link ``src_name`` into ``dest_name``'s chain:
        src -> SP [-> TP] -> classifier."""
        sensor     =  src_name
        sp_name    = "sp_" + dest_name
        tp_name    = "tp_" + dest_name
        class_name = "class_" + dest_name

        if self.tp_enable:
            self.network.link(sensor, sp_name, "UniformLink", "")
            self.network.link(sp_name, tp_name, "UniformLink", "")
            self.network.link(tp_name, class_name, "UniformLink", "")
        else:
            self.network.link(sensor, sp_name, "UniformLink", "")
            self.network.link(sp_name, class_name, "UniformLink", "")


    def _initRegion(self, name):
        """Set the runtime mode parameters of the SP/TP/classifier regions
        belonging to logical region ``name``."""
        sp_name = "sp_"+ name
        tp_name = "tp_"+ name
        class_name = "class_"+ name

        # setting sp
        SP = self.network.regions[sp_name]
        SP.setParameter("learningMode", True)
        SP.setParameter("anomalyMode", True)

        # # setting tp
        if self.tp_enable:
            TP = self.network.regions[tp_name]
            TP.setParameter("topDownMode", False)
            TP.setParameter("learningMode", True)
            TP.setParameter("inferenceMode", True)
            TP.setParameter("anomalyMode", False)

        # configure the classifier region.
        classifier = self.network.regions[class_name]
        classifier.setParameter('inferenceMode', True)
        classifier.setParameter('learningMode', True)


    def _createNetwork(self):
        """Build the network: sensors, per-region SP/TP/classifier chains,
        links between them, then initialize and set region modes."""

        def deepupdate(original, update):
            """
            Recursively update a dict.
            Subdict's won't be overwritten but also updated.
            """
            if update is None:
                return None
            for key, value in original.iteritems():
                if not key in update:
                    update[key] = value
                elif isinstance(value, dict):
                    deepupdate(value, update[key])
            return update


        self.network = Network()

        # check
        # if self.selectivity not in self.dest_region_params.keys():
        #     raise Exception, "There is no selected region : " + self.selectivity
        # NOTE(review): dict keys are unique by construction, so this branch
        # can never fire (and str + list would raise TypeError if it did).
        if not len(self.net_structure.keys()) == len(set(self.net_structure.keys())):
            raise Exception, "There is deplicated net_structure keys : " + self.net_structure.keys()

        # sensor: one RecordSensor per entry, with its own MultiEncoder and
        # an in-memory DataBuffer as the data source.
        for sensor_name, params in self.sensor_params.items():
            self.network.addRegion(sensor_name, "py.RecordSensor", json.dumps({"verbosity": 0}))
            sensor = self.network.regions[sensor_name].getSelf()

            # set encoder
            #params = deepupdate(cn.SENSOR_PARAMS, params)
            encoder = MultiEncoder()
            encoder.addMultipleEncoders( params )
            sensor.encoder         = encoder
            sensor.dataSource      = DataBuffer()


        # network
        print 'create element ...'
        for name in self.dest_region_params.keys():
            change_params = self.dest_region_params[name]
            params = deepupdate(self.default_params, change_params)
            # input width: sum of the widths of every source feeding this
            # region (encoder width for sensors, TP cell count for regions).
            input_width = 0
            for source in [s for s,d in self.net_structure.items() if name in d]:
                if source in self.sensor_params.keys():
                    sensor = self.network.regions[source].getSelf()
                    input_width += sensor.encoder.getWidth()
                else:
                    input_width += params['TP_PARAMS']['cellsPerColumn'] * params['TP_PARAMS']['columnCount']

            params['SP_PARAMS']['inputWidth'] = input_width
            self._makeRegion(name, params)

        # link
        print 'link network ...'
        for source, dest_list in self.net_structure.items():
            for dest in dest_list:
                if source in self.sensor_params.keys():
                    self._linkRegion(source, dest)
                else:
                    if self.tp_enable:
                        self._linkRegion("tp_" + source, dest)
                    else:
                        self._linkRegion("sp_" + source, dest)

        # initialize
        print 'initializing network ...'
        self.network.initialize()
        for name in self.dest_region_params.keys():
            self._initRegion(name)

        return


    #@profile
    def run(self, input_data, learn=True, class_learn=True,learn_layer=None):
        """
        Run the network for one record.

        To train, call with learn=True and include the predicted field.
        To predict, call with learn=False and the predicted field set to None.
        (Prediction is performed while training as well.)

        input_data = {'xy_value': [1.0, 2.0], 'ftype': 'sin'}

        :param learn: enable SP/TP/classifier learning
        :param class_learn: enable classifier learning independently
        :param learn_layer: optional list of region names; when given, only
            those regions learn (all others have learning inverted off)
        :returns: dict with one 'classifier_<region>' inference entry per region
        """

        self.enable_learning_mode(learn, learn_layer)
        self.enable_class_learning_mode(class_learn)

        self.run_number += 1

        # calc encoder, SP, TP
        for sensor_name in self.sensor_params.keys():
            self.network.regions[sensor_name].getSelf().dataSource.push(input_data)
        self.network.run(1)
        #self.layer_output(input_data)
        #self.debug(input_data)


        # learn classifier
        inferences = {}
        for name in self.dest_region_params.keys():
            class_name = "class_" + name
            inferences['classifier_'+name]   = self._learn_classifier_multi(class_name, actValue=input_data[self.predict_value], pstep=self.predict_step)



        # anomaly
        #inferences["anomaly"] = self._calc_anomaly()

        return inferences


    def _learn_classifier_multi(self, region_name, actValue=None, pstep=0):
        """
        Run one classifier step for ``region_name``.

        Ideally this would happen inside network.run() instead of calling
        customCompute() directly, but there is no known way to retrieve the
        classifier results in that case.

        :param region_name: classifier region name ("class_<region>")
        :param actValue: ground-truth value, or None for pure inference
        :param pstep: prediction step to read from the classifier output
        """

        # TODO: fully decouple the network from the classifier.
        #       The network should compute only sensor, SP and TP;
        #       evaluation/use of those results should live outside.

        classifier     = self.network.regions[region_name]
        encoder        = self.classifier_encoder_list[region_name].getEncoderList()[0]
        class_input    = self.classifier_input_list[region_name]
        tp_bottomUpOut = self.network.regions[class_input].getOutputData("bottomUpOut").nonzero()[0]
        #tp_bottomUpOut = self.network.regions["TP"].getSelf()._tfdr.infActiveState['t'].reshape(-1).nonzero()[0]

        if actValue is not None:
            bucketIdx = encoder.getBucketIndices(actValue)[0]
            classificationIn = {
                    'bucketIdx': bucketIdx,
                    'actValue': actValue
                    }
        else:
            # No ground truth: feed a dummy classification (learning is off).
            classificationIn = {'bucketIdx': 0,'actValue': 'no'}
        clResults = classifier.getSelf().customCompute(
                recordNum=self.run_number,
                patternNZ=tp_bottomUpOut,
                classification=classificationIn
                )

        inferences= self._get_inferences(clResults, pstep, summary_tyep='sum')

        return inferences

    def _get_inferences(self, clResults, steps, summary_tyep='sum'):
        """
        Reshape raw classifier results into an easier-to-use dict.

        :param clResults: result dict from CLAClassifier.customCompute()
        :param steps: prediction step whose likelihood vector to summarise
        :param summary_tyep: 'sum' accumulates probability per actual value;
            'best' keeps only each value's single best probability
            (parameter name is a historical typo kept for compatibility)
        :returns: {'likelihoodsDict': value -> prob,
                   'best': {'value': ..., 'prob': ...}}
        """

        likelihoodsVec = clResults[steps]
        bucketValues   = clResults['actualValues']

        likelihoodsDict = defaultdict(int)
        bestActValue = None
        bestProb = None

        if summary_tyep == 'sum':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                likelihoodsDict[actValue] += prob
                if bestProb is None or likelihoodsDict[actValue] > bestProb:
                    bestProb = likelihoodsDict[actValue]
                    bestActValue = actValue

        elif summary_tyep == 'best':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                if bestProb is None or prob > bestProb:
                    likelihoodsDict[actValue] = prob
                    bestProb = prob
                    bestActValue = actValue

        return {'likelihoodsDict': likelihoodsDict, 'best': {'value': bestActValue, 'prob':bestProb}}


    def _calc_anomaly(self):
        """
        Compute the anomaly score of each region: active columns entering the
        TP vs the columns the TP predicted on the previous step.

        NOTE(review): reads the TP's "bottomUpIn", so this assumes
        tp_enable is True — confirm before re-enabling in run().

        :returns: dict region name -> anomaly score
        """

        score = 0
        anomalyScore = {}
        for name in self.dest_region_params.keys():
            #sp_bottomUpOut = self.network.regions["sp_"+name].getOutputData("bottomUpOut").nonzero()[0]
            sp_bottomUpOut = self.network.regions["tp_"+name].getInputData("bottomUpIn").nonzero()[0]

            if self.prevPredictedColumns.has_key(name):
                score = computeAnomalyScore(sp_bottomUpOut, self.prevPredictedColumns[name])
            #topdown_predict = self.network.regions["TP"].getSelf()._tfdr.topDownCompute().copy().nonzero()[0]
            # Save this step's TP prediction for the next call.
            topdown_predict = self.network.regions["tp_"+name].getSelf()._tfdr.topDownCompute().nonzero()[0]
            self.prevPredictedColumns[name] = copy.deepcopy(topdown_predict)

            anomalyScore[name] = score

        return anomalyScore

    def reset(self):
        """
        Reset sequence state.  Currently a no-op: the TP reset below is
        intentionally disabled.
        """
        # for name in self.dest_region_params.keys():
        #     self.network.regions["tp_"+name].getSelf().resetSequenceStates()
        return

        # for sensor_name in self.sensor_params.keys():
        #     sensor = self.network.regions[sensor_name].getSelf()
        #     sensor.dataSource = DataBuffer()

    def enable_class_learning_mode(self, enable):
        """Set learningMode on every classifier region only."""
        for name in self.dest_region_params.keys():
            self.network.regions["class_"+name].setParameter("learningMode", enable)

    def enable_learning_mode(self, enable, layer_name = None):
        """
        Set learningMode on every region's SP, TP and Classifier.

        When ``layer_name`` (a list of region names) is given, all regions
        get ``not enable`` first, then only the named regions get ``enable``.
        """
        if layer_name is None:
            for name in self.dest_region_params.keys():
                self.network.regions["sp_"+name].setParameter("learningMode", enable)
                if self.tp_enable:
                    self.network.regions["tp_"+name].setParameter("learningMode", enable)
                self.network.regions["class_"+name].setParameter("learningMode", enable)
        else:
            for name in self.dest_region_params.keys():
                self.network.regions["sp_"+name].setParameter("learningMode", not enable)
                if self.tp_enable:
                    self.network.regions["tp_"+name].setParameter("learningMode", not enable)
                self.network.regions["class_"+name].setParameter("learningMode", not enable)
            for name in layer_name:
                self.network.regions["sp_"+name].setParameter("learningMode", enable)
                if self.tp_enable:
                    self.network.regions["tp_"+name].setParameter("learningMode", enable)
                self.network.regions["class_"+name].setParameter("learningMode", enable)


    def print_inferences(self, input_data, inferences):
        """
        Print one line summarising the input label and, per region, the best
        predicted value and the likelihood of the true value.
        """

        # print "%10s, %10s, %1s" % (
        #         int(input_data['xy_value'][0]),
        #         int(input_data['xy_value'][1]),
        #         input_data['label'][:1]),
        print "%5s" % (
                input_data['label']),

        try:
            for name in sorted(self.dest_region_params.keys()):
                print "%5s" % (inferences['classifier_'+name]['best']['value']),

            for name in sorted(self.dest_region_params.keys()):
                print "%6.4f," % (inferences['classifier_'+name]['likelihoodsDict'][input_data[self.predict_value]]),
        except:
            # Best-effort output: missing keys in inferences are ignored.
            pass

        # for name in sorted(self.dest_region_params.keys()):
        #     print "%3.2f," % (inferences["anomaly"][name]),

        # for name in sorted(self.dest_region_params.keys()):
        #     print "%5s," % name,

        print

    def layer_output(self, input_data, region_name=None):
        """Debug helper: print the nonzero input/output indices of each
        layer, or of a single region when ``region_name`` is given.

        NOTE(review): the final "Predict" print references TPRegion, which
        is unbound when tp_enable is False — confirm before calling in
        SP-only mode.
        """
        if region_name is not None:
            Region = self.network.regions[region_name]
            print Region.getOutputData("bottomUpOut").nonzero()[0]
            return

        for name in self.dest_region_params.keys():
            SPRegion = self.network.regions["sp_"+name]
            if self.tp_enable:
                TPRegion = self.network.regions["tp_"+name]

            print "#################################### ", name
            print
            print "==== SP layer ===="
            print "input:  ", SPRegion.getInputData("bottomUpIn").nonzero()[0][:20]
            print "output: ", SPRegion.getOutputData("bottomUpOut").nonzero()[0][:20]
            print
            if self.tp_enable:
                print "==== TP layer ===="
                print "input:  ", TPRegion.getInputData("bottomUpIn").nonzero()[0][:20]
                print "output: ", TPRegion.getOutputData("bottomUpOut").nonzero()[0][:20]
                print
            print "==== Predict ===="
            print TPRegion.getSelf()._tfdr.topDownCompute().copy().nonzero()[0][:20]
            print

    def save(self, path):
        """Pickle this whole object (network included) to ``path``."""
        import pickle
        with open(path, 'wb') as modelPickleFile:
            pickle.dump(self, modelPickleFile)
Ejemplo n.º 33
0
class HTM():
    def __init__(self, dataSource, rdse_resolution, params=None, verbosity=3):
        """Create the Network instance.

        The network has a sensor region reading data from `dataSource` and passing
        the encoded representation to an SPRegion. The SPRegion output is passed to
        a TMRegion, whose output feeds a classifier region.

        :param dataSource: a RecordStream instance to get data from
        :param rdse_resolution: float, resolution of Random Distributed Scalar Encoder
        :param params: optional dict; entries under "clParams", "spParams" and
            "tmParams" override the values loaded from the params file
        :param verbosity: int, logging verbosity (see `setVerbosity`)
        """
        DATE = '{}'.format(strftime('%Y-%m-%d_%H:%M:%S', localtime()))
        self.log_file = join(
            '../logs/', 'HTM-{}-({}RDSEres)-datasource-{}.log'.format(
                DATE, rdse_resolution, str(dataSource)))
        log.basicConfig(format='[%(asctime)s] %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S %p',
                        filename=self.log_file,
                        level=log.DEBUG)
        self.streaming = False
        self.setVerbosity(verbosity)

        self.modelParams = {}
        log.debug("...loading params from {}...".format(_PARAMS_PATH))
        try:
            with open(_PARAMS_PATH, "r") as f:
                self.modelParams = yaml.safe_load(f)["modelParams"]
        except IOError:
            # Params file not found at the primary path; retry from the
            # parent directory (happens when running inside a subdirectory).
            # Was a bare `except:`, which also masked YAML/key errors.
            with open(os.path.join("..", _PARAMS_PATH), "r") as f:
                self.modelParams = yaml.safe_load(f)["modelParams"]
        # Create a network that will hold the regions.
        self.network = Network()
        # Add a sensor region.
        self.network.addRegion("sensor", "py.RecordSensor", '{}')
        # Set the encoder and data source of the sensor region.
        self.sensorRegion = self.network.regions["sensor"].getSelf()
        self.encoder = RDSEEncoder(rdse_resolution)
        self.sensorRegion.encoder = self.encoder.get_encoder()
        self.sensorRegion.dataSource = TimeSeriesStream(dataSource)
        self.network.regions["sensor"].setParameter("predictedField", "series")

        # Adjust params:
        # make sure the SP input width matches the sensor region output width.
        self.modelParams["spParams"][
            "inputWidth"] = self.sensorRegion.encoder.getWidth()
        if params is not None:
            for key, value in params.iteritems():
                if key in ("clParams", "spParams", "tmParams"):
                    for vkey, vvalue in value.iteritems():
                        self.modelParams[key][vkey] = vvalue
        log.debug("xxx HTM Params: xxx\n{}\n".format(
            json.dumps(self.modelParams, sort_keys=True, indent=4)))
        # Add SP and TM regions.
        self.network.addRegion("spatialPoolerRegion", "py.SPRegion",
                               json.dumps(self.modelParams["spParams"]))
        self.network.addRegion("temporalPoolerRegion", "py.TMRegion",
                               json.dumps(self.modelParams["tmParams"]))
        # Add a classifier region.
        clName = "py.%s" % self.modelParams["clParams"].pop("regionName")
        self.network.addRegion("classifier", clName,
                               json.dumps(self.modelParams["clParams"]))
        # Wire the regions together, then initialize and enable the network.
        self.linkSensorToClassifier()
        self.linkSensorToSpatialPooler()
        self.linkSpatialPoolerToTemporalPooler()
        self.linkTemporalPoolerToClassifier()
        self.linkResets()
        self.network.initialize()
        self.turnInferenceOn()
        self.turnLearningOn()

    def __del__(self):
        """ closes all loggers """
        try:
            logger = log.getLogger()
            handlers = logger.handlers[:]
            for handler in handlers:
                try:
                    handler.close()
                    logger.removeHandler(handler)
                except:
                    pass
        except:
            pass

    def __str__(self):
        """Return a human-readable summary of the SP and TM regions."""
        summary = []
        sp_region = self.network.getRegionsByType(SPRegion)[0]
        sp_algo = sp_region.getSelf().getAlgorithmInstance()
        summary.append("spatial pooler region inputs: {0}\n".format(
            sp_region.getInputNames()))
        summary.append("spatial pooler region outputs: {0}\n".format(
            sp_region.getOutputNames()))
        summary.append("# spatial pooler columns: {0}\n\n".format(
            sp_algo.getNumColumns()))

        tm_region = self.network.getRegionsByType(TMRegion)[0]
        tm_algo = tm_region.getSelf().getAlgorithmInstance()
        summary.append("temporal memory region inputs: {0}\n".format(
            tm_region.getInputNames()))
        summary.append("temporal memory region outputs: {0}\n".format(
            tm_region.getOutputNames()))
        summary.append("# temporal memory columns: {0}\n".format(
            tm_algo.numberOfCols))
        return "".join(summary)

    def getClassifierResults(self):
        """Helper function to extract results for all prediction steps."""
        classifierRegion = self.network.regions["classifier"]
        actualValues = classifierRegion.getOutputData("actualValues")
        probabilities = classifierRegion.getOutputData("probabilities")
        steps = classifierRegion.getSelf().stepsList
        N = classifierRegion.getSelf().maxCategoryCount
        results = {step: {} for step in steps}
        for i in range(len(steps)):
            # stepProbabilities are probabilities for this prediction step only.
            stepProbabilities = probabilities[i * N:(i + 1) * N - 1]
            mostLikelyCategoryIdx = stepProbabilities.argmax()
            predictedValue = actualValues[mostLikelyCategoryIdx]
            predictionConfidence = stepProbabilities[mostLikelyCategoryIdx]
            results[steps[i]]["predictedValue"] = float(predictedValue)
            results[steps[i]]["predictionConfidence"] = float(
                predictionConfidence)
        log.debug("Classifier Reults:\n{}".format(
            json.dumps(results, sort_keys=True, indent=4)))
        return results

    def getCurrSeries(self):
        return self.network.regions["sensor"].getOutputData("sourceOut")[0]

    def getStepsList(self):
        return self.network.regions["classifier"].getSelf().stepsList

    def getTimeSeriesStream(self):
        return self.network.regions["sensor"].getSelf().dataSource

    def setDatasource(self, new_source):
        """Point the sensor region at `new_source`, wrapped in a TimeSeriesStream."""
        stream = TimeSeriesStream(new_source)
        self.network.regions["sensor"].getSelf().dataSource = stream

    def linkResets(self):
        """createResetLink(network, "sensor", "spatialPoolerRegion")
        createResetLink(network, "sensor", "temporalPoolerRegion")"""
        self.network.link("sensor",
                          "spatialPoolerRegion",
                          "UniformLink",
                          "",
                          srcOutput="resetOut",
                          destInput="resetIn")
        self.network.link("sensor",
                          "temporalPoolerRegion",
                          "UniformLink",
                          "",
                          srcOutput="resetOut",
                          destInput="resetIn")

    def linkSensorToClassifier(self):
        """Create required links from a sensor region to a classifier region."""
        self.network.link("sensor",
                          "classifier",
                          "UniformLink",
                          "",
                          srcOutput="bucketIdxOut",
                          destInput="bucketIdxIn")
        self.network.link("sensor",
                          "classifier",
                          "UniformLink",
                          "",
                          srcOutput="actValueOut",
                          destInput="actValueIn")
        self.network.link("sensor",
                          "classifier",
                          "UniformLink",
                          "",
                          srcOutput="categoryOut",
                          destInput="categoryIn")

    def linkSensorToSpatialPooler(self):
        self.network.link("sensor",
                          "spatialPoolerRegion",
                          "UniformLink",
                          "",
                          srcOutput="dataOut",
                          destInput="bottomUpIn")

    def linkSpatialPoolerToTemporalPooler(self):
        """Create a feed-forward link between 2 regions: spatialPoolerRegion -> temporalPoolerRegion"""
        self.network.link("spatialPoolerRegion",
                          "temporalPoolerRegion",
                          "UniformLink",
                          "",
                          srcOutput="bottomUpOut",
                          destInput="bottomUpIn")

    def linkTemporalPoolerToClassifier(self):
        """Create a feed-forward link between 2 regions: temporalPoolerRegion -> classifier"""
        self.network.link("temporalPoolerRegion",
                          "classifier",
                          "UniformLink",
                          "",
                          srcOutput="bottomUpOut",
                          destInput="bottomUpIn")

    def setVerbosity(self, level):
        """
        Sets the level of print statements/logging (verbosity)
        * 3 == DEBUG
        * 2 == VERBOSE
        * 1 == WARNING
        """
        if self.log_file == None:  # if there's no log file, make one
            DATE = '{}'.format(strftime('%Y-%m-%d_%H:%M:%S', localtime()))
            self.log_file = join(
                '../logs/',
                'HTM-{}-({}CPMC-{}RDSEres)-datasource-{}.log'.format(
                    DATE, self.modelParams["tmParams"]["cellsPerColumn"],
                    self.encoder.get_resolution(),
                    str(self.sensorRegion.dataSource)))
            log.basicConfig(format='[%(asctime)s] %(message)s',
                            datefmt='%m/%d/%Y %H:%M:%S %p',
                            filename=self.log_file,
                            level=log.DEBUG)

        if level >= 4 and not self.streaming:
            log.getLogger().addHandler(log.StreamHandler())
            self.streaming = True
        if level >= 3:
            log.getLogger().setLevel(log.DEBUG)
        elif level >= 2:
            log.getLogger().setLevel(log.VERBOSE)
        elif level >= 1:
            log.getLogger().setLevel(log.WARNING)

    def runNetwork(self, learning=True):
        """Run the network over the entire data source, logging predictions to CSV.

        :param learning: True enables learning on every region, False disables
            it everywhere; any other value is treated as a region-selection
            string and forwarded to `turnLearningOn` (e.g. "st").
        :return: tuple (one_preds, results) -- the list of 1-step predicted
            values and the list of full CSV rows written.
        """
        DATE = '{}'.format(strftime('%Y-%m-%d_%H:%M:%S', localtime()))
        _OUTPUT_PATH = "../outputs/HTMOutput-{}-{}.csv".format(
            DATE, self.network.regions["sensor"].getSelf().dataSource)
        self.sensorRegion.dataSource.rewind()

        # Set predicted field
        self.network.regions["sensor"].setParameter("predictedField", "series")

        if learning == True:
            # Enable learning for all regions.
            self.turnLearningOn()
        elif learning == False:
            # Disable learning for all regions.
            self.turnLearningOff()
        else:
            # Selective: disable everything, then enable only the regions
            # named in the `learning` string ("c"/"s"/"t").
            self.turnLearningOff()
            self.turnLearningOn(learning)
        self.turnInferenceOn()

        _model = self.network.regions["sensor"].getSelf().dataSource

        with open(_OUTPUT_PATH, "w") as outputFile:
            writer = csv.writer(outputFile)
            log.info("Writing output to {}".format(_OUTPUT_PATH))
            steps = self.getStepsList()
            header_row = ["Time Step", "Series"]
            for step in steps:
                header_row.append("{} Step Pred".format(step))
                header_row.append("{} Step Pred Conf".format(step))
            writer.writerow(header_row)
            results = []
            one_preds = []
            for i in range(len(_model)):
                # Run the network for a single iteration
                self.network.run(1)

                series = self.network.regions["sensor"].getOutputData(
                    "sourceOut")[0]
                predictionResults = self.getClassifierResults()
                # Assumes a 1-step prediction is configured (KeyError otherwise).
                result = [_model.getBookmark(), series]
                one_preds.append(predictionResults[1]["predictedValue"])
                # NOTE(review): dict iteration order is arbitrary here, so the
                # per-step columns may not match the sorted header order.
                for key, value in predictionResults.iteritems():
                    result.append(value["predictedValue"])
                    result.append(value["predictionConfidence"] * 100)
                results.append(result)
                writer.writerow(result)
                # Flush per row so the CSV stays usable if interrupted.
                outputFile.flush()
            return one_preds, results

    def runWithMode(self,
                    mode,
                    error_method="rmse",
                    weights=None,
                    normalize_error=False):
        '''
        Run the network over one data subset and return the weighted
        per-step prediction errors.

        Modes:
        * "strain" - Learning on spatial pooler only, on training set
        * "train" - Learning, on training set
        * "test" - Learning, on test set
        * "eval" - Learning, on eval set; errors are normalized by the eval
          set length and the weights are forced to the 1-step prediction only

        :param mode: str, one of "strain"/"train"/"test"/"eval"
        :param error_method: str, "rmse" or "binary"
        :param weights: dict step -> weight applied to that step's error.
            Defaults to {1: 1.0, 5: 1.0}.  A fresh dict is now built per
            call: the previous shared mutable default was mutated below
            (eval mode rewrites it, missing steps are zeroed), leaking
            state across calls.  A caller-supplied dict is still mutated.
        :param normalize_error: bool, divide each error by the data range
        :return: dict step -> weighted error
        '''
        if weights is None:
            weights = {1: 1.0, 5: 1.0}
        mode = mode.lower()
        error_method = error_method.lower()
        log.debug(
            "entered `runWithMode` with with:\n  mode: {}\n    error_method: {}"
            .format(mode, error_method))

        _model = self.getTimeSeriesStream()

        if mode == "strain":
            # Spatial-only training: classifier and temporal pooler frozen.
            self.turnLearningOff("ct")
            self.turnLearningOn("s")
        else:
            self.turnLearningOn()
        self.turnInferenceOn()

        results = {}
        steps = self.getStepsList()
        for step in steps:
            results[step] = 0
        # One FIFO of pending predicted values per prediction step size.
        predictions = {}
        for step in steps:
            predictions[step] = [None] * step

        if mode == "strain" or mode == "train":
            _model.set_to_train_theta()
            while _model.in_train_set():
                results, predictions = self.run_with_mode_one_iter(
                    error_method, results, predictions)
        elif mode == "test":
            _model.set_to_test_theta()
            while _model.in_test_set():
                results, predictions = self.run_with_mode_one_iter(
                    error_method, results, predictions)
        elif mode == "eval":
            _model.set_to_eval_theta()
            while _model.in_eval_set():
                results, predictions = self.run_with_mode_one_iter(
                    error_method, results, predictions)
            # Weights for eval hard-coded to just look at the one-step
            # prediction for now.
            steps = self.getStepsList()
            for step in steps:
                weights[step] = 0
            weights[1] = 1

            # normalize result over length of evaluation set
            for key in results:
                results[key] /= (self.sensorRegion.dataSource.len_eval_set() -
                                 1)

        # Default any step that has no explicit weight to zero
        # (was a bare try/except probe before).
        for key in results:
            if key not in weights:
                weights[key] = 0

        for key in results:
            results[key] = results[key] * weights[key]

        if normalize_error:
            _range = self.getTimeSeriesStream().get_range()
            if _range is not None:
                for key in results:
                    results[key] = results[key] / _range

        return results

    def run_with_mode_one_iter(self, error_method, results, predictions=None):
        """Run one network step, accumulate errors, and rotate predictions.

        :param error_method: "rmse" (accumulates per-sample absolute error)
            or "binary" (counts mismatches)
        :param results: dict step -> accumulated error, updated in place
        :param predictions: dict step -> FIFO list of pending predicted values
        :return: the (results, predictions) tuple
        """
        self.network.run(1)
        series = self.getCurrSeries()

        # Score the prediction that was made `step` iterations ago
        # (front of each FIFO); un-filled slots are still None.
        for step in results.keys():
            due = predictions[step][0]
            if due is None:
                continue
            if error_method == "rmse":
                results[step] += sqrt((series - due)**2)
            elif error_method == "binary":
                if series != due:
                    results[step] += 1

        # Shift each FIFO left one slot and append the newest prediction.
        latest = self.getClassifierResults()
        for step in predictions:
            fifo = predictions[step]
            for j in range(step - 1):
                fifo[j] = fifo[j + 1]
            fifo[step - 1] = latest[step]["predictedValue"]

        return (results, predictions)

    def setRDSEResolution(self, new_res):
        """Replace the model's RDSE encoder with one of resolution `new_res`.

        NOTE(review): this only rebinds `self.encoder`; unlike `__init__`, it
        does not update `self.sensorRegion.encoder`, so the live network may
        keep encoding with the old resolution -- confirm intent.
        """
        self.encoder = RDSEEncoder(new_res)

    def train(self,
              error_method="rmse",
              sibt=0,
              iter_per_cycle=1,
              max_cycles=20,
              weights=None,
              normalize_error=False):
        """
        Trains the HTM on `dataSource`.

        Runs `sibt` spatial-pooler-only passes, then full train/test cycles
        until the test error stops improving or `max_cycles` is reached, and
        finally scores the model on the eval set.

        :param error_method: the metric for calculating error ("rmse" root mean squared error or "binary")
        :param sibt: spatial (pooler) iterations before temporal (pooler)
        :param iter_per_cycle: int, train/test iterations per cycle
        :param max_cycles: int, cap on full-training cycles
        :param weights: dict step -> weight forwarded to `runWithMode`;
            defaults to {1: 1.0, 5: 1.0}.  Built fresh per call because
            `runWithMode` mutates the dict in eval mode -- the previous
            shared mutable default leaked state between calls.
        :param normalize_error: bool, normalize errors by the data range
        :return: float, the weighted 1-step error on the eval set
        """
        if weights is None:
            weights = {1: 1.0, 5: 1.0}
        for i in range(sibt):
            log.debug(
                "\nxxxxx Iteration {}/{} of the Spatial Pooler Training xxxxx".
                format(i + 1, sibt))
            # train on spatial pooler
            log.debug(
                "Error for spatial training iteration {} was {} with {} error method"
                .format(
                    i,
                    self.runWithMode("strain", error_method, weights,
                                     normalize_error), error_method))
        log.info("\nExited spatial pooler only training loop")
        # last_error is a placeholder; it is replaced by +inf after the first
        # cycle (see the check at the bottom of the loop) so training always
        # gets a second cycle to compare against.
        last_error = 0
        curr_error = -1
        counter = 0
        log.info("Entering full training loop")
        while (fcompare(curr_error, last_error) == -1
               and counter < max_cycles):
            log.debug(
                "\n++++++++++ Cycle {} of the full training loop +++++++++\n".
                format(counter))
            last_error = curr_error
            curr_error = 0
            for i in range(int(iter_per_cycle)):
                log.debug("\n----- Iteration {}/{} of Cycle {} -----\n".format(
                    i + 1, iter_per_cycle, counter))
                log.debug(
                    "Error for full training cycle {}, iteration {} was {} with {} error method"
                    .format(
                        counter, i,
                        self.runWithMode("train", error_method, weights,
                                         normalize_error), error_method))
                result = self.runWithMode("test", error_method, weights,
                                          normalize_error)
                for key, value in result.iteritems():
                    curr_error += value
            log.debug("Cycle {} - last: {}    curr: {}".format(
                counter, last_error, curr_error))
            counter += 1
            if last_error == -1:
                last_error = float("inf")
        self.sensorRegion.dataSource.rewind()
        final_error = self.runWithMode("eval", error_method, weights,
                                       normalize_error)
        log.info("FINAL ERROR: {}".format(final_error[1]))
        return final_error[1]

    def turnInferenceOn(self):
        log.debug("Inference enabled for all regions")
        self.network.regions["spatialPoolerRegion"].setParameter(
            "inferenceMode", 1)
        self.network.regions["temporalPoolerRegion"].setParameter(
            "inferenceMode", 1)
        self.network.regions["classifier"].setParameter("inferenceMode", 1)

    def turnLearningOn(self, turnOn="cst"):
        """
        Turns learning on for certain segments

        :param turnOn - a string of characters representing the segments you'd like to turn on
        * c ---> classifier
        * s ---> spatial pooler
        * t ---> temporal pooler
        """
        for i in range(len(turnOn)):
            target = turnOn[0].lower()
            turnOn = turnOn[1:]
            if target == "c":
                log.debug("Learning enabled for classifier")
                self.network.regions["classifier"].setParameter(
                    "learningMode", 1)
            elif target == "s":
                log.debug("Learning enabled for spatial pooler region")
                self.network.regions["spatialPoolerRegion"].setParameter(
                    "learningMode", 1)
            elif target == "t":
                log.debug("Learning enabled for temporal pooler region")
                self.network.regions["temporalPoolerRegion"].setParameter(
                    "learningMode", 1)

    def turnLearningOff(self, turnOff="cst"):
        """
        Turns learning off for certain segments

        :param turnOff - a string of characters representing the segments you'd like to turn off
        * c ---> classifier
        * s ---> spatial pooler
        * t ---> temporal pooler
        Characters outside this set are silently ignored.
        """
        # Consume the selection string one character at a time.
        for i in range(len(turnOff)):
            target = turnOff[0].lower()
            turnOff = turnOff[1:]
            if target == "c":
                log.debug("Learning disabled for classifier")
                self.network.regions["classifier"].setParameter(
                    "learningMode", 0)
            elif target == "s":
                log.debug("Learning disabled for spatial pooler region")
                self.network.regions["spatialPoolerRegion"].setParameter(
                    "learningMode", 0)
            elif target == "t":
                log.debug("Learning disabled for temporal pooler region")
                self.network.regions["temporalPoolerRegion"].setParameter(
                    "learningMode", 0)