Example #1
def runExperiment():
    Network.unregisterRegion("ImageSensor")
    Network.registerRegion(ImageSensor)
    Network.registerRegion(PCANode)
    inputSize = 8

    net = Network()
    sensor = net.addRegion(
        "sensor", "py.ImageSensor",
        "{ width: %d, height: %d }" % (inputSize, inputSize))

    params = ("{bottomUpCount: %s, "
              " SVDSampleCount: 5, "
              " SVDDimCount: 2}" % inputSize)

    pca = net.addRegion("pca", "py.PCANode", params)

    linkParams = "{ mapping: in, rfSize: [%d, %d] }" % (inputSize, inputSize)
    net.link("sensor", "pca", "UniformLink", linkParams, "dataOut",
             "bottomUpIn")

    net.initialize()

    for i in range(10):
        pca.getSelf()._testInputs = numpy.random.random([inputSize])
        net.run(1)
        # 's' (a runtime request handle) is not defined in this snippet, so the
        # legacy debug call below is commented out:
        # print s.sendRequest("nodeOPrint pca_node")
Example #2
def createNetwork():
  """
  Set up the following simple network and return it:

    ImageSensorRegion -> SP -> KNNClassifier Region

  """
  net = Network()

  # Add the three regions
  net.addRegion("sensor", "py.ImageSensor",
                json.dumps(DEFAULT_IMAGESENSOR_PARAMS))
  net.addRegion("SP", "py.SPRegion", json.dumps(DEFAULT_SP_PARAMS))
  net.addRegion("classifier","py.KNNClassifierRegion",
                json.dumps(DEFAULT_CLASSIFIER_PARAMS))

  # Link up the regions. Note that we need to create a link from the sensor
  # to the classifier to send in the category labels.
  net.link("sensor", "SP", "UniformLink", "",
           srcOutput = "dataOut", destInput = "bottomUpIn")
  net.link("SP", "classifier", "UniformLink", "",
           srcOutput = "bottomUpOut", destInput = "bottomUpIn")
  net.link("sensor", "classifier", "UniformLink", "",
           srcOutput = "categoryOut", destInput = "categoryIn")

  # Make sure all objects are initialized
  net.initialize()

  return net
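
A minimal driver sketch for the network above, assuming the DEFAULT_* parameter
dicts and the image data are defined elsewhere; the helper name and iteration
count here are illustrative, not part of the original example:

def runNetwork(net, numIterations=10):
  # Run one step at a time and read the classifier's inferred categories.
  classifier = net.regions["classifier"]
  for _ in xrange(numIterations):
    net.run(1)
    print classifier.getOutputData("categoriesOut")
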
Example #3
  def testRunPCANode(self):
    from nupic.engine import *

    numpy.random.seed(37)

    inputSize = 8

    net = Network()
    Network.registerRegion(ImageSensor)
    net.addRegion('sensor', 'py.ImageSensor' ,
          '{ width: %d, height: %d }' % (inputSize, inputSize))

    params = """{bottomUpCount: %d,
              SVDSampleCount: 5,
              SVDDimCount: 2}""" % inputSize

    pca = net.addRegion('pca', 'py.PCANode', params)

    #nodeAbove = CreateNode("py.ImageSensor", phase=0, categoryOut=1, dataOut=3,
    #                       width=3, height=1)
    #net.addElement('nodeAbove', nodeAbove)

    linkParams = '{ mapping: in, rfSize: [%d, %d] }' % (inputSize, inputSize)
    net.link('sensor', 'pca', 'UniformLink', linkParams, 'dataOut', 'bottomUpIn')

    net.initialize()

    for i in range(10):
      pca.getSelf()._testInputs = numpy.random.random([inputSize])
      net.run(1)
Example #4
def runExperiment():
  Network.unregisterRegion("ImageSensor")
  Network.registerRegion(ImageSensor)
  Network.registerRegion(PCANode)
  inputSize = 8

  net = Network()
  sensor = net.addRegion(
      "sensor", "py.ImageSensor" ,
      "{ width: %d, height: %d }" % (inputSize, inputSize))

  params = ("{bottomUpCount: %s, "
            " SVDSampleCount: 5, "
            " SVDDimCount: 2}" % inputSize)

  pca = net.addRegion("pca", "py.PCANode", params)

  linkParams = "{ mapping: in, rfSize: [%d, %d] }" % (inputSize, inputSize)
  net.link("sensor", "pca", "UniformLink", linkParams, "dataOut", "bottomUpIn")

  net.initialize()

  for i in range(10):
    pca.getSelf()._testInputs = numpy.random.random([inputSize])
    net.run(1)
    # 's' (a runtime request handle) is not defined in this snippet, so the
    # legacy debug call below is commented out:
    # print s.sendRequest("nodeOPrint pca_node")
Example #5
  def __init__(self, numColumns, L2Params, L4Params, L6aParams, repeat,
               logCalls=False):
    """
    Create a network consisting of multiple columns. Each column contains one
    L2, one L4 and one L6a layer. In addition, all the L2 columns are fully
    connected to each other through their lateral inputs.

    :param numColumns: Number of columns to create
    :type numColumns: int
    :param L2Params: constructor parameters for :class:`ColumnPoolerRegion`
    :type L2Params: dict
    :param L4Params:  constructor parameters for :class:`ApicalTMPairRegion`
    :type L4Params: dict
    :param L6aParams: constructor parameters for :class:`GridCellLocationRegion`
    :type L6aParams: dict
    :param repeat: Number of times each pair should be seen to be learned
    :type repeat: int
    :param logCalls: If true, calls to main functions will be logged internally.
                     The log can then be saved with saveLogs(). This allows us
                     to recreate the complete network behavior using
                     rerunExperimentFromLogfile which is very useful for
                     debugging.
    :type logCalls: bool
    """
    # Handle logging - this has to be done first
    self.logCalls = logCalls

    self.numColumns = numColumns
    self.repeat = repeat

    network = Network()
    self.network = createMultipleL246aLocationColumn(network=network,
                                                     numberOfColumns=self.numColumns,
                                                     L2Params=L2Params,
                                                     L4Params=L4Params,
                                                     L6aParams=L6aParams)
    network.initialize()

    self.sensorInput = []
    self.motorInput = []
    self.L2Regions = []
    self.L4Regions = []
    self.L6aRegions = []
    for i in xrange(self.numColumns):
      col = str(i)
      self.sensorInput.append(network.regions["sensorInput_" + col].getSelf())
      self.motorInput.append(network.regions["motorInput_" + col].getSelf())
      self.L2Regions.append(network.regions["L2_" + col])
      self.L4Regions.append(network.regions["L4_" + col])
      self.L6aRegions.append(network.regions["L6a_" + col])

    if L6aParams is not None and "dimensions" in L6aParams:
      self.dimensions = L6aParams["dimensions"]
    else:
      self.dimensions = 2

    self.sdrSize = L2Params["sdrSize"]

    # will be populated during training
    self.learnedObjects = {}
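
Only the constructor is shown above. A hedged instantiation sketch, where the
class name and the *_PARAMS dicts are hypothetical placeholders (only the
constructor signature comes from the snippet):

# L2_PARAMS, L4_PARAMS and L6A_PARAMS are assumed to be defined elsewhere.
exp = MultiColumnExperiment(numColumns=3,  # hypothetical class name
                            L2Params=L2_PARAMS,
                            L4Params=L4_PARAMS,
                            L6aParams=L6A_PARAMS,
                            repeat=3,
                            logCalls=False)
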
Example #6
def main():
  # Create Network instance
  network = Network()

  # Add three TestNode regions to network
  network.addRegion("region1", "TestNode", "")
  network.addRegion("region2", "TestNode", "")
  network.addRegion("region3", "TestNode", "")

  # Set dimensions on first region
  region1 = network.getRegions().getByName("region1")
  region1.setDimensions(Dimensions([1, 1]))

  # Link regions
  network.link("region1", "region2", "UniformLink", "")
  network.link("region2", "region1", "UniformLink", "")
  network.link("region1", "region3", "UniformLink", "")
  network.link("region2", "region3", "UniformLink", "")

  # Initialize network
  network.initialize()

  # Initialize Network Visualizer
  viz = NetworkVisualizer(network)

  # Render w/ graphviz
  viz.render(renderer=GraphVizRenderer)

  # Render w/ networkx
  viz.render(renderer=NetworkXRenderer)
Example #7
def createNetwork():
  """
  Set up the following simple network and return it:

    ImageSensorRegion -> SP -> KNNClassifier Region

  """
  net = Network()

  # Add the three regions
  net.addRegion("sensor", "py.ImageSensor",
                json.dumps(DEFAULT_IMAGESENSOR_PARAMS))
  net.addRegion("SP", "py.SPRegion", json.dumps(DEFAULT_SP_PARAMS))
  net.addRegion("classifier","py.KNNClassifierRegion",
                json.dumps(DEFAULT_CLASSIFIER_PARAMS))

  # Link up the regions. Note that we need to create a link from the sensor
  # to the classifier to send in the category labels.
  net.link("sensor", "SP", "UniformLink", "",
           srcOutput = "dataOut", destInput = "bottomUpIn")
  net.link("SP", "classifier", "UniformLink", "",
           srcOutput = "bottomUpOut", destInput = "bottomUpIn")
  net.link("sensor", "classifier", "UniformLink", "",
           srcOutput = "categoryOut", destInput = "categoryIn")

  # Make sure all objects are initialized
  net.initialize()

  return net
Example #8
    def testRunPCANode(self):
        from nupic.engine import *

        numpy.random.seed(37)

        inputSize = 8

        net = Network()
        Network.registerRegion(ImageSensor)
        net.addRegion('sensor', 'py.ImageSensor',
                      '{ width: %d, height: %d }' % (inputSize, inputSize))

        params = """{bottomUpCount: %d,
              SVDSampleCount: 5,
              SVDDimCount: 2}""" % inputSize

        pca = net.addRegion('pca', 'py.PCANode', params)

        #nodeAbove = CreateNode("py.ImageSensor", phase=0, categoryOut=1, dataOut=3,
        #                       width=3, height=1)
        #net.addElement('nodeAbove', nodeAbove)

        linkParams = '{ mapping: in, rfSize: [%d, %d] }' % (inputSize,
                                                            inputSize)
        net.link('sensor', 'pca', 'UniformLink', linkParams, 'dataOut',
                 'bottomUpIn')

        net.initialize()

        for i in range(10):
            pca.getSelf()._testInputs = numpy.random.random([inputSize])
            net.run(1)
Example #9
def createNetwork(dataSource, rdse_resolution, cellsPerMiniColumn=32):
    """Create the Network instance.

    The network has a sensor region reading data from `dataSource` and passing
    the encoded representation to an SPRegion. The SPRegion output is passed to
    a TMRegion.

    :param dataSource: a RecordStream instance to get data from
    :param rdse_resolution: float, resolution for the scalar encoder (RDSE)
    :param cellsPerMiniColumn: int, number of cells per mini-column. Default=32
    :returns: a Network instance ready to run
    """
    try:
        with open(_PARAMS_PATH, "r") as f:
            modelParams = yaml.safe_load(f)["modelParams"]
    except IOError:
        with open(os.path.join("..", _PARAMS_PATH), "r") as f:
            modelParams = yaml.safe_load(f)["modelParams"]

    # Create a network that will hold the regions.
    network = Network()

    # Add a sensor region.
    network.addRegion("sensor", "py.RecordSensor", '{}')

    # Set the encoder and data source of the sensor region.
    sensorRegion = network.regions["sensor"].getSelf()
    #sensorRegion.encoder = createEncoder(modelParams["sensorParams"]["encoders"])
    sensorRegion.encoder = createEncoder(rdse_resolution)
    sensorRegion.dataSource = dataSource

    # Make sure the SP input width matches the sensor region output width.
    modelParams["spParams"]["inputWidth"] = sensorRegion.encoder.getWidth()
    modelParams["tmParams"]["cellsPerColumn"] = cellsPerMiniColumn

    # Add SP and TM regions.
    network.addRegion("spatialPoolerRegion", "py.SPRegion",
                      json.dumps(modelParams["spParams"]))
    network.addRegion("temporalPoolerRegion", "py.TMRegion",
                      json.dumps(modelParams["tmParams"]))

    # Add a classifier region.
    clName = "py.%s" % modelParams["clParams"].pop("regionName")
    network.addRegion("classifier", clName,
                      json.dumps(modelParams["clParams"]))

    # Add all links
    createSensorToClassifierLinks(network, "sensor", "classifier")
    createDataOutLink(network, "sensor", "spatialPoolerRegion")
    createFeedForwardLink(network, "spatialPoolerRegion",
                          "temporalPoolerRegion")
    createFeedForwardLink(network, "temporalPoolerRegion", "classifier")
    # Reset links are optional, since the sensor region does not send resets.
    createResetLink(network, "sensor", "spatialPoolerRegion")
    createResetLink(network, "sensor", "temporalPoolerRegion")

    # Make sure all objects are initialized.
    network.initialize()

    return network
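
A short usage sketch for the factory above; the file path, resolution and
iteration count are illustrative assumptions:

dataSource = FileRecordStream(streamID="input.csv")  # hypothetical input file
network = createNetwork(dataSource, rdse_resolution=0.88,
                        cellsPerMiniColumn=32)
for _ in xrange(100):
    network.run(1)
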
Example #10
def createNetwork():
    """Create the Network instance.

    The network has a sensor region reading data from a DataBuffer and passing
    the encoded representation to an SPRegion. The SPRegion output is passed to
    a TPRegion.

    :returns: a Network instance ready to run
    """
    network = Network()

    # Create Sensor
    network.addRegion("sensor", "py.RecordSensor", json.dumps({"verbosity":
                                                               0}))
    sensor = network.regions["sensor"].getSelf()
    sensor.encoder = createSensorEncoder()
    sensor.dataSource = DataBuffer()

    # Add the spatial pooler region
    PARAMS['SP']["inputWidth"] = sensor.encoder.getWidth()
    network.addRegion("spatialPoolerRegion", "py.SPRegion",
                      json.dumps(PARAMS['SP']))
    network.link("sensor", "spatialPoolerRegion", "UniformLink", "")

    # Add the TPRegion on top of the SPRegion
    network.addRegion("temporalPoolerRegion", "py.TPRegion",
                      json.dumps(PARAMS['TP']))
    network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink",
                 "")

    # Add classifier
    network.addRegion("classifierRegion", "py.CLAClassifierRegion",
                      json.dumps(PARAMS['CL']))

    network.initialize()

    # Make sure learning is enabled
    spatialPoolerRegion = network.regions["spatialPoolerRegion"]
    spatialPoolerRegion.setParameter("learningMode", True)
    spatialPoolerRegion.setParameter("anomalyMode", True)

    temporalPoolerRegion = network.regions["temporalPoolerRegion"]
    temporalPoolerRegion.setParameter("topDownMode", False)
    temporalPoolerRegion.setParameter("learningMode", True)
    temporalPoolerRegion.setParameter("inferenceMode", True)
    temporalPoolerRegion.setParameter("anomalyMode", False)

    classifierRegion = network.regions["classifierRegion"]
    classifierRegion.setParameter('inferenceMode', True)
    classifierRegion.setParameter('learningMode', True)

    return network
Example #11
def createNetwork():
    """Create the Network instance.

    The network has a sensor region reading data from a DataBuffer and passing
    the encoded representation to an SPRegion. The SPRegion output is passed to
    a TPRegion.

    :returns: a Network instance ready to run
    """
    network = Network()

    # Create Sensor
    network.addRegion("sensor", "py.RecordSensor", json.dumps({"verbosity": 0}))
    sensor = network.regions["sensor"].getSelf()
    sensor.encoder    = createSensorEncoder()
    sensor.dataSource = DataBuffer()

    # Add the spatial pooler region
    PARAMS['SP']["inputWidth"] = sensor.encoder.getWidth()
    network.addRegion("spatialPoolerRegion", "py.SPRegion", json.dumps(PARAMS['SP']))
    network.link("sensor", "spatialPoolerRegion", "UniformLink", "")

    # Add the TPRegion on top of the SPRegion
    network.addRegion("temporalPoolerRegion", "py.TPRegion", json.dumps(PARAMS['TP']))
    network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink", "")

    # Add classifier
    network.addRegion( "classifierRegion", "py.CLAClassifierRegion", json.dumps(PARAMS['CL']))

    network.initialize()

    # Make sure learning is enabled
    spatialPoolerRegion = network.regions["spatialPoolerRegion"]
    spatialPoolerRegion.setParameter("learningMode", True)
    spatialPoolerRegion.setParameter("anomalyMode", True)

    temporalPoolerRegion = network.regions["temporalPoolerRegion"]
    temporalPoolerRegion.setParameter("topDownMode", False)
    temporalPoolerRegion.setParameter("learningMode", True)
    temporalPoolerRegion.setParameter("inferenceMode", True)
    temporalPoolerRegion.setParameter("anomalyMode", False)

    classifierRegion = network.regions["classifierRegion"]
    classifierRegion.setParameter('inferenceMode', True)
    classifierRegion.setParameter('learningMode', True)

    return network
Example #12
def createNetwork(dataSource):
    """Create the Network instance.

    The network has a sensor region reading data from `dataSource` and passing
    the encoded representation to an Identity Region.

    :param dataSource: a RecordStream instance to get data from
    :returns: a Network instance ready to run
    """
    network = Network()

    # Our input is sensor data from the gym file. The RecordSensor region
    # allows us to specify a file record stream as the input source via the
    # dataSource attribute.
    network.addRegion("sensor", "py.RecordSensor",
                      json.dumps({"verbosity": _VERBOSITY}))
    sensor = network.regions["sensor"].getSelf()
    # The RecordSensor needs to know how to encode the input values
    sensor.encoder = createEncoder()
    # Specify the dataSource as a file record stream instance
    sensor.dataSource = dataSource

    # CUSTOM REGION
    # Add path to custom region to PYTHONPATH
    # NOTE: Before using a custom region, please modify your PYTHONPATH
    # export PYTHONPATH="<path to custom region module>:$PYTHONPATH"
    # In this demo we use sys.path.append instead, so the change takes
    # effect within this program.
    sys.path.append(os.path.dirname(os.path.abspath(__file__)))

    from custom_region.identity_region import IdentityRegion

    # Add custom region class to the network
    Network.registerRegion(IdentityRegion)

    # Create a custom region
    network.addRegion("identityRegion", "py.IdentityRegion",
                      json.dumps(I_PARAMS))

    # Link the Identity region to the sensor input
    network.link("sensor",
                 "identityRegion",
                 "UniformLink",
                 "",
                 srcOutput="sourceOut",
                 destInput="in")

    network.initialize()

    return network
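
The demo registers IdentityRegion but its definition is not shown here. Below
is a minimal sketch of what such a custom region could look like, assuming the
usual PyRegion subclass pattern; it is illustrative, not the demo's actual
implementation:

from nupic.bindings.regions.PyRegion import PyRegion

class IdentityRegion(PyRegion):
  """Region that passes its input through unchanged."""

  def __init__(self, dataWidth, **kwargs):
    self._dataWidth = dataWidth

  def initialize(self):
    pass

  def compute(self, inputs, outputs):
    # Copy the input straight to the output.
    outputs["out"][:] = inputs["in"]

  def getOutputElementCount(self, name):
    return self._dataWidth

  @classmethod
  def getSpec(cls):
    return {
      "description": "Identity region.",
      "parameters": {
        "dataWidth": dict(description="Size of the output vector.",
                          dataType="UInt32", accessMode="ReadWrite",
                          count=1, constraints=""),
      },
      "inputs": {
        "in": dict(description="Input vector.", dataType="Real32", count=0,
                   required=True, regionLevel=False, isDefaultInput=True,
                   requireSplitterMap=False),
      },
      "outputs": {
        "out": dict(description="Copy of the input.", dataType="Real32",
                    count=0, regionLevel=True, isDefaultOutput=True),
      },
    }
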
Example #13
def createNetwork(dataSource):
    """Create and initialize a network."""

    with open(_PARAMS_PATH, "r") as f:
        modelParams = yaml.safe_load(f)["modelParams"]

    # Create a network that will hold the regions.
    network = Network()

    # Add a sensor region.
    network.addRegion("sensor", "py.RecordSensor", "{}")

    # Set the encoder and data source of the sensor region.
    sensorRegion = network.regions["sensor"].getSelf()
    sensorRegion.encoder = createEncoder(
        modelParams["sensorParams"]["encoders"])
    sensorRegion.dataSource = dataSource

    # Make sure the SP input width matches the sensor region output width.
    modelParams["spParams"]["inputWidth"] = sensorRegion.encoder.getWidth()

    # Add SP and TP regions.
    network.addRegion("SP", "py.SPRegion", json.dumps(modelParams["spParams"]))
    network.addRegion("TM", "py.TMRegion", json.dumps(modelParams["tmParams"]))

    # Add a classifier region.
    clName = "py.%s" % modelParams["clParams"].pop("regionName")
    network.addRegion("classifier", clName,
                      json.dumps(modelParams["clParams"]))

    classifierRegion = network.regions["classifier"].getSelf()

    # Add all links
    createSensorToClassifierLinks(network, "sensor", "classifier")
    createDataOutLink(network, "sensor", "SP")
    createFeedForwardLink(network, "SP", "TM")
    createFeedForwardLink(network, "TM", "classifier")
    # Reset links are optional, since the sensor region does not send resets.
    createResetLink(network, "sensor", "SP")
    createResetLink(network, "sensor", "TM")

    # Make sure all objects are initialized.
    network.initialize()

    return network
Example #14
def createNetwork(dataSource):
  """Create the Network instance.

  The network has a sensor region reading data from `dataSource` and passing
  the encoded representation to an Identity Region.

  :param dataSource: a RecordStream instance to get data from
  :returns: a Network instance ready to run
  """
  network = Network()

  # Our input is sensor data from the gym file. The RecordSensor region
  # allows us to specify a file record stream as the input source via the
  # dataSource attribute.
  network.addRegion("sensor", "py.RecordSensor",
                    json.dumps({"verbosity": _VERBOSITY}))
  sensor = network.regions["sensor"].getSelf()
  # The RecordSensor needs to know how to encode the input values
  sensor.encoder = createEncoder()
  # Specify the dataSource as a file record stream instance
  sensor.dataSource = dataSource

  # CUSTOM REGION
  # Add path to custom region to PYTHONPATH
  # NOTE: Before using a custom region, please modify your PYTHONPATH
  # export PYTHONPATH="<path to custom region module>:$PYTHONPATH"
  # In this demo we use sys.path.append instead, so the change takes
  # effect within this program.
  sys.path.append(os.path.dirname(os.path.abspath(__file__)))
  
  from custom_region.identity_region import IdentityRegion

  # Add custom region class to the network
  Network.registerRegion(IdentityRegion)

  # Create a custom region
  network.addRegion("identityRegion", "py.IdentityRegion",
                    json.dumps({
                      "dataWidth": sensor.encoder.getWidth(),
                    }))

  # Link the Identity region to the sensor output
  network.link("sensor", "identityRegion", "UniformLink", "")

  network.initialize()

  return network
Example #15
  def _testNetLoad(self):
    """Test loading a network with this sensor in it."""
    n = Network()
    r = n.addRegion(self.nodeName, self.sensorName, '{ activeOutputCount: 11}')
    r.dimensions = Dimensions([1])
    n.save(self.filename)

    n = Network(self.filename)
    n.initialize()
    self.testsPassed += 1

    # Check that vectorCount parameter is zero
    r = n.regions[self.nodeName]

    res = r.getParameter('vectorCount')
    self.assertEqual(
        res, 0, "getting vectorCount:\n Expected '0',  got back  '%d'\n" % res)

    self.sensor = r
Example #16
  def _testNetLoad(self):
    """Test loading a network with this sensor in it."""
    n = Network()
    r = n.addRegion(self.nodeName, self.sensorName, '{ activeOutputCount: 11}')
    r.dimensions = Dimensions([1])
    n.save(self.filename)

    n = Network(self.filename)
    n.initialize()
    self.testsPassed += 1

    # Check that vectorCount parameter is zero
    r = n.regions[self.nodeName]

    res = r.getParameter('vectorCount')
    self.assertEqual(
        res, 0, "getting vectorCount:\n Expected '0',  got back  '%d'\n" % res)

    self.sensor = r
Example #17
  def testLinkingDownwardDimensions(self):
    #
    # Linking can induce dimensions downward
    #
    net = Network()
    level1 = net.addRegion("level1", "TestNode", "")
    level2 = net.addRegion("level2", "TestNode", "")
    dims = Dimensions([3, 2])
    level2.setDimensions(dims)
    net.link("level1", "level2", "TestFanIn2", "")
    net.initialize()

    # Level1 should now have dimensions [6, 4]
    self.assertEqual(level1.getDimensions()[0], 6)
    self.assertEqual(level1.getDimensions()[1], 4)

    #
    # We get nice error messages when network can't be initialized
    #
    LOGGER.info("=====")
    LOGGER.info("Creating a 3 level network in which levels 1 and 2 have")
    LOGGER.info("dimensions but network initialization will fail because")
    LOGGER.info("level3 does not have dimensions")
    LOGGER.info("Error message follows:")

    net = Network()
    level1 = net.addRegion("level1", "TestNode", "")
    level2 = net.addRegion("level2", "TestNode", "")
    _level3 = net.addRegion("level3", "TestNode", "")
    dims = Dimensions([6, 4])
    level1.setDimensions(dims)
    net.link("level1", "level2", "TestFanIn2", "")
    self.assertRaises(RuntimeError, net.initialize)
    LOGGER.info("=====")

    LOGGER.info("======")
    LOGGER.info("Creating a link with incompatible dimensions. \
      Error message follows")
    net.link("level2", "level3", "TestFanIn2", "")
    self.assertRaises(RuntimeError, net.initialize)
Example #18
def createNetwork(dataSource):
  """Create and initialize a network."""
  with open(_PARAMS_PATH, "r") as f:
    modelParams = yaml.safe_load(f)["modelParams"]

  # Create a network that will hold the regions.
  network = Network()

  # Add a sensor region.
  network.addRegion("sensor", "py.RecordSensor", '{}')

  # Set the encoder and data source of the sensor region.
  sensorRegion = network.regions["sensor"].getSelf()
  sensorRegion.encoder = createEncoder(modelParams["sensorParams"]["encoders"])
  sensorRegion.dataSource = dataSource

  # Make sure the SP input width matches the sensor region output width.
  modelParams["spParams"]["inputWidth"] = sensorRegion.encoder.getWidth()

  # Add SP and TM regions.
  network.addRegion("SP", "py.SPRegion", json.dumps(modelParams["spParams"]))
  network.addRegion("TM", "py.TMRegion", json.dumps(modelParams["tmParams"]))

  # Add a classifier region.
  clName = "py.%s" % modelParams["clParams"].pop("regionName")
  network.addRegion("classifier", clName, json.dumps(modelParams["clParams"]))

  # Add all links
  createSensorToClassifierLinks(network, "sensor", "classifier")
  createDataOutLink(network, "sensor", "SP")
  createFeedForwardLink(network, "SP", "TM")
  createFeedForwardLink(network, "TM", "classifier")
  # Reset links are optional, since the sensor region does not send resets.
  createResetLink(network, "sensor", "SP")
  createResetLink(network, "sensor", "TM")

  # Make sure all objects are initialized.
  network.initialize()

  return network
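
Examples #9, #13 and #18 call small link helpers that are not shown. Plausible
sketches based on the output/input names used elsewhere in these snippets (the
exact srcOutput/destInput names are assumptions):

def createDataOutLink(network, sensorName, regionName):
  """Link the sensor's data output to the region's feed-forward input."""
  network.link(sensorName, regionName, "UniformLink", "",
               srcOutput="dataOut", destInput="bottomUpIn")


def createFeedForwardLink(network, srcName, destName):
  """Create a feed-forward link between two regions."""
  network.link(srcName, destName, "UniformLink", "",
               srcOutput="bottomUpOut", destInput="bottomUpIn")


def createResetLink(network, sensorName, regionName):
  """Propagate reset signals from the sensor."""
  network.link(sensorName, regionName, "UniformLink", "",
               srcOutput="resetOut", destInput="resetIn")


def createSensorToClassifierLinks(network, sensorName, classifierName):
  """Send bucket indices, actual values and categories to the classifier."""
  network.link(sensorName, classifierName, "UniformLink", "",
               srcOutput="bucketIdxOut", destInput="bucketIdxIn")
  network.link(sensorName, classifierName, "UniformLink", "",
               srcOutput="actValueOut", destInput="actValueIn")
  network.link(sensorName, classifierName, "UniformLink", "",
               srcOutput="categoryOut", destInput="categoryIn")
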
Example #19
    def testLinkingDownwardDimensions(self):
        #
        # Linking can induce dimensions downward
        #
        net = Network()
        level1 = net.addRegion("level1", "TestNode", "")
        level2 = net.addRegion("level2", "TestNode", "")
        dims = Dimensions([3, 2])
        level2.setDimensions(dims)
        net.link("level1", "level2", "TestFanIn2", "")
        net.initialize()

        # Level1 should now have dimensions [6, 4]
        self.assertEqual(level1.getDimensions()[0], 6)
        self.assertEqual(level1.getDimensions()[1], 4)

        #
        # We get nice error messages when network can't be initialized
        #
        LOGGER.info("=====")
        LOGGER.info("Creating a 3 level network in which levels 1 and 2 have")
        LOGGER.info("dimensions but network initialization will fail because")
        LOGGER.info("level3 does not have dimensions")
        LOGGER.info("Error message follows:")

        net = Network()
        level1 = net.addRegion("level1", "TestNode", "")
        level2 = net.addRegion("level2", "TestNode", "")
        _level3 = net.addRegion("level3", "TestNode", "")
        dims = Dimensions([6, 4])
        level1.setDimensions(dims)
        net.link("level1", "level2", "TestFanIn2", "")
        self.assertRaises(RuntimeError, net.initialize)
        LOGGER.info("=====")

        LOGGER.info("======")
        LOGGER.info("Creating a link with incompatible dimensions. \
      Error message follows")
        net.link("level2", "level3", "TestFanIn2", "")
        self.assertRaises(RuntimeError, net.initialize)
Example #20
def inspect(element, showRun=True, icon=None):
    """
    Launch an Inspector for the provided element.

    element -- A network, region or a path to a network directory.
    showRun -- Whether to show the RuntimeInspector in the dropdown, which lets
               the user run the network.
    """
    if isinstance(element, basestring):
        element = Network(element)
    else:
        assert isinstance(element, Network)

    if len(element.regions) == 0:
        raise Exception("Unable to inspect an empty network")

    # Network must be initialized before it can be inspected
    element.initialize()

    from wx import GetApp, PySimpleApp

    if GetApp():
        useApp = True
    else:
        useApp = False

    from nupic.analysis.inspectors.MultiInspector import MultiInspector

    if not useApp:
        app = PySimpleApp()

    inspector = MultiInspector(element=element, showRun=showRun, icon=icon)

    if not useApp:
        app.MainLoop()
        app.Destroy()
    else:
        return inspector
Example #21
def inspect(element, showRun=True, icon=None):
    """
    Launch an Inspector for the provided element.

    element -- A network, region or a path to a network directory.
    showRun -- Whether to show the RuntimeInspector in the dropdown, which lets
               the user run the network.
    """
    if isinstance(element, basestring):
        element = Network(element)
    else:
        assert isinstance(element, Network)

    if len(element.regions) == 0:
        raise Exception('Unable to inspect an empty network')

    # Network must be initialized before it can be inspected
    element.initialize()

    from wx import GetApp, PySimpleApp

    if GetApp():
        useApp = True
    else:
        useApp = False

    from nupic.analysis.inspectors.MultiInspector import MultiInspector

    if not useApp:
        app = PySimpleApp()

    inspector = MultiInspector(element=element, showRun=showRun, icon=icon)

    if not useApp:
        app.MainLoop()
        app.Destroy()
    else:
        return inspector
Example #22
def main():
  # Create Network instance
  network = Network()

  # Add three TestNode regions to network
  network.addRegion("region1", "TestNode", "")
  network.addRegion("region2", "TestNode", "")
  network.addRegion("region3", "TestNode", "")

  # Set dimensions on first region
  region1 = network.getRegions().getByName("region1")
  region1.setDimensions(Dimensions([1, 1]))

  # Link regions
  network.link("region1", "region2", "UniformLink", "")
  network.link("region2", "region1", "UniformLink", "")
  network.link("region1", "region3", "UniformLink", "")
  network.link("region2", "region3", "UniformLink", "")

  # Initialize network
  network.initialize()

  # Initialize Network Visualizer
  viz = NetworkVisualizer(network)

  # Render w/ graphviz
  viz.render(renderer=GraphVizRenderer)

  # Render w/ networkx
  viz.render(renderer=NetworkXRenderer)

  # Render to dot (stdout)
  viz.render(renderer=DotRenderer)

  # Render to dot (file)
  viz.render(renderer=lambda: DotRenderer(open("example.dot", "w")))
Example #23
def createNetwork(dataSource):
  """Create the Network instance.

  The network has a sensor region reading data from `dataSource` and passing
  the encoded representation to an SPRegion. The SPRegion output is passed to
  a TPRegion.

  :param dataSource: a RecordStream instance to get data from
  :returns: a Network instance ready to run
  """
  network = Network()

  # Our input is sensor data from the gym file. The RecordSensor region
  # allows us to specify a file record stream as the input source via the
  # dataSource attribute.
  network.addRegion("sensor", "py.RecordSensor",
                    json.dumps({"verbosity": _VERBOSITY}))
  sensor = network.regions["sensor"].getSelf()
  # The RecordSensor needs to know how to encode the input values
  sensor.encoder = createEncoder()
  # Specify the dataSource as a file record stream instance
  sensor.dataSource = dataSource

  # Create the spatial pooler region
  SP_PARAMS["inputWidth"] = sensor.encoder.getWidth()
  network.addRegion("spatialPoolerRegion", "py.SPRegion", json.dumps(SP_PARAMS))

  # Link the SP region to the sensor input
  network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
  network.link("sensor", "spatialPoolerRegion", "UniformLink", "",
               srcOutput="resetOut", destInput="resetIn")
  network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
               srcOutput="spatialTopDownOut", destInput="spatialTopDownIn")
  network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
               srcOutput="temporalTopDownOut", destInput="temporalTopDownIn")

  # Add the TPRegion on top of the SPRegion
  # TODO: Needs TMRegion
  network.addRegion("temporalMemoryRegion", "py.TPRegion",
                    json.dumps(TP_PARAMS))

  network.link("spatialPoolerRegion", "temporalMemoryRegion", "UniformLink", "")
  network.link("temporalMemoryRegion", "spatialPoolerRegion", "UniformLink", "",
               srcOutput="topDownOut", destInput="topDownIn")

  # Register UPRegion since we aren't in nupic
  curDirectory = os.path.dirname(os.path.abspath(__file__))
  # directory containing the union pooler directory is 2 directories above this file
  unionPoolerDirectory = os.path.split((os.path.split(curDirectory))[0])[0]
  sys.path.append(unionPoolerDirectory)
  Network.registerRegionPackage("union_pooling")

  # Add the UPRegion on top of the TPRegion
  temporal = network.regions["temporalMemoryRegion"].getSelf()
  UP_PARAMS["inputWidth"] = temporal.getOutputElementCount("bottomUpOut")
  network.addRegion("unionPoolerRegion", "py.PoolingRegion", json.dumps(UP_PARAMS))

  network.link("temporalMemoryRegion", "unionPoolerRegion", "UniformLink", "",
               srcOutput="activeCells", destInput="activeCells")
  network.link("temporalMemoryRegion", "unionPoolerRegion", "UniformLink", "",
               srcOutput="predictedActiveCells", destInput="predictedActiveCells")

  network.initialize()

  spatial = network.regions["spatialPoolerRegion"].getSelf()
  # Make sure learning is enabled (this is the default)
  spatial.setParameter("learningMode", 1, True)
  # We want temporal anomalies so disable anomalyMode in the SP. This mode is
  # used for computing anomalies in a non-temporal model.
  spatial.setParameter("anomalyMode", 1, False)

  # Enable topDownMode to get the predicted columns output
  temporal.setParameter("topDownMode", 1, True)
  # Make sure learning is enabled (this is the default)
  temporal.setParameter("learningMode", 1, True)
  # Enable inference mode so we get predictions
  temporal.setParameter("inferenceMode", 1, True)
  temporal.setParameter("computePredictedActiveCellIndices", 1, True)

  union = network.regions["unionPoolerRegion"].getSelf()
  # Make sure learning is enabled (this is the default)
  union.setParameter("learningMode", 1, True)

  return network
Example #24
class FunctionRecogniter():

    def __init__(self):

        self.run_number = 0

        # for classifier
        self.classifier_encoder_list = {}
        self.classifier_input_list   = {}
        self.prevPredictedColumns    = {}

        self.selectivity = "region2"

        # net structure
        self.net_structure = OrderedDict()
        self.net_structure['sensor3'] = ['region1']
        self.net_structure['region1'] = ['region2']

        # self.net_structure['sensor1'] = ['region1']
        # self.net_structure['sensor2'] = ['region2']
        # self.net_structure['region1'] = ['region3']
        # self.net_structure['region2'] = ['region3']



        # region change params
        self.dest_resgion_data = {
                'region1': {
                    'TP_PARAMS':{
                        "cellsPerColumn": 8,
                        "permanenceInc": 0.2,
                        "permanenceDec": 0.1,
                        #"permanenceDec": 0.0001,
                        },
                    },
                'region2': {
                    'SP_PARAMS':{
                        "inputWidth": 2024 * (8),
                        },
                    'TP_PARAMS':{
                        "cellsPerColumn": 8,
                        "permanenceInc": 0.2,
                        "permanenceDec": 0.1,
                        },
                    },
                # 'region3': {
                #     'SP_PARAMS':{
                #         "inputWidth": 2024 * (8),
                #         },
                #     'TP_PARAMS':{
                #         "cellsPerColumn": 8,
                #         },
                #     },
                 }

        # sensor change params
        self.sensor_params = {
                'sensor1': {
                    'xy_value': None,
                    'x_value': {
                        "fieldname": u"x_value",
                        "name": u"x_value",
                        "type": "ScalarEncoder",
                        'maxval': 100.0,
                        'minval':  0.0,
                        "n": 200,
                        "w": 21,
                        "clipInput": True
                        },
                    },
                'sensor2': {
                    'xy_value': None,
                    'y_value': {
                        "fieldname": u"y_value",
                        "name": u"y_value",
                        "type": "ScalarEncoder",
                        'maxval': 100.0,
                        'minval':  0.0,
                        "n": 200,
                        "w": 21,
                        "clipInput": True
                        },
                    },
                'sensor3': {
                    'xy_value': {
                        'maxval': 100.0,
                        'minval':   0.0
                        },
                    },
                # 'sensor3': {
                #     'xy_value': {
                #         'maxval': 100.0,
                #         'minval':  40.0
                #         },
                #     },
                }

        self._createNetwork()


        # for evaluating network accuracy
        self.evaluation = {}
        for name in self.dest_resgion_data.keys():
            self.evaluation[name] = NetworkEvaluation()

        self.evaluation_2 = {}
        for name in self.dest_resgion_data.keys():
            self.evaluation_2[name] = NetworkEvaluation()


        self.prev_layer_input  = defaultdict(lambda : defaultdict(list))

    def _addRegion(self, src_name, dest_name, params):

        sensor     =  src_name
        sp_name    = "sp_" + dest_name
        tp_name    = "tp_" + dest_name
        class_name = "class_" + dest_name

        try:
            self.network.regions[sp_name]
            self.network.regions[tp_name]
            self.network.regions[class_name]
            self.network.link(sensor, sp_name, "UniformLink", "")

        except Exception as e:
            # sp
            self.network.addRegion(sp_name, "py.SPRegion", json.dumps(params['SP_PARAMS']))
            self.network.link(sensor, sp_name, "UniformLink", "")

            # tp
            self.network.addRegion(tp_name, "py.TPRegion", json.dumps(params['TP_PARAMS']))
            self.network.link(sp_name, tp_name, "UniformLink", "")

            # class
            self.network.addRegion( class_name, "py.CLAClassifierRegion", json.dumps(params['CLASSIFIER_PARAMS']))
            self.network.link(tp_name, class_name, "UniformLink", "")

            encoder = MultiEncoder()
            encoder.addMultipleEncoders(params['CLASSIFIER_ENCODE_PARAMS'])
            self.classifier_encoder_list[class_name]  = encoder
            self.classifier_input_list[class_name]    = tp_name

    def _initRegion(self, name):
        sp_name = "sp_"+ name
        tp_name = "tp_"+ name
        class_name = "class_"+ name

        # setting sp
        SP = self.network.regions[sp_name]
        SP.setParameter("learningMode", True)
        SP.setParameter("anomalyMode", True)

        # setting tp
        TP = self.network.regions[tp_name]
        TP.setParameter("topDownMode", False)
        TP.setParameter("learningMode", True)
        TP.setParameter("inferenceMode", True)
        TP.setParameter("anomalyMode", False)

        # configure the classifier region
        classifier = self.network.regions[class_name]
        classifier.setParameter('inferenceMode', True)
        classifier.setParameter('learningMode', True)


    def _createNetwork(self):

        def deepupdate(original, update):
            """
            Recursively update a dict.
            Sub-dicts are merged rather than overwritten.
            """
            if update is None:
                return None
            for key, value in original.iteritems():
                if not key in update:
                    update[key] = value
                elif isinstance(value, dict):
                    deepupdate(value, update[key])
            return update
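
        # Example of deepupdate behavior (hypothetical values):
        #   deepupdate({'a': 1, 'b': {'c': 2}}, {'b': {'d': 3}})
        #   -> {'a': 1, 'b': {'c': 2, 'd': 3}}
        #   (missing keys are copied over; nested dicts are merged)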




        self.network = Network()

        # check
        if self.selectivity not in self.dest_resgion_data.keys():
            raise Exception, "There is no selected region : " + self.selectivity
        if len(self.net_structure.keys()) != len(set(self.net_structure.keys())):
            raise Exception, "There are duplicated net_structure keys : " + str(self.net_structure.keys())

        # sensor
        for sensor_name, change_params in self.sensor_params.items():
            self.network.addRegion(sensor_name, "py.RecordSensor", json.dumps({"verbosity": 0}))
            sensor = self.network.regions[sensor_name].getSelf()

            # set encoder
            params = deepupdate(cn.SENSOR_PARAMS, change_params)
            encoder = MultiEncoder()
            encoder.addMultipleEncoders( params )
            sensor.encoder         = encoder

            # set datasource
            sensor.dataSource      = cn.DataBuffer()


        # network
        print 'create network ...'
        for source, dest_list in self.net_structure.items():
            for dest in dest_list:
                change_params = self.dest_resgion_data[dest]
                params = deepupdate(cn.PARAMS, change_params)

                if source in self.sensor_params.keys():
                    sensor = self.network.regions[source].getSelf()
                    params['SP_PARAMS']['inputWidth'] = sensor.encoder.getWidth()
                    self._addRegion(source, dest, params)
                else:
                    #self._addRegion("sp_" + source, dest, params)
                    self._addRegion("tp_" + source, dest, params)

        # initialize
        print 'initializing network ...'
        self.network.initialize()
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            self._initRegion(name)


        # TODO: in a 1-3-1 structure, wouldn't blindly increasing the TP cell
        #       count be counterproductive?

        return


    def run(self, input_data, learn=True, learn_layer=None):
        """
        Run the network.
        To train, pass learn=True and specify ftype.
        To predict, pass learn=False and ftype=None.
        (Prediction happens during training as well.)

        input_data = {'xy_value': [1.0, 2.0], 'ftype': 'sin'}
        """

        self.enable_learning_mode(learn, learn_layer)
        self.run_number += 1

        # calc encoder, SP, TP
        for sensor_name in self.sensor_params.keys():
            self.network.regions[sensor_name].getSelf().dataSource.push(input_data)
        self.network.run(1)
        #self.layer_output(input_data)
        #self.debug(input_data)


        # learn classifier
        inferences = {}
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            class_name = "class_" + name
            inferences['classifier_'+name]   = self._learn_classifier_multi(class_name, actValue=input_data['ftype'], pstep=0)



        # anomaly
        inferences["anomaly"] = self._calc_anomaly()

        # output differ
        #inferences["output_differ"] = self._calc_output_differ()

        # # selectivity
        # if input_data['ftype'] is not None and input_data['xy_value'][0] >= 45 and input_data['xy_value'][0] <= 55:
        #     #self.layer_output(input_data)
        #     for name in self.dest_resgion_data.keys():
        #         tp_bottomUpOut = self.network.regions[ "tp_" + name ].getOutputData("bottomUpOut").nonzero()[0]
        #         self.evaluation[name].save_cell_activity(tp_bottomUpOut, input_data['ftype'])
        #
        # if input_data['ftype'] is not None and (input_data['xy_value'][0] <= 5 or input_data['xy_value'][0] >= 95):
        #     for name in self.dest_resgion_data.keys():
        #         tp_bottomUpOut = self.network.regions[ "tp_" + name ].getOutputData("bottomUpOut").nonzero()[0]
        #         self.evaluation_2[name].save_cell_activity(tp_bottomUpOut, input_data['ftype'])

        return inferences


    def _learn_classifier_multi(self, region_name, actValue=None, pstep=0):
        """
        Run the classifier computation.

        Ideally this would happen inside network.run instead of calling
        customCompute directly, but it is unclear how to retrieve the
        results that way.
        """

        # TODO: fully decouple the network from the classifier.
        #       The network computes up through sensor, SP and TP;
        #       evaluating/using those results should move outside.

        classifier     = self.network.regions[region_name]
        encoder        = self.classifier_encoder_list[region_name].getEncoderList()[0]
        class_input    = self.classifier_input_list[region_name]
        tp_bottomUpOut = self.network.regions[class_input].getOutputData("bottomUpOut").nonzero()[0]
        #tp_bottomUpOut = self.network.regions["TP"].getSelf()._tfdr.infActiveState['t'].reshape(-1).nonzero()[0]

        if actValue is not None:
            bucketIdx = encoder.getBucketIndices(actValue)[0]
            classificationIn = {
                    'bucketIdx': bucketIdx,
                    'actValue': actValue
                    }
        else:
            classificationIn = {'bucketIdx': 0,'actValue': 'no'}
        clResults = classifier.getSelf().customCompute(
                recordNum=self.run_number,
                patternNZ=tp_bottomUpOut,
                classification=classificationIn
                )

        inferences = self._get_inferences(clResults, pstep, summary_tyep='sum')

        return inferences

    def _get_inferences(self, clResults, steps, summary_tyep='sum'):
        """
        Just reshape the classifier results into a more convenient form.
        """

        likelihoodsVec = clResults[steps]
        bucketValues   = clResults['actualValues']

        likelihoodsDict = defaultdict(int)
        bestActValue = None
        bestProb = None

        if summary_tyep == 'sum':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                likelihoodsDict[actValue] += prob
                if bestProb is None or likelihoodsDict[actValue] > bestProb:
                    bestProb = likelihoodsDict[actValue]
                    bestActValue = actValue

        elif summary_tyep == 'best':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                if bestProb is None or prob > bestProb:
                    likelihoodsDict[actValue] = prob
                    bestProb = prob
                    bestActValue = actValue

        return {'likelihoodsDict': likelihoodsDict, 'best': {'value': bestActValue, 'prob':bestProb}}

    def _calc_output_differ(self):
        """
        When the same input recurs, compute the difference from the previous
        output. Intended as a rough indicator of learning progress.

        Identical: 0
        Completely different: 1
        """

        score = 0
        #self.prev_layer_input  = defaultdict(lambda defaultdict(list))
        output_differ = {}

        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):

            tp_input = self.network.regions["tp_"+name].getInputData("bottomUpIn").nonzero()[0]
            tp_output = self.network.regions["tp_"+name].getOutputData("bottomUpOut").nonzero()[0]

            if self.prev_layer_input[name].has_key(tuple(tp_input)):
                prev_output = self.prev_layer_input[name][tuple(tp_input)]

                same_cell = (set(prev_output) & set(tp_output))
                output_differ[name] = 1 - float(len(same_cell) )/ len(tp_output)

            self.prev_layer_input[name][tuple(tp_input)] = tp_output

        return output_differ

    def _calc_anomaly(self):
        """
        Compute the anomaly score for each layer.
        """

        score = 0
        anomalyScore = {}
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            #sp_bottomUpOut = self.network.regions["sp_"+name].getOutputData("bottomUpOut").nonzero()[0]
            sp_bottomUpOut = self.network.regions["tp_"+name].getInputData("bottomUpIn").nonzero()[0]

            if self.prevPredictedColumns.has_key(name):
                score = computeAnomalyScore(sp_bottomUpOut, self.prevPredictedColumns[name])
            #topdown_predict = self.network.regions["TP"].getSelf()._tfdr.topDownCompute().copy().nonzero()[0]
            topdown_predict = self.network.regions["tp_"+name].getSelf()._tfdr.topDownCompute().nonzero()[0]
            self.prevPredictedColumns[name] = copy.deepcopy(topdown_predict)

            anomalyScore[name] = score

        return anomalyScore
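
    # computeAnomalyScore is defined elsewhere in this project; a plausible
    # sketch of the standard definition it likely follows (an assumption, not
    # this file's actual code):
    #
    #   def computeAnomalyScore(active, prevPredicted):
    #       if len(active) == 0:
    #           return 0.0
    #       overlap = len(set(active) & set(prevPredicted))
    #       return 1.0 - float(overlap) / len(active)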

    def reset(self):
        """
        reset sequence
        """
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            self.network.regions["tp_"+name].getSelf().resetSequenceStates()

    def enable_learning_mode(self, enable, layer_name = None):
        """
        Toggle learningMode for the SP, TP and Classifier in each layer.
        """
        if layer_name is None:
            for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
                self.network.regions["sp_"+name].setParameter("learningMode", enable)
                self.network.regions["tp_"+name].setParameter("learningMode", enable)
                self.network.regions["class_"+name].setParameter("learningMode", enable)
        else:
            for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
                self.network.regions["sp_"+name].setParameter("learningMode", not enable)
                self.network.regions["tp_"+name].setParameter("learningMode", not enable)
                self.network.regions["class_"+name].setParameter("learningMode", not enable)
            for name in layer_name:
                self.network.regions["sp_"+name].setParameter("learningMode", enable)
                self.network.regions["tp_"+name].setParameter("learningMode", enable)
                self.network.regions["class_"+name].setParameter("learningMode", enable)


    def print_inferences(self, input_data, inferences):
        """
        Print the inference results.
        """

        print "%10s, %10s, %1s" % (
                int(input_data['xy_value'][0]),
                int(input_data['xy_value'][1]),
                input_data['ftype'][:1]),


        for name in sorted(self.dest_resgion_data.keys()):
            print "%1s" % (inferences['classifier_'+name]['best']['value'][:1]),

        for name in sorted(self.dest_resgion_data.keys()):
            print "%6.4f," % (inferences['classifier_'+name]['likelihoodsDict'][input_data['ftype']]),

        for name in sorted(self.dest_resgion_data.keys()):
            print "%3.2f," % (inferences["anomaly"][name]),

        # for name in sorted(self.dest_resgion_data.keys()):
        #     print "%5s," % name,

        print

    def layer_output(self, input_data, region_name=None):
        if region_name is not None:
            Region = self.network.regions[region_name]
            print Region.getOutputData("bottomUpOut").nonzero()[0]
            return

        for name in self.dest_resgion_data.keys():
            SPRegion = self.network.regions["sp_"+name]
            TPRegion = self.network.regions["tp_"+name]

            print "#################################### ", name
            print
            print "==== SP layer ===="
            print "input:  ", SPRegion.getInputData("bottomUpIn").nonzero()[0]
            print "output: ", SPRegion.getOutputData("bottomUpOut").nonzero()[0]
            print
            print "==== TP layer ===="
            print "input:  ", TPRegion.getInputData("bottomUpIn").nonzero()[0]
            print "output: ", TPRegion.getOutputData("bottomUpOut").nonzero()[0]
            print
            print "==== Predict ===="
            print TPRegion.getSelf()._tfdr.topDownCompute().copy().nonzero()[0][:10]
            print
Example #25
    def testSimpleMulticlassNetwork(self):

        # Setup data record stream of fake data (with three categories)
        filename = _getTempFileName()
        fields = [("timestamp", "datetime", "T"), ("value", "float", ""),
                  ("reset", "int", "R"), ("sid", "int", "S"),
                  ("categories", "list", "C")]
        records = (
            [datetime(day=1, month=3, year=2010), 0.0, 1, 0, ""],
            [datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1 2"],
            [datetime(day=3, month=3, year=2010), 1.0, 0, 0, "1 2"],
            [datetime(day=4, month=3, year=2010), 2.0, 0, 0, "0"],
            [datetime(day=5, month=3, year=2010), 3.0, 0, 0, "1 2"],
            [datetime(day=6, month=3, year=2010), 5.0, 0, 0, "1 2"],
            [datetime(day=7, month=3, year=2010), 8.0, 0, 0, "0"],
            [datetime(day=8, month=3, year=2010), 13.0, 0, 0, "1 2"])
        dataSource = FileRecordStream(streamID=filename,
                                      write=True,
                                      fields=fields)
        for r in records:
            dataSource.appendRecord(list(r))

        # Create the network and get region instances.
        net = Network()
        net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
        net.addRegion("classifier", "py.KNNClassifierRegion",
                      "{'k': 2,'distThreshold': 0,'maxCategoryCount': 3}")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="dataOut",
                 destInput="bottomUpIn")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="categoryOut",
                 destInput="categoryIn")
        sensor = net.regions["sensor"]
        classifier = net.regions["classifier"]

        # Setup sensor region encoder and data stream.
        dataSource.close()
        dataSource = FileRecordStream(filename)
        sensorRegion = sensor.getSelf()
        sensorRegion.encoder = MultiEncoder()
        sensorRegion.encoder.addEncoder(
            "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
        sensorRegion.dataSource = dataSource

        # Get ready to run.
        net.initialize()

        # Train the network (by default learning is ON in the classifier, but assert
        # anyway) and then turn off learning and turn on inference mode.
        self.assertEqual(classifier.getParameter("learningMode"), 1)
        net.run(8)
        classifier.setParameter("inferenceMode", 1)
        classifier.setParameter("learningMode", 0)

        # Assert learning is OFF and that the classifier learned the dataset.
        self.assertEqual(classifier.getParameter("learningMode"), 0,
                         "Learning mode is not turned off.")
        self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                         "Inference mode is not turned on.")
        self.assertEqual(
            classifier.getParameter("categoryCount"), 3,
            "The classifier should count three total categories.")
        # The classifier learns 12 patterns because there are 12 category
        # labels among the records:
        self.assertEqual(
            classifier.getParameter("patternCount"), 12,
            "The classifier should've learned 12 samples in total.")

        # Test the network on the same data as it trained on; should classify with
        # 100% accuracy.
        expectedCats = ([0.0, 0.5, 0.5],
                        [0.0, 0.5, 0.5],
                        [0.0, 0.5, 0.5],
                        [0.5, 0.5, 0.0],
                        [0.0, 0.5, 0.5],
                        [0.0, 0.5, 0.5],
                        [0.5, 0.5, 0.0],
                        [0.0, 0.5, 0.5])
        dataSource.rewind()
        for i in xrange(8):
            net.run(1)
            inferredCats = classifier.getOutputData("categoriesOut")
            self.assertSequenceEqual(
                expectedCats[i], inferredCats.tolist(),
                "Classififer did not infer expected category probabilites for record "
                "number {}.".format(i))

        # Close data stream, delete file.
        dataSource.close()
        os.remove(filename)
Example #26
  def testSerialization(self):
    n = Network()

    imageDims = (42, 38)
    params = dict(
      width=imageDims[0],
      height=imageDims[1],
      mode="bw",
      background=1,
      invertOutput=1)

    sensor = n.addRegion("sensor", "py.ImageSensor", json.dumps(params))
    sensor.setDimensions(Dimensions(imageDims[0], imageDims[1]))

    params = dict(
      inputShape=imageDims,
      coincidencesShape=imageDims,
      disableTemporal=1,
      tpSeed=43,
      spSeed=42,
      nCellsPerCol=1)

    l1 = n.addRegion("l1", "py.CLARegion", json.dumps(params))

    params = dict(
      maxCategoryCount=48,
      SVDSampleCount=400,
      SVDDimCount=5,
      distanceNorm=0.6)

    _classifier = n.addRegion("classifier", "py.KNNClassifierRegion",
                              json.dumps(params))

    # TODO: link params should not be required. Dest region dimensions are
    # already specified as [1]
    params = dict(
      mapping="in",
      rfSize=imageDims)

    n.link("sensor", "l1", "UniformLink", json.dumps(params))
    n.link("l1", "classifier", "UniformLink", "", "bottomUpOut", "bottomUpIn")
    n.link("sensor", "classifier", "UniformLink", "", "categoryOut",
           "categoryIn")
    n.initialize()

    n.save("fdr.nta")

    # Make sure the network bundle has all the expected files
    self.assertTrue(os.path.exists("fdr.nta/network.yaml"))
    self.assertTrue(os.path.exists("fdr.nta/R0-pkl"))
    self.assertTrue(os.path.exists("fdr.nta/R1-pkl"))
    self.assertTrue(os.path.exists("fdr.nta/R2-pkl"))

    n2 = Network("fdr.nta")
    n2.initialize()  # should not fail

    # Make sure the network is actually the same
    sensor = n2.regions['sensor']
    self.assertEqual(sensor.type, "py.ImageSensor")
    # would like to directly compare, but can't -- NPC-6
    self.assertEqual(str(sensor.dimensions), str(Dimensions(42, 38)))
    self.assertEqual(sensor.getParameter("width"), 42)
    self.assertEqual(sensor.getParameter("height"), 38)
    self.assertEqual(sensor.getParameter("mode"), "bw")
    self.assertEqual(sensor.getParameter("background"), 1)
    self.assertEqual(sensor.getParameter("invertOutput"), 1)

    l1 = n2.regions['l1']
    self.assertEqual(l1.type, "py.CLARegion")
    self.assertEqual(str(l1.dimensions), str(Dimensions(1)))
    a = l1.getParameter("inputShape")
    self.assertEqual(len(a), 2)
    self.assertEqual(a[0], 42)
    self.assertEqual(a[1], 38)

    a = l1.getParameter("coincidencesShape")
    self.assertEqual(len(a), 2)
    self.assertEqual(a[0], 42)
    self.assertEqual(a[1], 38)

    self.assertEqual(l1.getParameter("disableTemporal"), 1)
    self.assertEqual(l1.getParameter("spSeed"), 42)
    self.assertEqual(l1.getParameter("tpSeed"), 43)

    cl = n2.regions['classifier']
    self.assertEqual(cl.type, "py.KNNClassifierRegion")
    self.assertEqual(cl.getParameter("maxCategoryCount"), 48)
    self.assertEqual(cl.getParameter("SVDSampleCount"), 400)
    self.assertEqual(cl.getParameter("SVDDimCount"), 5)
    self.assertAlmostEqual(cl.getParameter("distanceNorm"), 0.6, places=4)
    self.assertEqual(str(cl.dimensions), str(Dimensions(1)))

    n2.save("fdr2.nta")

    # now compare the two network bundles -- should be the same
    c = filecmp.dircmp("fdr.nta", "fdr2.nta")
    self.assertEqual(len(c.left_only), 0,
                     "fdr.nta has extra files: %s" % c.left_only)

    self.assertEqual(len(c.right_only), 0,
                     "fdr2.nta has extra files: %s" % c.right_only)

    if len(c.diff_files) > 0:
      _LOGGER.warn("Some bundle files differ: %s\n"
                   "This is expected, as pickle.load() followed by "
                   "pickle.dump() doesn't produce the same file", c.diff_files)
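
# A small teardown sketch (an assumption, not part of the original test): the
# saved bundles are plain directories, so the standard library can remove them
# once the comparison above is done.
import os
import shutil

for bundle in ("fdr.nta", "fdr2.nta"):
    if os.path.exists(bundle):
        shutil.rmtree(bundle)  # delete the serialized network bundle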
Example #27
  def testSimpleMulticlassNetwork(self):
  
    # Setup data record stream of fake data (with three categories)
    filename = _getTempFileName()
    fields = [("timestamp", "datetime", "T"),
              ("value", "float", ""),
              ("reset", "int", "R"),
              ("sid", "int", "S"),
              ("categories", "list", "C")]
    records = (
      [datetime(day=1, month=3, year=2010), 0.0, 1, 0, ""],
      [datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1 2"],
      [datetime(day=3, month=3, year=2010), 1.0, 0, 0, "1 2"],
      [datetime(day=4, month=3, year=2010), 2.0, 0, 0, "0"],
      [datetime(day=5, month=3, year=2010), 3.0, 0, 0, "1 2"],
      [datetime(day=6, month=3, year=2010), 5.0, 0, 0, "1 2"],
      [datetime(day=7, month=3, year=2010), 8.0, 0, 0, "0"],
      [datetime(day=8, month=3, year=2010), 13.0, 0, 0, "1 2"])
    dataSource = FileRecordStream(streamID=filename, write=True, fields=fields)
    for r in records:
      dataSource.appendRecord(list(r))

    # Create the network and get region instances.
    net = Network()
    net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
    net.addRegion("classifier","py.KNNClassifierRegion",
                  "{'k': 2,'distThreshold': 0,'maxCategoryCount': 3}")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput = "dataOut", destInput = "bottomUpIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput = "categoryOut", destInput = "categoryIn")
    sensor = net.regions["sensor"]
    classifier = net.regions["classifier"]
    
    # Setup sensor region encoder and data stream.
    dataSource.close()
    dataSource = FileRecordStream(filename)
    sensorRegion = sensor.getSelf()
    sensorRegion.encoder = MultiEncoder()
    sensorRegion.encoder.addEncoder(
        "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
    sensorRegion.dataSource = dataSource
    
    # Get ready to run.
    net.initialize()

    # Train the network (by default learning is ON in the classifier, but assert
    # anyway) and then turn off learning and turn on inference mode.
    self.assertEqual(classifier.getParameter("learningMode"), 1)
    net.run(8)
    classifier.setParameter("inferenceMode", 1)
    classifier.setParameter("learningMode", 0)

    # Assert learning is OFF and that the classifier learned the dataset.
    self.assertEqual(classifier.getParameter("learningMode"), 0,
        "Learning mode is not turned off.")
    self.assertEqual(classifier.getParameter("inferenceMode"), 1,
        "Inference mode is not turned on.")
    self.assertEqual(classifier.getParameter("categoryCount"), 3,
        "The classifier should count three total categories.")
    # The classifier learns 12 patterns because the records carry 12 category
    # labels in total (multi-label records contribute one pattern per label):
    self.assertEqual(classifier.getParameter("patternCount"), 12,
        "The classifier should've learned 12 samples in total.")

    # Test the network on the same data as it trained on; should classify with
    # 100% accuracy.
    expectedCats = ([0.0, 0.5, 0.5],
                    [0.0, 0.5, 0.5],
                    [0.0, 0.5, 0.5],
                    [0.5, 0.5, 0.0],
                    [0.0, 0.5, 0.5],
                    [0.0, 0.5, 0.5],
                    [0.5, 0.5, 0.0],
                    [0.0, 0.5, 0.5])
    dataSource.rewind()
    for i in xrange(8):
      net.run(1)
      inferredCats = classifier.getOutputData("categoriesOut")
      self.assertSequenceEqual(expectedCats[i], inferredCats.tolist(),
          "Classififer did not infer expected category probabilites for record "
          "number {}.".format(i))
    
    # Close data stream, delete file.
    dataSource.close()
    os.remove(filename)
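
# A quick standalone sketch of where patternCount == 12 comes from in the test
# above: each record contributes one learned pattern per category label, and
# the labels across the eight records sum to 12.
categoryStrings = ["", "1 2", "1 2", "0", "1 2", "1 2", "0", "1 2"]
print sum(len(c.split()) for c in categoryStrings)  # prints 12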
    def runNodesTest(self, nodeType1, nodeType2):
        # =====================================================
        # Build and run the network
        # =====================================================
        LOGGER.info('test(level1: %s, level2: %s)', nodeType1, nodeType2)
        net = Network()
        level1 = net.addRegion("level1", nodeType1, "{int32Param: 15}")
        dims = Dimensions([6, 4])
        level1.setDimensions(dims)

        level2 = net.addRegion("level2", nodeType2, "{real64Param: 128.23}")

        net.link("level1", "level2", "TestFanIn2", "")

        # Could call initialize here, but not necessary as net.run()
        # initializes implicitly.
        # net.initialize()

        net.run(1)
        LOGGER.info("Successfully created network and ran for one iteration")

        # =====================================================
        # Check everything
        # =====================================================
        dims = level1.getDimensions()
        self.assertEqual(len(dims), 2)
        self.assertEqual(dims[0], 6)
        self.assertEqual(dims[1], 4)

        dims = level2.getDimensions()
        self.assertEqual(len(dims), 2)
        self.assertEqual(dims[0], 3)
        self.assertEqual(dims[1], 2)

        # Check L1 output. getOutputData returns a reference to the region's
        # actual output array rather than a copy. The values themselves are
        # determined by the TestNode compute() behavior.
        l1output = level1.getOutputData("bottomUpOut")
        self.assertEqual(len(l1output), 48)  # 24 nodes; 2 values per node
        for i in xrange(24):
            self.assertEqual(l1output[2 * i],
                             0)  # size of input to each node is 0
            self.assertEqual(l1output[2 * i + 1], i)  # node number

        # check L2 output.
        l2output = level2.getOutputData("bottomUpOut")
        self.assertEqual(len(l2output), 12)  # 6 nodes; 2 values per node
        # Output val = node number + sum(inputs)
        # Can compute from knowing L1 layout
        #
        #  00 01 | 02 03 | 04 05
        #  06 07 | 08 09 | 10 11
        #  ---------------------
        #  12 13 | 14 15 | 16 17
        #  18 19 | 20 21 | 22 23
        outputVals = []
        outputVals.append(0 + (0 + 1 + 6 + 7))
        outputVals.append(1 + (2 + 3 + 8 + 9))
        outputVals.append(2 + (4 + 5 + 10 + 11))
        outputVals.append(3 + (12 + 13 + 18 + 19))
        outputVals.append(4 + (14 + 15 + 20 + 21))
        outputVals.append(5 + (16 + 17 + 22 + 23))
        for i in xrange(6):
            if l2output[2 * i] != 8:
                LOGGER.info(l2output[2 * i])
                # from dbgp.client import brk; brk(port=9019)

            self.assertEqual(l2output[2 * i],
                             8)  # size of input for each node is 8
            self.assertEqual(l2output[2 * i + 1], outputVals[i])

        # =====================================================
        # Run for one more iteration
        # =====================================================
        LOGGER.info("Running for a second iteration")
        net.run(1)

        # =====================================================
        # Check everything again
        # =====================================================

        # Outputs are as before except that each node's first value (the
        # iteration count) is incremented; each L2 node's sum of inputs
        # therefore grows by 4, one per child node.
        for i in xrange(24):
            self.assertEqual(l1output[2 * i], 1)
            self.assertEqual(l1output[2 * i + 1], i)

        for i in xrange(6):
            self.assertEqual(l2output[2 * i], 9)
            self.assertEqual(l2output[2 * i + 1], outputVals[i] + 4)

        # =====================================================
        # Demonstrate a few other features
        # =====================================================

        #
        # Linking can induce dimensions downward
        #

        net = Network()
        level1 = net.addRegion("level1", nodeType1, "")
        level2 = net.addRegion("level2", nodeType2, "")
        dims = Dimensions([3, 2])
        level2.setDimensions(dims)
        net.link("level1", "level2", "TestFanIn2", "")
        net.initialize()

        # Level1 should now have dimensions [6, 4]
        self.assertEqual(level1.getDimensions()[0], 6)
        self.assertEqual(level1.getDimensions()[1], 4)
  def testSimpleMulticlassNetworkPY(self):
    # Setup data record stream of fake data (with three categories)
    filename = _getTempFileName()
    fields = [("timestamp", "datetime", "T"),
              ("value", "float", ""),
              ("reset", "int", "R"),
              ("sid", "int", "S"),
              ("categories", "list", "C")]
    records = (
      [datetime(day=1, month=3, year=2010), 0.0, 1, 0, "0"],
      [datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=3, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=4, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=5, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=6, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=7, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=8, month=3, year=2010), 1.0, 0, 0, "1"])
    dataSource = FileRecordStream(streamID=filename, write=True, fields=fields)
    for r in records:
      dataSource.appendRecord(list(r))

    # Create the network and get region instances.
    net = Network()
    net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
    net.addRegion("classifier", "py.SDRClassifierRegion",
                  "{steps: '0', alpha: 0.001, implementation: 'py'}")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="dataOut", destInput="bottomUpIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="categoryOut", destInput="categoryIn")
    sensor = net.regions["sensor"]
    classifier = net.regions["classifier"]

    # Setup sensor region encoder and data stream.
    dataSource.close()
    dataSource = FileRecordStream(filename)
    sensorRegion = sensor.getSelf()
    sensorRegion.encoder = MultiEncoder()
    sensorRegion.encoder.addEncoder(
      "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
    sensorRegion.dataSource = dataSource

    # Get ready to run.
    net.initialize()

    # Train the network (by default learning is ON in the classifier, but assert
    # anyway) and then turn off learning and turn on inference mode.
    self.assertEqual(classifier.getParameter("learningMode"), 1)
    net.run(8)

    # Test the network on the same data as it trained on; should classify with
    # 100% accuracy.
    classifier.setParameter("inferenceMode", 1)
    classifier.setParameter("learningMode", 0)

    # Assert learning is OFF and that the classifier learned the dataset.
    self.assertEqual(classifier.getParameter("learningMode"), 0,
                     "Learning mode is not turned off.")
    self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                     "Inference mode is not turned on.")

    # make sure we can access all the parameters with getParameter
    self.assertEqual(classifier.getParameter("maxCategoryCount"), 2000)
    self.assertAlmostEqual(float(classifier.getParameter("alpha")), 0.001)
    self.assertEqual(int(classifier.getParameter("steps")), 0)
    self.assertEqual(classifier.getParameter("implementation"), "py")
    self.assertEqual(classifier.getParameter("verbosity"), 0)


    expectedCats = ([0.0], [1.0], [0.0], [1.0], [0.0], [1.0], [0.0], [1.0],)
    dataSource.rewind()
    for i in xrange(8):
      net.run(1)
      inferredCats = classifier.getOutputData("categoriesOut")
      self.assertSequenceEqual(expectedCats[i], inferredCats.tolist(),
                               "Classififer did not infer expected category "
                               "for record number {}.".format(i))
    # Close data stream, delete file.
    dataSource.close()
    os.remove(filename)
def _createNetwork(inverseReadoutResolution, anchorInputSize, dualPhase=False):
  """
  Create a simple network connecting sensor and motor inputs to the location
  region. Use :meth:`RawSensor.addDataToQueue` to add sensor input and growth
  candidates. Use :meth:`RawValues.addDataToQueue` to add motor input.
  ::
                        +----------+
    [   sensor*   ] --> |          | --> [     activeCells        ]
    [ candidates* ] --> | location | --> [    learnableCells      ]
    [    motor    ] --> |          | --> [ sensoryAssociatedCells ]
                        +----------+

  :param inverseReadoutResolution:
    Specifies the diameter of the circle of phases in the rhombus encoded by a
    bump.
  :type inverseReadoutResolution: int

  :type anchorInputSize: int
  :param anchorInputSize:
    The number of input bits in the anchor input.

  .. note::
    (*) This function will only add the 'sensor' and 'candidates' regions when
    'anchorInputSize' is greater than zero. This is useful if you would like to
    compute locations ignoring sensor input

  .. seealso::
     - :py:func:`htmresearch.frameworks.location.path_integration_union_narrowing.createRatModuleFromReadoutResolution`

  """
  net = Network()

  # Create simple region to pass motor commands as displacement vectors (dx, dy)
  net.addRegion("motor", "py.RawValues", json.dumps({
    "outputWidth": 2
  }))

  if anchorInputSize > 0:
    # Create simple region to pass growth candidates
    net.addRegion("candidates", "py.RawSensor", json.dumps({
      "outputWidth": anchorInputSize
    }))

    # Create simple region to pass sensor input
    net.addRegion("sensor", "py.RawSensor", json.dumps({
      "outputWidth": anchorInputSize
    }))

  # Initialize region with 5 modules varying scale by sqrt(2) and 4 different
  # random orientations for each scale
  scale = []
  orientation = []
  for i in xrange(5):
    for _ in xrange(4):
      angle = np.radians(random.gauss(7.5, 7.5))
      orientation.append(random.choice([angle, -angle]))
      scale.append(10.0 * (math.sqrt(2) ** i))

  # Create location region
  params = computeRatModuleParametersFromReadoutResolution(inverseReadoutResolution)
  params.update({
    "moduleCount": len(scale),
    "scale": scale,
    "orientation": orientation,
    "anchorInputSize": anchorInputSize,
    "activationThreshold": 8,
    "initialPermanence": 1.0,
    "connectedPermanence": 0.5,
    "learningThreshold": 8,
    "sampleSize": 10,
    "permanenceIncrement": 0.1,
    "permanenceDecrement": 0.0,
    "dualPhase": dualPhase,
    "bumpOverlapMethod": "probabilistic"
  })
  net.addRegion("location", "py.GridCellLocationRegion", json.dumps(params))

  if anchorInputSize > 0:
    # Link sensor
    net.link("sensor", "location", "UniformLink", "",
             srcOutput="dataOut", destInput="anchorInput")
    net.link("sensor", "location", "UniformLink", "",
             srcOutput="resetOut", destInput="resetIn")
    net.link("candidates", "location", "UniformLink", "",
             srcOutput="dataOut", destInput="anchorGrowthCandidates")

  # Link motor input
  net.link("motor", "location", "UniformLink", "",
           srcOutput="dataOut", destInput="displacement")

  # Initialize network objects
  net.initialize()

  return net
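
# A usage sketch for the network built by _createNetwork above. The
# addDataToQueue signatures follow the ones used elsewhere in this document
# (RawSensor takes (activeBits, reset, sequenceId); RawValues takes the raw
# vector); the sensed bits and displacement below are illustrative values.
net = _createNetwork(inverseReadoutResolution=8, anchorInputSize=1024)
sensor = net.regions["sensor"].getSelf()
candidates = net.regions["candidates"].getSelf()
motor = net.regions["motor"].getSelf()

sensor.addDataToQueue([3, 42, 99], True, 0)      # sensed feature bits, reset, sequenceId
candidates.addDataToQueue([3, 42, 99], True, 0)  # growth candidates for learning
motor.addDataToQueue([1.0, 0.0])                 # displacement vector (dx, dy)
net.run(1)

# Read back one of the location outputs named in the docstring diagram
activeCells = net.regions["location"].getOutputData("activeCells").nonzero()[0]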
  def runNodesTest(self, nodeType1, nodeType2):
    # =====================================================
    # Build and run the network
    # =====================================================
    LOGGER.info('test(level1: %s, level2: %s)', nodeType1, nodeType2)
    net = Network()
    level1 = net.addRegion("level1", nodeType1, "{int32Param: 15}")
    dims = Dimensions([6, 4])
    level1.setDimensions(dims)

    level2 = net.addRegion("level2", nodeType2, "{real64Param: 128.23}")

    net.link("level1", "level2", "TestFanIn2", "")

    # Could call initialize here, but not necessary as net.run()
    # initializes implicitly.
    # net.initialize()

    net.run(1)
    LOGGER.info("Successfully created network and ran for one iteration")

    # =====================================================
    # Check everything
    # =====================================================
    dims = level1.getDimensions()
    self.assertEqual(len(dims), 2)
    self.assertEqual(dims[0], 6)
    self.assertEqual(dims[1], 4)

    dims = level2.getDimensions()
    self.assertEqual(len(dims), 2)
    self.assertEqual(dims[0], 3)
    self.assertEqual(dims[1], 2)

    # Check L1 output. getOutputData returns a reference to the region's
    # actual output array rather than a copy. The values themselves are
    # determined by the TestNode compute() behavior.
    l1output = level1.getOutputData("bottomUpOut")
    self.assertEqual(len(l1output), 48) # 24 nodes; 2 values per node
    for i in xrange(24):
      self.assertEqual(l1output[2*i], 0)      # size of input to each node is 0
      self.assertEqual(l1output[2*i+1], i)    # node number

    # check L2 output.
    l2output = level2.getOutputData("bottomUpOut")
    self.assertEqual(len(l2output), 12) # 6 nodes; 2 values per node
    # Output val = node number + sum(inputs)
    # Can compute from knowing L1 layout
    #
    #  00 01 | 02 03 | 04 05
    #  06 07 | 08 09 | 10 11
    #  ---------------------
    #  12 13 | 14 15 | 16 17
    #  18 19 | 20 21 | 22 23
    outputVals = []
    outputVals.append(0 + (0 + 1 + 6 + 7))
    outputVals.append(1 + (2 + 3 + 8 + 9))
    outputVals.append(2 + (4 + 5 + 10 + 11))
    outputVals.append(3 + (12 + 13 + 18 + 19))
    outputVals.append(4 + (14 + 15 + 20 + 21))
    outputVals.append(5 + (16 + 17 + 22 + 23))
    for i in xrange(6):
      if l2output[2*i] != 8:
        LOGGER.info(l2output[2*i])
        # from dbgp.client import brk; brk(port=9019)

      self.assertEqual(l2output[2*i], 8)      # size of input for each node is 8
      self.assertEqual(l2output[2*i+1], outputVals[i])


    # =====================================================
    # Run for one more iteration
    # =====================================================
    LOGGER.info("Running for a second iteration")
    net.run(1)

    # =====================================================
    # Check everything again
    # =====================================================

    # Outputs are as before except that each node's first value (the
    # iteration count) is incremented; each L2 node's sum of inputs
    # therefore grows by 4, one per child node.
    for i in xrange(24):
      self.assertEqual(l1output[2*i], 1)
      self.assertEqual(l1output[2*i+1], i)

    for i in xrange(6):
      self.assertEqual(l2output[2*i], 9)
      self.assertEqual(l2output[2*i+1], outputVals[i] + 4)


    # =====================================================
    # Demonstrate a few other features
    # =====================================================

    #
    # Linking can induce dimensions downward
    #


    net = Network()
    level1 = net.addRegion("level1", nodeType1, "")
    level2 = net.addRegion("level2", nodeType2, "")
    dims = Dimensions([3, 2])
    level2.setDimensions(dims)
    net.link("level1", "level2", "TestFanIn2", "")
    net.initialize()

    # Level1 should now have dimensions [6, 4]
    self.assertEqual(level1.getDimensions()[0], 6)
    self.assertEqual(level1.getDimensions()[1], 4)
    def testSimpleMulticlassNetworkPY(self):
        # Setup data record stream of fake data (with three categories)
        filename = _getTempFileName()
        fields = [("timestamp", "datetime", "T"), ("value", "float", ""),
                  ("reset", "int", "R"), ("sid", "int", "S"),
                  ("categories", "list", "C")]
        records = ([datetime(day=1, month=3, year=2010), 0.0, 1, 0, "0"],
                   [datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1"],
                   [datetime(day=3, month=3, year=2010), 0.0, 0, 0, "0"],
                   [datetime(day=4, month=3, year=2010), 1.0, 0, 0, "1"],
                   [datetime(day=5, month=3, year=2010), 0.0, 0, 0, "0"],
                   [datetime(day=6, month=3, year=2010), 1.0, 0, 0, "1"],
                   [datetime(day=7, month=3, year=2010), 0.0, 0, 0, "0"],
                   [datetime(day=8, month=3, year=2010), 1.0, 0, 0, "1"])
        dataSource = FileRecordStream(streamID=filename,
                                      write=True,
                                      fields=fields)
        for r in records:
            dataSource.appendRecord(list(r))

        # Create the network and get region instances.
        net = Network()
        net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
        net.addRegion("classifier", "py.SDRClassifierRegion",
                      "{steps: '0', alpha: 0.001, implementation: 'py'}")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="dataOut",
                 destInput="bottomUpIn")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="categoryOut",
                 destInput="categoryIn")
        sensor = net.regions["sensor"]
        classifier = net.regions["classifier"]

        # Setup sensor region encoder and data stream.
        dataSource.close()
        dataSource = FileRecordStream(filename)
        sensorRegion = sensor.getSelf()
        sensorRegion.encoder = MultiEncoder()
        sensorRegion.encoder.addEncoder(
            "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
        sensorRegion.dataSource = dataSource

        # Get ready to run.
        net.initialize()

        # Train the network (by default learning is ON in the classifier, but assert
        # anyway) and then turn off learning and turn on inference mode.
        self.assertEqual(classifier.getParameter("learningMode"), 1)
        net.run(8)

        # Test the network on the same data as it trained on; should classify with
        # 100% accuracy.
        classifier.setParameter("inferenceMode", 1)
        classifier.setParameter("learningMode", 0)

        # Assert learning is OFF and that the classifier learned the dataset.
        self.assertEqual(classifier.getParameter("learningMode"), 0,
                         "Learning mode is not turned off.")
        self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                         "Inference mode is not turned on.")

        # make sure we can access all the parameters with getParameter
        self.assertEqual(classifier.getParameter("maxCategoryCount"), 2000)
        self.assertAlmostEqual(float(classifier.getParameter("alpha")), 0.001)
        self.assertEqual(int(classifier.getParameter("steps")), 0)
        self.assertEqual(classifier.getParameter("implementation"), "py")
        self.assertEqual(classifier.getParameter("verbosity"), 0)

        expectedCats = (
            [0.0],
            [1.0],
            [0.0],
            [1.0],
            [0.0],
            [1.0],
            [0.0],
            [1.0],
        )
        dataSource.rewind()
        for i in xrange(8):
            net.run(1)
            inferredCats = classifier.getOutputData("categoriesOut")
            self.assertSequenceEqual(
                expectedCats[i], inferredCats.tolist(),
                "Classififer did not infer expected category "
                "for record number {}.".format(i))
        # Close data stream, delete file.
        dataSource.close()
        os.remove(filename)
Example #33
class HTMusicModel(object):
    def __init__(self, model_params):
        # Init an HTM network
        self.network = Network()

        # Getting parameters for network regions
        self.sensor_params = model_params['Sensor']
        self.spatial_pooler_params = model_params['SpatialPooler']
        self.temporal_memory_params = model_params['TemporalMemory']
        self.classifiers_params = model_params['Classifiers']
        self.encoders_params = model_params['Encoders']

        # Adding regions to HTM network
        self.network.addRegion('DurationEncoder', 'ScalarSensor',
                               json.dumps(self.encoders_params['duration']))
        self.network.addRegion('VelocityEncoder', 'ScalarSensor',
                               json.dumps(self.encoders_params['velocity']))
        self.network.addRegion('PitchEncoder', 'ScalarSensor',
                               json.dumps(self.encoders_params['pitch']))

        self.network.addRegion('SpatialPooler', 'py.SPRegion',
                               json.dumps(self.spatial_pooler_params))
        self.network.addRegion('TemporalMemory', 'py.TMRegion',
                               json.dumps(self.temporal_memory_params))

        # Creating outer classifiers for multifield prediction
        dclp = self.classifiers_params['duration']
        vclp = self.classifiers_params['velocity']
        pclp = self.classifiers_params['pitch']

        self.duration_classifier = SDRClassifier(
            steps=(1, ),
            verbosity=dclp['verbosity'],
            alpha=dclp['alpha'],
            actValueAlpha=dclp['actValueAlpha'])
        self.velocity_classifier = SDRClassifier(
            steps=(1, ),
            verbosity=vclp['verbosity'],
            alpha=vclp['alpha'],
            actValueAlpha=vclp['actValueAlpha'])
        self.pitch_classifier = SDRClassifier(
            steps=(1, ),
            verbosity=pclp['verbosity'],
            alpha=pclp['alpha'],
            actValueAlpha=pclp['actValueAlpha'])

        self._link_all_regions()
        self._enable_learning()
        self._enable_inference()

        self.network.initialize()

    def _link_all_regions(self):
        # Linking regions
        self.network.link('DurationEncoder', 'SpatialPooler', 'UniformLink',
                          '')
        self.network.link('VelocityEncoder', 'SpatialPooler', 'UniformLink',
                          '')
        self.network.link('PitchEncoder', 'SpatialPooler', 'UniformLink', '')
        self.network.link('SpatialPooler',
                          'TemporalMemory',
                          'UniformLink',
                          '',
                          srcOutput='bottomUpOut',
                          destInput='bottomUpIn')

    def _enable_learning(self):
        # Enable learning for all regions.
        self.network.regions["SpatialPooler"].setParameter("learningMode", 1)
        self.network.regions["TemporalMemory"].setParameter("learningMode", 1)

    def _enable_inference(self):
        # Enable inference for all regions.
        self.network.regions["SpatialPooler"].setParameter("inferenceMode", 1)
        self.network.regions["TemporalMemory"].setParameter("inferenceMode", 1)

    def train(self, duration, pitch, velocity):
        records_total = self.network.regions['SpatialPooler'].getSelf(
        ).getAlgorithmInstance().getIterationNum()

        self.network.regions['DurationEncoder'].setParameter(
            'sensedValue', duration)
        self.network.regions['PitchEncoder'].setParameter('sensedValue', pitch)
        self.network.regions['VelocityEncoder'].setParameter(
            'sensedValue', velocity)
        self.network.run(1)

        # Get the TM's active cells and the encoders' bucket indices to feed the classifiers
        active_cells = numpy.array(
            self.network.regions['TemporalMemory'].getOutputData(
                'bottomUpOut')).nonzero()[0]
        duration_bucket = numpy.array(
            self.network.regions['DurationEncoder'].getOutputData('bucket'))
        pitch_bucket = numpy.array(
            self.network.regions['PitchEncoder'].getOutputData('bucket'))
        velocity_bucket = numpy.array(
            self.network.regions['VelocityEncoder'].getOutputData('bucket'))

        duration_classifier_result = self.duration_classifier.compute(
            recordNum=records_total,
            patternNZ=active_cells,
            classification={
                'bucketIdx': duration_bucket[0],
                'actValue': duration
            },
            learn=True,
            infer=False)

        pitch_classifier_result = self.pitch_classifier.compute(
            recordNum=records_total,
            patternNZ=active_cells,
            classification={
                'bucketIdx': pitch_bucket[0],
                'actValue': pitch
            },
            learn=True,
            infer=False)

        velocity_classifier_result = self.velocity_classifier.compute(
            recordNum=records_total,
            patternNZ=active_cells,
            classification={
                'bucketIdx': velocity_bucket[0],
                'actValue': velocity
            },
            learn=True,
            infer=False)

    def generate(self, seed, output_dir, event_amount):
        records_total = self.network.regions['SpatialPooler'].getSelf(
        ).getAlgorithmInstance().getIterationNum()

        midi = pretty_midi.PrettyMIDI()
        midi_program = pretty_midi.instrument_name_to_program(
            'Acoustic Grand Piano')
        piano = pretty_midi.Instrument(program=midi_program)
        clock = 0
        for iters in tqdm(range(records_total, records_total + event_amount)):
            duration = seed[0]
            pitch = seed[1]
            velocity = seed[2]

            self.network.regions['DurationEncoder'].setParameter(
                'sensedValue', duration)
            self.network.regions['PitchEncoder'].setParameter(
                'sensedValue', pitch)
            self.network.regions['VelocityEncoder'].setParameter(
                'sensedValue', velocity)
            self.network.run(1)

            # Get the TM's active cells and the encoders' bucket indices to feed the classifiers
            active_cells = numpy.array(
                self.network.regions['TemporalMemory'].getOutputData(
                    'bottomUpOut')).nonzero()[0]

            duration_bucket = numpy.array(
                self.network.regions['DurationEncoder'].getOutputData(
                    'bucket'))

            pitch_bucket = numpy.array(
                self.network.regions['PitchEncoder'].getOutputData('bucket'))

            velocity_bucket = numpy.array(
                self.network.regions['VelocityEncoder'].getOutputData(
                    'bucket'))

            # Get the classifier predictions

            duration_classifier_result = self.duration_classifier.compute(
                recordNum=records_total,
                patternNZ=active_cells,
                classification={
                    'bucketIdx': duration_bucket[0],
                    'actValue': duration
                },
                learn=False,
                infer=True)

            pitch_classifier_result = self.pitch_classifier.compute(
                recordNum=records_total,
                patternNZ=active_cells,
                classification={
                    'bucketIdx': pitch_bucket[0],
                    'actValue': pitch
                },
                learn=False,
                infer=True)

            velocity_classifier_result = self.velocity_classifier.compute(
                recordNum=records_total,
                patternNZ=active_cells,
                classification={
                    'bucketIdx': velocity_bucket[0],
                    'actValue': velocity
                },
                learn=False,
                infer=True)

            du = duration_classifier_result[1].argmax()
            pi = pitch_classifier_result[1].argmax()
            ve = velocity_classifier_result[1].argmax()

            # Map the winning bucket indices back to their actual values
            predicted_duration = duration_classifier_result['actualValues'][du]
            predicted_pitch = pitch_classifier_result['actualValues'][pi]
            predicted_velocity = velocity_classifier_result['actualValues'][ve]

            note = pretty_midi.Note(velocity=int(predicted_velocity),
                                    pitch=int(predicted_pitch),
                                    start=float(clock),
                                    end=float(clock + predicted_duration))

            piano.notes.append(note)

            clock = clock + 0.25

            seed[0] = predicted_duration
            seed[1] = predicted_pitch
            seed[2] = predicted_velocity

        midi.instruments.append(piano)
        midi.remove_invalid_notes()
        time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        midi.write(output_dir + time + '.mid')

    def load_model(self, load_path):

        # Loading SpatialPooler
        print 'Loading SpatialPooler'
        with open(load_path + 'sp.bin', 'rb') as sp:
            sp_builder = SpatialPoolerProto.read(
                sp, traversal_limit_in_words=2**61)
        self.network.regions['SpatialPooler'].getSelf(
        )._sfdr = self.network.regions['SpatialPooler'].getSelf()._sfdr.read(
            sp_builder)

        # Loading TemporalMemory
        print 'Loading TemporalMemory'
        self.network.regions['TemporalMemory'].getSelf().getAlgorithmInstance(
        ).loadFromFile(load_path + 'tm.bin')

        # Loading duration classifier
        print 'Loading duration classifier'
        with open(load_path + 'dcl.bin', 'rb') as dcl:
            dcl_builder = SdrClassifierProto.read(
                dcl, traversal_limit_in_words=2**61)
        self.duration_classifier = self.duration_classifier.read(dcl_builder)

        # Loading pitch classifier
        print 'Loading pitch classifier'
        with open(load_path + 'pcl.bin', 'rb') as pcl:
            pcl_builder = SdrClassifierProto.read(
                pcl, traversal_limit_in_words=2**61)
        self.pitch_classifier = self.pitch_classifier.read(pcl_builder)

        # Loading velocity classifier
        print 'Loading velocity classifier'
        with open(load_path + 'vcl.bin', 'rb') as vcl:
            vcl_builder = SdrClassifierProto.read(
                vcl, traversal_limit_in_words=2**61)
        self.velocity_classifier = self.velocity_classifier.read(vcl_builder)

    def save_model(self, save_path):

        # Saving SpatialPooler
        print 'Saving SpatialPooler'
        sp_builder = SpatialPoolerProto.new_message()
        self.network.regions['SpatialPooler'].getSelf().getAlgorithmInstance(
        ).write(sp_builder)
        with open(save_path + 'sp.bin', 'w+b') as sp:
            sp_builder.write(sp)

        # Saving TemporalMemory
        print 'Saving TemporalMemory'
        self.network.regions['TemporalMemory'].getSelf().getAlgorithmInstance(
        ).saveToFile(save_path + 'tm.bin')

        # Saving duration classifier
        print 'Saving duration classifier'
        dcl_builder = SdrClassifierProto.new_message()
        self.duration_classifier.write(dcl_builder)
        with open(save_path + 'dcl.bin', 'w+b') as dcl:
            dcl_builder.write(dcl)

        # Saving pitch classifier
        print 'Saving pitch classifier'
        pcl_builder = SdrClassifierProto.new_message()
        self.pitch_classifier.write(pcl_builder)
        with open(save_path + 'pcl.bin', 'w+b') as pcl:
            pcl_builder.write(pcl)

        # Saving velocity classifier
        print 'Saving velocity classifier'
        vcl_builder = SdrClassifierProto.new_message()
        self.velocity_classifier.write(vcl_builder)
        with open(save_path + 'vcl.bin', 'w+b') as vcl:
            vcl_builder.write(vcl)
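
# A hypothetical skeleton of the model_params dict that HTMusicModel.__init__
# reads above. The nested keys mirror the constructor; every leaf value here
# is an illustrative assumption, not a tuned setting.
EXAMPLE_MODEL_PARAMS = {
    'Sensor': {},  # sensor parameters (stored by the constructor)
    'Encoders': {
        'duration': {'n': 512, 'w': 21, 'minValue': 0.0, 'maxValue': 4.0},
        'pitch': {'n': 512, 'w': 21, 'minValue': 0.0, 'maxValue': 127.0},
        'velocity': {'n': 512, 'w': 21, 'minValue': 0.0, 'maxValue': 127.0},
    },
    'SpatialPooler': {},   # py.SPRegion parameters go here
    'TemporalMemory': {},  # py.TMRegion parameters go here
    'Classifiers': {
        'duration': {'verbosity': 0, 'alpha': 0.1, 'actValueAlpha': 0.3},
        'pitch': {'verbosity': 0, 'alpha': 0.1, 'actValueAlpha': 0.3},
        'velocity': {'verbosity': 0, 'alpha': 0.1, 'actValueAlpha': 0.3},
    },
}
# model = HTMusicModel(EXAMPLE_MODEL_PARAMS)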
Example #34
def createNetwork(dataSource):
    """
    Create the network: sensor, SP, TP, and classifier.
    """
    network = Network()

    # create sensor region
    network.addRegion("sensor", "py.RecordSensor",
            json.dumps({"verbosity": 0}))
    sensor = network.regions["sensor"].getSelf()
    sensor.encoder = createEncoder()
    sensor.disabledEncoder = createCategoryEncoder()
    #sensor.dataSource = dataSource
    sensor.dataSource = DataBuffer()


    # create spatial pooler region
    print sensor.encoder.getWidth()
    SP_PARAMS["inputWidth"] = sensor.encoder.getWidth()
    network.addRegion("SP", "py.SPRegion", json.dumps(SP_PARAMS))

    # link sensor input <-> SP region
    # Each region's input/output names are defined in SPRegion.py, TPRegion.py,
    # and RecordSensor.py under regions/
    network.link("sensor", "SP", "UniformLink", "")
    network.link("sensor", "SP", "UniformLink", "",
            srcOutput="resetOut", destInput="resetIn")
    network.link("SP", "sensor", "UniformLink", "",                          # これ, なくしても何も変化なかったけど..
            srcOutput="spatialTopDownOut", destInput="spatialTopDownIn")
    network.link("SP", "sensor", "UniformLink", "",                          # これ, なくしても何も変化なかったけど..
            srcOutput="temporalTopDownOut", destInput="temporalTopDownIn")

    # create temporal pooler region
    network.addRegion("TP", "py.TPRegion",
            json.dumps(TP_PARAMS))

    network.link("SP", "TP", "UniformLink", "")
    network.link("TP", "SP", "UniformLink", "",                              # これ, なくしても何も変化なかったけど..
            srcOutput="topDownOut", destInput="topDownIn")

    # create classifier
    network.addRegion("Classifier", "py.CLAClassifierRegion",
            json.dumps(CLASSIFIER_PARAMS))

    network.link("TP", "Classifier", "UniformLink", "")
    network.link("sensor", "Classifier", "UniformLink", "",
            srcOutput="categoryOut", destInput="categoryIn")


    # initialize
    network.initialize()

    # setting sp
    SP = network.regions["SP"]
    SP.setParameter("learningMode", True)
    SP.setParameter("anomalyMode", True)

    # setting tp
    TP = network.regions["TP"]
    TP.setParameter("topDownMode", False)
    TP.setParameter("learningMode", True)
    TP.setParameter("inferenceMode", True)

    # Is this different from the OPF approach of adding an AnomalyClassifier?
    TP.setParameter("anomalyMode", False)

    # configure the classifier region
    classifier = network.regions["Classifier"]
    classifier.setParameter('inferenceMode', True)
    classifier.setParameter('learningMode', True)


    return network
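
# A run-loop sketch for the network above, assuming the DataBuffer data source
# exposes the push() method used by the FunctionRecogniter example later in
# this document; the record dicts are illustrative placeholders for whatever
# fields createEncoder() expects.
network = createNetwork(dataSource=None)  # the sensor uses its own DataBuffer
sensor = network.regions["sensor"].getSelf()
for record in [{"value": 1.0}, {"value": 2.0}]:  # hypothetical records
    sensor.dataSource.push(record)
    network.run(1)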
  def testCreateL4L6aLocationColumn(self):
    """
    Test 'createL4L6aLocationColumn' by inferring a set of hand crafted objects
    """
    scale = []
    orientation = []
    # Initialize L6a location region with 5 modules varying scale by sqrt(2) and
    # 4 different random orientations for each scale
    for i in xrange(5):
      for _ in xrange(4):
        angle = np.radians(random.gauss(7.5, 7.5))
        orientation.append(random.choice([angle, -angle]))
        scale.append(10.0 * (math.sqrt(2) ** i))

    net = Network()
    createL4L6aLocationColumn(net, {
      "inverseReadoutResolution": 8,
      "sensorInputSize": NUM_OF_CELLS,
      "L4Params": {
        "columnCount": NUM_OF_COLUMNS,
        "cellsPerColumn": CELLS_PER_COLUMN,
        "activationThreshold": 15,
        "minThreshold": 15,
        "initialPermanence": 1.0,
        "implementation": "ApicalTiebreak",
        "maxSynapsesPerSegment": -1
      },
      "L6aParams": {
        "moduleCount": len(scale),
        "scale": scale,
        "orientation": orientation,
        "anchorInputSize": NUM_OF_CELLS,
        "activationThreshold": 8,
        "initialPermanence": 1.0,
        "connectedPermanence": 0.5,
        "learningThreshold": 8,
        "sampleSize": 10,
        "permanenceIncrement": 0.1,
        "permanenceDecrement": 0.0,
        "bumpOverlapMethod": "probabilistic"
      }
    })
    net.initialize()

    L4 = net.regions['L4']
    L6a = net.regions['L6a']
    sensor = net.regions['sensorInput'].getSelf()
    motor = net.regions['motorInput'].getSelf()

    # Keeps a list of learned objects
    learnedRepresentations = defaultdict(list)

    # Learn Objects
    self._setLearning(net, True)

    for objectDescription in OBJECTS:
      reset = True
      previousLocation = None
      L6a.executeCommand(["activateRandomLocation"])

      for iFeature, feature in enumerate(objectDescription["features"]):
        # Move the sensor to the center of the object
        locationOnObject = np.array([feature["top"] + feature["height"] / 2.,
                                     feature["left"] + feature["width"] / 2.])

        # Calculate displacement from previous location
        if previousLocation is not None:
          motor.addDataToQueue(locationOnObject - previousLocation)
        previousLocation = locationOnObject

        # Sense feature at location
        sensor.addDataToQueue(FEATURE_ACTIVE_COLUMNS[feature["name"]], reset, 0)
        net.run(1)
        reset = False

        # Save learned representations
        representation = L6a.getOutputData("sensoryAssociatedCells")
        representation = representation.nonzero()[0]
        learnedRepresentations[
          (objectDescription["name"], iFeature)] = representation

    # Infer objects
    self._setLearning(net, False)

    for objectDescription in OBJECTS:
      reset = True
      previousLocation = None
      inferred = False

      features = objectDescription["features"]
      touchSequence = range(len(features))
      random.shuffle(touchSequence)

      for iFeature in touchSequence:
        feature = features[iFeature]

        # Move the sensor to the center of the object
        locationOnObject = np.array([feature["top"] + feature["height"] / 2.,
                                     feature["left"] + feature["width"] / 2.])

        # Calculate displacement from previous location
        if previousLocation is not None:
          motor.addDataToQueue(locationOnObject - previousLocation)
        previousLocation = locationOnObject

        # Sense feature at location
        sensor.addDataToQueue(FEATURE_ACTIVE_COLUMNS[feature["name"]], reset, 0)
        net.run(1)
        reset = False

        representation = L6a.getOutputData("sensoryAssociatedCells")
        representation = representation.nonzero()[0]
        target_representations = set(
          learnedRepresentations[(objectDescription["name"], iFeature)])

        inferred = (set(representation) <= target_representations)
        if inferred:
          break

      self.assertTrue(inferred)
class FunctionRecogniter():

    def __init__(self):
        from collections import OrderedDict

        self.run_number = 0

        # for classifier
        self.classifier_encoder_list = {}
        self.classifier_input_list   = {}
        self.prevPredictedColumns    = {}

        self.selectivity = "region1"

        # net structure
        self.net_structure = OrderedDict()
        self.net_structure['sensor1'] = ['region1']
        # self.net_structure['sensor2'] = ['region2']
        # self.net_structure['sensor3'] = ['region3']
        # self.net_structure['region1'] = ['region4']
        # self.net_structure['region2'] = ['region4']

        # sensor change params
        self.sensor_params = {
                'sensor1': {
                    'xy_value': {
                        'maxval': 100.0,
                        'minval':  0.0
                        },
                    },
                # 'sensor2': {
                #     'xy_value': {
                #         'maxval': 80.0,
                #         'minval': 20.0
                #         },
                #     },
                # 'sensor3': {
                #     'xy_value': {
                #         'maxval': 100.0,
                #         'minval':  40.0
                #         },
                #     },
                }

        # region change params
        self.dest_region_data = {
                'region1': {
                    'SP_PARAMS':{
                        "columnCount": 2024,
                        "numActiveColumnsPerInhArea": 20,
                        },
                    'TP_PARAMS':{
                        "cellsPerColumn": 16
                        },
                    },
                # 'region2': {
                #     'TP_PARAMS':{
                #         "cellsPerColumn": 8
                #         },
                #     },
                # 'region3': {
                #     'TP_PARAMS':{
                #         "cellsPerColumn": 8
                #         },
                #     },
                # 'region4': {
                #     'SP_PARAMS':{
                #         "inputWidth": 2024 * (4 + 8)
                #         },
                #     'TP_PARAMS':{
                #         "cellsPerColumn": 16
                #         },
                #     },
                 }

        self._createNetwork()


        # for evaluating network accuracy
        self.evaluation = NetworkEvaluation()


    def _addRegion(self, src_name, dest_name, params):
        import json
        from nupic.encoders import MultiEncoder

        sensor     =  src_name
        sp_name    = "sp_" + dest_name
        tp_name    = "tp_" + dest_name
        class_name = "class_" + dest_name

        try:
            self.network.regions[sp_name]
            self.network.regions[tp_name]
            self.network.regions[class_name]

            self.network.link(sensor, sp_name, "UniformLink", "")

        except Exception as e:
            # sp
            self.network.addRegion(sp_name, "py.SPRegion", json.dumps(params['SP_PARAMS']))
            self.network.link(sensor, sp_name, "UniformLink", "")

            # tp
            self.network.addRegion(tp_name, "py.TPRegion", json.dumps(params['TP_PARAMS']))
            self.network.link(sp_name, tp_name, "UniformLink", "")

            # class
            self.network.addRegion( class_name, "py.CLAClassifierRegion", json.dumps(params['CLASSIFIER_PARAMS']))
            self.network.link(tp_name, class_name, "UniformLink", "")

            encoder = MultiEncoder()
            encoder.addMultipleEncoders(params['CLASSIFIER_ENCODE_PARAMS'])
            self.classifier_encoder_list[class_name]  = encoder
            self.classifier_input_list[class_name]    = tp_name

    def _initRegion(self, name):
        sp_name = "sp_"+ name
        tp_name = "tp_"+ name
        class_name = "class_"+ name

        # setting sp
        SP = self.network.regions[sp_name]
        SP.setParameter("learningMode", True)
        SP.setParameter("anomalyMode", True)

        # setting tp
        TP = self.network.regions[tp_name]
        TP.setParameter("topDownMode", False)
        TP.setParameter("learningMode", True)
        TP.setParameter("inferenceMode", True)
        TP.setParameter("anomalyMode", False)

        # configure the classifier region
        classifier = self.network.regions[class_name]
        classifier.setParameter('inferenceMode', True)
        classifier.setParameter('learningMode', True)


    def _createNetwork(self):

        def deepupdate(original, update):
            """
            Recursively update a dict.
            Sub-dicts are not overwritten; they are merged recursively.
            """
            for key, value in original.iteritems():
                if not key in update:
                    update[key] = value
                elif isinstance(value, dict):
                    deepupdate(value, update[key])
            return update
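        # Example (hypothetical values):
        #   deepupdate({'a': {'x': 1, 'y': 2}}, {'a': {'y': 3}})
        #   => {'a': {'x': 1, 'y': 3}}  (nested keys are merged, not replaced)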


        from nupic.algorithms.anomaly import computeAnomalyScore
        from nupic.encoders import MultiEncoder
        from nupic.engine import Network
        import create_network as cn
        import json
        import itertools


        self.network = Network()

        # sensor
        for sensor_name, change_params in self.sensor_params.items():
            self.network.addRegion(sensor_name, "py.RecordSensor", json.dumps({"verbosity": 0}))
            sensor = self.network.regions[sensor_name].getSelf()

            # set encoder
            params = deepupdate(cn.SENSOR_PARAMS, change_params)
            encoder = MultiEncoder()
            encoder.addMultipleEncoders( params )
            sensor.encoder         = encoder

            # set datasource
            sensor.dataSource      = cn.DataBuffer()


        # network
        print 'create network ...'
        for source, dest_list in self.net_structure.items():
            for dest in dest_list:
                change_params = self.dest_region_data[dest]
                params = deepupdate(cn.PARAMS, change_params)

                if source in self.sensor_params.keys():
                    sensor = self.network.regions[source].getSelf()
                    params['SP_PARAMS']['inputWidth'] = sensor.encoder.getWidth()
                    self._addRegion(source, dest, params)
                else:
                    self._addRegion("tp_" + source, dest, params)

        # initialize
        print 'initializing network ...'
        self.network.initialize()
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            self._initRegion(name)


        # TODO: In a 1-3-1 structure, wouldn't blindly increasing the number
        #       of TP cells be counterproductive?

        return


    def run(self, input_data, learn=True):
        """
        Run the network.
        To train, pass learn=True and specify ftype.
        To predict, pass learn=False and ftype=None.
        (Prediction happens during training as well.)

        input_data = {'xy_value': [1.0, 2.0], 'ftype': 'sin'}
        """
        import itertools

        self.enable_learning_mode(learn)
        self.run_number += 1

        # calc encoder, SP, TP
        for sensor_name in self.sensor_params.keys():
            self.network.regions[sensor_name].getSelf().dataSource.push(input_data)
        self.network.run(1)
        #self.layer_output(input_data)
        #self.debug(input_data)


        # learn classifier
        inferences = {}
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            class_name = "class_" + name
            inferences['classifier_'+name]   = self._learn_classifier_multi(class_name, actValue=input_data['ftype'], pstep=0)

        # anomaly
        inferences["anomaly"] = self._calc_anomaly()

        # selectivity
        if input_data['ftype'] is not None and inferences["anomaly"][self.selectivity] < 0.7:
        #if input_data['ftype'] is not None and input_data['xy_value'][0] > 40 and input_data['xy_value'][0] < 60:
            tp_bottomUpOut = self.network.regions[ "tp_" + self.selectivity ].getOutputData("bottomUpOut").nonzero()[0]
            self.evaluation.save_cell_activity(tp_bottomUpOut, input_data['ftype'])

        return inferences


    def _learn_classifier_multi(self, region_name, actValue=None, pstep=0):
        """
        Run the classifier computation.

        Ideally this would happen inside network.run() instead of calling
        customCompute() directly, but it is unclear how to retrieve the
        results that way.
        """

        # TODO: Fully decouple the network from the classifier.
        #       The network should only compute sensor, SP, and TP;
        #       evaluating/consuming those results should happen outside.

        classifier     = self.network.regions[region_name]
        encoder        = self.classifier_encoder_list[region_name].getEncoderList()[0]
        class_input    = self.classifier_input_list[region_name]
        tp_bottomUpOut = self.network.regions[class_input].getOutputData("bottomUpOut").nonzero()[0]
        #tp_bottomUpOut = self.network.regions["TP"].getSelf()._tfdr.infActiveState['t'].reshape(-1).nonzero()[0]

        if actValue is not None:
            bucketIdx = encoder.getBucketIndices(actValue)[0]
            classificationIn = {
                    'bucketIdx': bucketIdx,
                    'actValue': actValue
                    }
        else:
            classificationIn = {'bucketIdx': 0,'actValue': 'no'}
        clResults = classifier.getSelf().customCompute(
                recordNum=self.run_number,
                patternNZ=tp_bottomUpOut,
                classification=classificationIn
                )

        inferences = self._get_inferences(clResults, pstep, summary_type='sum')

        return inferences

    def _get_inferences(self, clResults, steps, summary_type='sum'):
        """
        Reshape the classifier results into a more convenient form.
        """
        from collections import defaultdict

        likelihoodsVec = clResults[steps]
        bucketValues   = clResults['actualValues']

        likelihoodsDict = defaultdict(int)
        bestActValue = None
        bestProb = None

        if summary_type == 'sum':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                likelihoodsDict[actValue] += prob
                if bestProb is None or likelihoodsDict[actValue] > bestProb:
                    bestProb = likelihoodsDict[actValue]
                    bestActValue = actValue

        elif summary_type == 'best':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                if bestProb is None or prob > bestProb:
                    likelihoodsDict[actValue] = prob
                    bestProb = prob
                    bestActValue = actValue

        return {'likelihoodsDict': likelihoodsDict, 'best': {'value': bestActValue, 'prob':bestProb}}


    def _calc_anomaly(self):
        """
        Compute the anomaly score for each layer.
        """
        import copy
        import itertools
        from nupic.algorithms.anomaly import computeAnomalyScore

        score = 0
        anomalyScore = {}
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            sp_bottomUpOut = self.network.regions["sp_"+name].getOutputData("bottomUpOut").nonzero()[0]
            if name in self.prevPredictedColumns:
                score = computeAnomalyScore(sp_bottomUpOut, self.prevPredictedColumns[name])
            #topdown_predict = self.network.regions["TP"].getSelf()._tfdr.topDownCompute().copy().nonzero()[0]
            topdown_predict = self.network.regions["tp_"+name].getSelf()._tfdr.topDownCompute().nonzero()[0]
            self.prevPredictedColumns[name] = copy.deepcopy(topdown_predict)

            anomalyScore[name] = score

        return anomalyScore

    def reset(self):
        """
        reset sequence
        """
        import itertools
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            self.network.regions["tp_"+name].getSelf().resetSequenceStates()

    def enable_learning_mode(self, enable):
        """
        Set learningMode for the SP, TP, and Classifier in each layer.
        """
        import itertools
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            self.network.regions["sp_"+name].setParameter("learningMode", enable)
            self.network.regions["tp_"+name].setParameter("learningMode", enable)
            self.network.regions["class_"+name].setParameter("learningMode", enable)


    def print_inferences(self, input_data, inferences):
        """
        Print the inference results.
        """
        import itertools

        print "%10s, %10s, %5s" % (
                int(input_data['xy_value'][0]),
                int(input_data['xy_value'][1]),
                input_data['ftype']),

        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            print "%5s," % (inferences['classifier_'+name]['best']['value']),

        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            print "%10.6f," % (inferences['classifier_'+name]['likelihoodsDict'][input_data['ftype']]),

        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            print "%5s," % (str(inferences["anomaly"][name])),
        print
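
For reference, the dictionary returned by _get_inferences can be reproduced in isolation. The sketch below is a minimal, self-contained illustration of the two summarization modes ('sum' merges the probabilities of duplicate bucket values, 'best' keeps only the single most likely bucket); the sample bucket values and likelihoods are made up for the demonstration.

from collections import defaultdict

def summarize(bucketValues, likelihoodsVec, summary_type='sum'):
    # Same logic as _get_inferences above, extracted for illustration.
    likelihoodsDict = defaultdict(int)
    bestActValue, bestProb = None, None
    for actValue, prob in zip(bucketValues, likelihoodsVec):
        if summary_type == 'sum':
            likelihoodsDict[actValue] += prob
            if bestProb is None or likelihoodsDict[actValue] > bestProb:
                bestProb = likelihoodsDict[actValue]
                bestActValue = actValue
        elif summary_type == 'best':
            if bestProb is None or prob > bestProb:
                likelihoodsDict[actValue] = prob
                bestProb = prob
                bestActValue = actValue
    return {'likelihoodsDict': likelihoodsDict,
            'best': {'value': bestActValue, 'prob': bestProb}}

# Duplicate bucket values ('sin') are merged under 'sum':
result = summarize(['sin', 'cos', 'sin'], [0.2, 0.3, 0.25])
print result['best']  # best value is 'sin' with summed probability 0.45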
예제 #37
0
    def reset(self, params, repetition):
        """
    Take the steps necessary to reset the experiment before each repetition:
      - Make sure random seed is different for each repetition
      - Create the L2-L4-L6a network
      - Generate objects used by the experiment
      - Learn all objects used by the experiment
    """
        print params["name"], ":", repetition

        self.debug = params.get("debug", False)

        L2Params = json.loads('{' + params["l2_params"] + '}')
        L4Params = json.loads('{' + params["l4_params"] + '}')
        L6aParams = json.loads('{' + params["l6a_params"] + '}')

        # Make sure random seed is different for each repetition
        seed = params.get("seed", 42)
        np.random.seed(seed + repetition)
        random.seed(seed + repetition)
        L2Params["seed"] = seed + repetition
        L4Params["seed"] = seed + repetition
        L6aParams["seed"] = seed + repetition

        # Configure L6a params
        numModules = params["num_modules"]
        L6aParams["scale"] = [params["scale"]] * numModules
        angle = params["angle"] / numModules
        orientation = range(angle / 2, angle * numModules, angle)
        L6aParams["orientation"] = np.radians(orientation).tolist()

        # Create multi-column L2-L4-L6a network
        self.numColumns = params["num_cortical_columns"]
        network = Network()
        network = createMultipleL246aLocationColumn(
            network=network,
            numberOfColumns=self.numColumns,
            L2Params=L2Params,
            L4Params=L4Params,
            L6aParams=L6aParams)
        network.initialize()

        self.network = network
        self.sensorInput = []
        self.motorInput = []
        self.L2Regions = []
        self.L4Regions = []
        self.L6aRegions = []
        for i in xrange(self.numColumns):
            col = str(i)
            self.sensorInput.append(network.regions["sensorInput_" +
                                                    col].getSelf())
            self.motorInput.append(network.regions["motorInput_" +
                                                   col].getSelf())
            self.L2Regions.append(network.regions["L2_" + col])
            self.L4Regions.append(network.regions["L4_" + col])
            self.L6aRegions.append(network.regions["L6a_" + col])

        # Use the number of iterations as the number of objects. This will allow us
        # to execute one iteration per object and use the "iteration" parameter as
        # the object index
        numObjects = params["iterations"]

        # Generate feature SDRs
        numFeatures = params["num_features"]
        numOfMinicolumns = L4Params["columnCount"]
        numOfActiveMinicolumns = params["num_active_minicolumns"]
        self.featureSDR = [{
            str(f):
            sorted(np.random.choice(numOfMinicolumns, numOfActiveMinicolumns))
            for f in xrange(numFeatures)
        } for _ in xrange(self.numColumns)]

        # Generate objects used in the experiment
        self.objects = generateObjects(
            numObjects=numObjects,
            featuresPerObject=params["features_per_object"],
            objectWidth=params["object_width"],
            numFeatures=numFeatures,
            distribution=params["feature_distribution"])

        # Make sure the objects are unique
        uniqueObjs = np.unique([{
            "features": obj["features"]
        } for obj in self.objects])
        assert len(uniqueObjs) == len(self.objects)

        self.sdrSize = L2Params["sdrSize"]

        # Learn objects
        self.numLearningPoints = params["num_learning_points"]
        self.numOfSensations = params["num_sensations"]
        self.learnedObjects = {}
        self.learn()
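
As a worked illustration of the orientation setup above (hypothetical numbers, not taken from any experiment config): with angle = 60 and num_modules = 4, each module gets a 15-degree slice, and the Python 2 integer arithmetic centers each module inside its slice.

import numpy as np

numModules = 4
angle = 60 / numModules                                    # 15 degrees per module
orientation = range(angle / 2, angle * numModules, angle)
print orientation                                          # [7, 22, 37, 52] (15/2 truncates to 7)
print np.radians(orientation).tolist()                     # what ends up in L6aParams["orientation"]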
예제 #38
0
    def testSerialization(self):
        n = Network()

        imageDims = (42, 38)
        params = dict(width=imageDims[0],
                      height=imageDims[1],
                      mode="bw",
                      background=1,
                      invertOutput=1)

        sensor = n.addRegion("sensor", "py.ImageSensor", json.dumps(params))
        sensor.setDimensions(Dimensions(imageDims[0], imageDims[1]))

        params = dict(inputShape=imageDims,
                      coincidencesShape=imageDims,
                      disableTemporal=1,
                      tpSeed=43,
                      spSeed=42,
                      nCellsPerCol=1)

        l1 = n.addRegion("l1", "py.CLARegion", json.dumps(params))

        params = dict(maxCategoryCount=48,
                      SVDSampleCount=400,
                      SVDDimCount=5,
                      distanceNorm=0.6)

        _classifier = n.addRegion("classifier", "py.KNNClassifierRegion",
                                  json.dumps(params))

        # TODO: link params should not be required. Dest region dimensions are
        # already specified as [1]
        params = dict(mapping="in", rfSize=imageDims)

        n.link("sensor", "l1", "UniformLink", json.dumps(params))
        n.link("l1", "classifier", "UniformLink", "", "bottomUpOut",
               "bottomUpIn")
        n.link("sensor", "classifier", "UniformLink", "", "categoryOut",
               "categoryIn")
        n.initialize()

        n.save("fdr.nta")

        # Make sure the network bundle has all the expected files
        self.assertTrue(os.path.exists("fdr.nta/network.yaml"))
        self.assertTrue(os.path.exists("fdr.nta/R0-pkl"))
        self.assertTrue(os.path.exists("fdr.nta/R1-pkl"))
        self.assertTrue(os.path.exists("fdr.nta/R2-pkl"))

        n2 = Network("fdr.nta")
        n2.initialize()  # should not fail

        # Make sure the network is actually the same
        sensor = n2.regions['sensor']
        self.assertEqual(sensor.type, "py.ImageSensor")
        # would like to directly compare, but can't -- NPC-6
        self.assertEqual(str(sensor.dimensions), str(Dimensions(42, 38)))
        self.assertEqual(sensor.getParameter("width"), 42)
        self.assertEqual(sensor.getParameter("height"), 38)
        self.assertEqual(sensor.getParameter("mode"), "bw")
        self.assertEqual(sensor.getParameter("background"), 1)
        self.assertEqual(sensor.getParameter("invertOutput"), 1)

        l1 = n2.regions['l1']
        self.assertEqual(l1.type, "py.CLARegion")
        self.assertEqual(str(l1.dimensions), str(Dimensions(1)))
        a = l1.getParameter("inputShape")
        self.assertEqual(len(a), 2)
        self.assertEqual(a[0], 42)
        self.assertEqual(a[1], 38)

        a = l1.getParameter("coincidencesShape")
        self.assertEqual(len(a), 2)
        self.assertEqual(a[0], 42)
        self.assertEqual(a[1], 38)

        self.assertEqual(l1.getParameter("disableTemporal"), 1)
        self.assertEqual(l1.getParameter("spSeed"), 42)
        self.assertEqual(l1.getParameter("tpSeed"), 43)

        cl = n2.regions['classifier']
        self.assertEqual(cl.type, "py.KNNClassifierRegion")
        self.assertEqual(cl.getParameter("maxCategoryCount"), 48)
        self.assertEqual(cl.getParameter("SVDSampleCount"), 400)
        self.assertEqual(cl.getParameter("SVDDimCount"), 5)
        self.assertLess(abs(cl.getParameter("distanceNorm") - 0.6), 0.0001)
        self.assertEqual(str(cl.dimensions), str(Dimensions(1)))

        n2.save("fdr2.nta")

        # now compare the two network bundles -- should be the same
        c = filecmp.dircmp("fdr.nta", "fdr2.nta")
        self.assertEqual(len(c.left_only), 0,
                         "fdr.nta has extra files: %s" % c.left_only)

        self.assertEqual(len(c.right_only), 0,
                         "fdr2.nta has extra files: %s" % c.right_only)

        if len(c.diff_files) > 0:
            _LOGGER.warn(
                "Some bundle files differ: %s\n"
                "This is expected, as pickle.load() followed by "
                "pickle.dump() doesn't produce the same file", c.diff_files)
예제 #39
0
def createNetwork(dataSource):
  """Create the Network instance.

  The network has a sensor region reading data from `dataSource` and passing
  the encoded representation to an SPRegion. The SPRegion output is passed to
  a TPRegion.

  :param dataSource: a RecordStream instance to get data from
  :returns: a Network instance ready to run
  """
  network = Network()

  # Our input is sensor data from the gym file. The RecordSensor region
  # allows us to specify a file record stream as the input source via the
  # dataSource attribute.
  network.addRegion("sensor", "py.RecordSensor",
                    json.dumps({"verbosity": _VERBOSITY}))
  sensor = network.regions["sensor"].getSelf()
  # The RecordSensor needs to know how to encode the input values
  sensor.encoder = createEncoder()
  # Specify the dataSource as a file record stream instance
  sensor.dataSource = dataSource

  # Create the spatial pooler region
  SP_PARAMS["inputWidth"] = sensor.encoder.getWidth()
  network.addRegion("spatialPoolerRegion", "py.SPRegion", json.dumps(SP_PARAMS))

  # Link the SP region to the sensor input
  network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
  network.link("sensor", "spatialPoolerRegion", "UniformLink", "",
               srcOutput="resetOut", destInput="resetIn")
  network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
               srcOutput="spatialTopDownOut", destInput="spatialTopDownIn")
  network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
               srcOutput="temporalTopDownOut", destInput="temporalTopDownIn")

  # Add the TPRegion on top of the SPRegion
  network.addRegion("temporalPoolerRegion", "py.TPRegion",
                    json.dumps(TP_PARAMS))

  network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink", "")
  network.link("temporalPoolerRegion", "spatialPoolerRegion", "UniformLink", "",
               srcOutput="topDownOut", destInput="topDownIn")

  # Add the AnomalyRegion on top of the TPRegion
  network.addRegion("anomalyRegion", "py.AnomalyRegion", json.dumps({}))

  network.link("spatialPoolerRegion", "anomalyRegion", "UniformLink", "",
               srcOutput="bottomUpOut", destInput="activeColumns")
  network.link("temporalPoolerRegion", "anomalyRegion", "UniformLink", "",
               srcOutput="topDownOut", destInput="predictedColumns")

  network.initialize()

  spatialPoolerRegion = network.regions["spatialPoolerRegion"]

  # Make sure learning is enabled
  spatialPoolerRegion.setParameter("learningMode", True)
  # We want temporal anomalies so disable anomalyMode in the SP. This mode is
  # used for computing anomalies in a non-temporal model.
  spatialPoolerRegion.setParameter("anomalyMode", False)

  temporalPoolerRegion = network.regions["temporalPoolerRegion"]

  # Enable topDownMode to get the predicted columns output
  temporalPoolerRegion.setParameter("topDownMode", True)
  # Make sure learning is enabled (this is the default)
  temporalPoolerRegion.setParameter("learningMode", True)
  # Enable inference mode so we get predictions
  temporalPoolerRegion.setParameter("inferenceMode", True)
  # Enable anomalyMode to compute the anomaly score. This actually doesn't work
  # now so doesn't matter. We instead compute the anomaly score based on
  # topDownOut (predicted columns) and SP bottomUpOut (active columns).
  temporalPoolerRegion.setParameter("anomalyMode", True)

  return network
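
A minimal driver loop for this network might look like the following sketch. It assumes the standard nupic output name "rawAnomalyScore" on py.AnomalyRegion; the number of records is arbitrary.

def runNetwork(network, numRecords=100):
    anomalyRegion = network.regions["anomalyRegion"]
    for i in xrange(numRecords):
        # Each run(1) pulls one record from the sensor's dataSource and
        # pushes it through SP -> TP -> anomaly.
        network.run(1)
        score = anomalyRegion.getOutputData("rawAnomalyScore")[0]
        print "record %4d  anomaly score: %.3f" % (i, score)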
예제 #40
0
def createNetwork(dataSource):
    """Create the Network instance.

  The network has a sensor region reading data from `dataSource` and passing
  the encoded representation to an SPRegion. The SPRegion output is passed to
  a TPRegion.

  :param dataSource: a RecordStream instance to get data from
  :returns: a Network instance ready to run
  """
    network = Network()

    # Our input is sensor data from the gym file. The RecordSensor region
    # allows us to specify a file record stream as the input source via the
    # dataSource attribute.
    network.addRegion("sensor", "py.RecordSensor",
                      json.dumps({"verbosity": _VERBOSITY}))
    sensor = network.regions["sensor"].getSelf()
    # The RecordSensor needs to know how to encode the input values
    sensor.encoder = createEncoder()
    # Specify the dataSource as a file record stream instance
    sensor.dataSource = dataSource

    # Create the spatial pooler region
    SP_PARAMS["inputWidth"] = sensor.encoder.getWidth()
    network.addRegion("spatialPoolerRegion", "py.SPRegion",
                      json.dumps(SP_PARAMS))

    # Link the SP region to the sensor input
    network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
    network.link("sensor",
                 "spatialPoolerRegion",
                 "UniformLink",
                 "",
                 srcOutput="resetOut",
                 destInput="resetIn")
    network.link("spatialPoolerRegion",
                 "sensor",
                 "UniformLink",
                 "",
                 srcOutput="spatialTopDownOut",
                 destInput="spatialTopDownIn")
    network.link("spatialPoolerRegion",
                 "sensor",
                 "UniformLink",
                 "",
                 srcOutput="temporalTopDownOut",
                 destInput="temporalTopDownIn")

    # Add the TPRegion on top of the SPRegion
    network.addRegion("temporalPoolerRegion", "py.TPRegion",
                      json.dumps(TP_PARAMS))

    network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink",
                 "")
    network.link("temporalPoolerRegion",
                 "spatialPoolerRegion",
                 "UniformLink",
                 "",
                 srcOutput="topDownOut",
                 destInput="topDownIn")

    network.initialize()

    spatialPoolerRegion = network.regions["spatialPoolerRegion"]

    # Make sure learning is enabled
    spatialPoolerRegion.setParameter("learningMode", True)
    # We want temporal anomalies so disable anomalyMode in the SP. This mode is
    # used for computing anomalies in a non-temporal model.
    spatialPoolerRegion.setParameter("anomalyMode", False)

    temporalPoolerRegion = network.regions["temporalPoolerRegion"]

    # Enable topDownMode to get the predicted columns output
    temporalPoolerRegion.setParameter("topDownMode", True)
    # Make sure learning is enabled (this is the default)
    temporalPoolerRegion.setParameter("learningMode", True)
    # Enable inference mode so we get predictions
    temporalPoolerRegion.setParameter("inferenceMode", True)
    # Enable anomalyMode to compute the anomaly score. This actually doesn't work
    # now so doesn't matter. We instead compute the anomaly score based on
    # topDownOut (predicted columns) and SP bottomUpOut (active columns).
    temporalPoolerRegion.setParameter("anomalyMode", True)

    return network
예제 #41
0
class ClaClassifier(object):
    def __init__(self, net_structure, sensor_params, dest_region_params,
                 class_encoder_params):

        self.run_number = 0

        # for classifier
        self.classifier_encoder_list = {}
        self.classifier_input_list = {}
        self.prevPredictedColumns = {}

        # TODO: parameter we would like to get rid of
        self.predict_value = class_encoder_params.keys()[0]
        self.predict_step = 0

        # default param
        self.default_params = {
            'SP_PARAMS': {
                "spVerbosity": 0,
                "spatialImp": "cpp",
                "globalInhibition": 1,
                "columnCount": 2024,
                "inputWidth": 0,  # set later
                "numActiveColumnsPerInhArea": 20,
                "seed": 1956,
                "potentialPct": 0.8,
                "synPermConnected": 0.1,
                "synPermActiveInc": 0.05,
                "synPermInactiveDec": 0.0005,
                "maxBoost": 2.0,
            },
            'TP_PARAMS': {
                "verbosity": 0,
                "columnCount": 2024,
                "cellsPerColumn": 32,
                "inputWidth": 2024,
                "seed": 1960,
                "temporalImp": "cpp",
                "newSynapseCount": 20,
                "maxSynapsesPerSegment": 32,
                "maxSegmentsPerCell": 128,
                "initialPerm": 0.21,
                "permanenceInc": 0.2,
                "permanenceDec": 0.1,
                "globalDecay": 0.0,
                "maxAge": 0,
                "minThreshold": 12,
                "activationThreshold": 16,
                "outputType": "normal",
                "pamLength": 1,
            },
            'CLASSIFIER_PARAMS': {
                "clVerbosity": 0,
                "alpha": 0.005,
                "steps": "0"
            }
        }

        # tp
        self.tp_enable = True

        # net structure
        self.net_structure = OrderedDict()
        self.net_structure['sensor3'] = ['region1']
        self.net_structure['region1'] = ['region2']

        self.net_structure = net_structure

        # region change params
        self.dest_region_params = dest_region_params

        # sensor change params
        self.sensor_params = sensor_params

        self.class_encoder_params = class_encoder_params

        self._createNetwork()

    def _makeRegion(self, name, params):
        sp_name = "sp_" + name
        if self.tp_enable:
            tp_name = "tp_" + name
        class_name = "class_" + name

        # addRegion
        self.network.addRegion(sp_name, "py.SPRegion",
                               json.dumps(params['SP_PARAMS']))
        if self.tp_enable:
            self.network.addRegion(tp_name, "py.TPRegion",
                                   json.dumps(params['TP_PARAMS']))
        self.network.addRegion(class_name, "py.CLAClassifierRegion",
                               json.dumps(params['CLASSIFIER_PARAMS']))

        encoder = MultiEncoder()
        encoder.addMultipleEncoders(self.class_encoder_params)
        self.classifier_encoder_list[class_name] = encoder
        if self.tp_enable:
            self.classifier_input_list[class_name] = tp_name
        else:
            self.classifier_input_list[class_name] = sp_name

    def _linkRegion(self, src_name, dest_name):
        sensor = src_name
        sp_name = "sp_" + dest_name
        tp_name = "tp_" + dest_name
        class_name = "class_" + dest_name

        if self.tp_enable:
            self.network.link(sensor, sp_name, "UniformLink", "")
            self.network.link(sp_name, tp_name, "UniformLink", "")
            self.network.link(tp_name, class_name, "UniformLink", "")
        else:
            self.network.link(sensor, sp_name, "UniformLink", "")
            self.network.link(sp_name, class_name, "UniformLink", "")

    def _initRegion(self, name):
        sp_name = "sp_" + name
        tp_name = "tp_" + name
        class_name = "class_" + name

        # setting sp
        SP = self.network.regions[sp_name]
        SP.setParameter("learningMode", True)
        SP.setParameter("anomalyMode", True)

        # # setting tp
        if self.tp_enable:
            TP = self.network.regions[tp_name]
            TP.setParameter("topDownMode", False)
            TP.setParameter("learningMode", True)
            TP.setParameter("inferenceMode", True)
            TP.setParameter("anomalyMode", False)

        # Configure the classifier region.
        classifier = self.network.regions[class_name]
        classifier.setParameter('inferenceMode', True)
        classifier.setParameter('learningMode', True)

    def _createNetwork(self):
        def deepupdate(original, update):
            """
            Recursively update a dict.
            Subdict's won't be overwritten but also updated.
            """
            if update is None:
                return None
            for key, value in original.iteritems():
                if key not in update:
                    update[key] = value
                elif isinstance(value, dict):
                    deepupdate(value, update[key])
            return update
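
        # Illustration of deepupdate (hypothetical values): overrides are
        # merged into the defaults without wiping untouched sub-keys, e.g.
        #   deepupdate({'SP_PARAMS': {'columnCount': 2024, 'seed': 1956}},
        #              {'SP_PARAMS': {'seed': 42}})
        #   -> {'SP_PARAMS': {'columnCount': 2024, 'seed': 42}}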

        self.network = Network()

        # check
        # if self.selectivity not in self.dest_region_params.keys():
        #     raise Exception, "There is no selected region : " + self.selectivity
        if not len(self.net_structure.keys()) == len(
                set(self.net_structure.keys())):
            raise Exception("Duplicated keys in net_structure: %s" %
                            self.net_structure.keys())

        # sensor
        for sensor_name, params in self.sensor_params.items():
            self.network.addRegion(sensor_name, "py.RecordSensor",
                                   json.dumps({"verbosity": 0}))
            sensor = self.network.regions[sensor_name].getSelf()

            # set encoder
            #params = deepupdate(cn.SENSOR_PARAMS, params)
            encoder = MultiEncoder()
            encoder.addMultipleEncoders(params)
            sensor.encoder = encoder
            sensor.dataSource = DataBuffer()

        # network
        print 'create element ...'
        for name in self.dest_region_params.keys():
            change_params = self.dest_region_params[name]
            params = deepupdate(self.default_params, change_params)
            # input width
            input_width = 0
            for source in [
                    s for s, d in self.net_structure.items() if name in d
            ]:
                if source in self.sensor_params.keys():
                    sensor = self.network.regions[source].getSelf()
                    input_width += sensor.encoder.getWidth()
                else:
                    input_width += params['TP_PARAMS'][
                        'cellsPerColumn'] * params['TP_PARAMS']['columnCount']

            params['SP_PARAMS']['inputWidth'] = input_width
            self._makeRegion(name, params)

        # link
        print 'link network ...'
        for source, dest_list in self.net_structure.items():
            for dest in dest_list:
                if source in self.sensor_params.keys():
                    self._linkRegion(source, dest)
                else:
                    if self.tp_enable:
                        self._linkRegion("tp_" + source, dest)
                    else:
                        self._linkRegion("sp_" + source, dest)

        # initialize
        print 'initializing network ...'
        self.network.initialize()
        for name in self.dest_region_params.keys():
            self._initRegion(name)

        return

    #@profile
    def run(self, input_data, learn=True, class_learn=True, learn_layer=None):
        """
        networkの実行.
        学習したいときは, learn=True, ftypeを指定する.
        予測したいときは, learn=False, ftypeはNoneを指定する.
        学習しているときも, 予測はしているがな.

        input_data = {'xy_value': [1.0, 2.0], 'ftype': 'sin'}
        """

        self.enable_learning_mode(learn, learn_layer)
        self.enable_class_learning_mode(class_learn)

        self.run_number += 1

        # calc encoder, SP, TP
        for sensor_name in self.sensor_params.keys():
            self.network.regions[sensor_name].getSelf().dataSource.push(
                input_data)
        self.network.run(1)
        #self.layer_output(input_data)
        #self.debug(input_data)

        # learn classifier
        inferences = {}
        for name in self.dest_region_params.keys():
            class_name = "class_" + name
            inferences['classifier_' + name] = self._learn_classifier_multi(
                class_name,
                actValue=input_data[self.predict_value],
                pstep=self.predict_step)

        # anomaly
        #inferences["anomaly"] = self._calc_anomaly()

        return inferences

    def _learn_classifier_multi(self, region_name, actValue=None, pstep=0):
        """
        classifierの計算を行う.

        直接customComputeを呼び出さずに, network.runの中でやりたいところだけど,
        計算した内容の取り出し方法がわからない.
        """

        # TODO: networkとclassifierを完全に切り分けたいな.
        #       networkでは, sensor,sp,tpまで計算を行う.
        #       その計算結果の評価/利用は外に出す.

        classifier = self.network.regions[region_name]
        encoder = self.classifier_encoder_list[region_name].getEncoderList()[0]
        class_input = self.classifier_input_list[region_name]
        tp_bottomUpOut = self.network.regions[class_input].getOutputData(
            "bottomUpOut").nonzero()[0]
        #tp_bottomUpOut = self.network.regions["TP"].getSelf()._tfdr.infActiveState['t'].reshape(-1).nonzero()[0]

        if actValue is not None:
            bucketIdx = encoder.getBucketIndices(actValue)[0]
            classificationIn = {'bucketIdx': bucketIdx, 'actValue': actValue}
        else:
            classificationIn = {'bucketIdx': 0, 'actValue': 'no'}
        clResults = classifier.getSelf().customCompute(
            recordNum=self.run_number,
            patternNZ=tp_bottomUpOut,
            classification=classificationIn)

        inferences = self._get_inferences(clResults, pstep, summary_type='sum')

        return inferences

    def _get_inferences(self, clResults, steps, summary_type='sum'):
        """
        Reshape the classifier results into a form that is easier to consume.
        """

        likelihoodsVec = clResults[steps]
        bucketValues = clResults['actualValues']

        likelihoodsDict = defaultdict(int)
        bestActValue = None
        bestProb = None

        if summary_type == 'sum':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                likelihoodsDict[actValue] += prob
                if bestProb is None or likelihoodsDict[actValue] > bestProb:
                    bestProb = likelihoodsDict[actValue]
                    bestActValue = actValue

        elif summary_type == 'best':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                if bestProb is None or prob > bestProb:
                    likelihoodsDict[actValue] = prob
                    bestProb = prob
                    bestActValue = actValue

        return {
            'likelihoodsDict': likelihoodsDict,
            'best': {
                'value': bestActValue,
                'prob': bestProb
            }
        }

    def _calc_anomaly(self):
        """
        各層のanomalyを計算
        """

        score = 0
        anomalyScore = {}
        for name in self.dest_region_params.keys():
            #sp_bottomUpOut = self.network.regions["sp_"+name].getOutputData("bottomUpOut").nonzero()[0]
            sp_bottomUpOut = self.network.regions["tp_" + name].getInputData(
                "bottomUpIn").nonzero()[0]

            if name in self.prevPredictedColumns:
                score = computeAnomalyScore(sp_bottomUpOut,
                                            self.prevPredictedColumns[name])
            #topdown_predict = self.network.regions["TP"].getSelf()._tfdr.topDownCompute().copy().nonzero()[0]
            topdown_predict = self.network.regions[
                "tp_" + name].getSelf()._tfdr.topDownCompute().nonzero()[0]
            self.prevPredictedColumns[name] = copy.deepcopy(topdown_predict)

            anomalyScore[name] = score

        return anomalyScore

    def reset(self):
        """
        reset sequence
        """
        # for name in self.dest_region_params.keys():
        #     self.network.regions["tp_"+name].getSelf().resetSequenceStates()
        return

        # for sensor_name in self.sensor_params.keys():
        #     sensor = self.network.regions[sensor_name].getSelf()
        #     sensor.dataSource = DataBuffer()

    def enable_class_learning_mode(self, enable):
        for name in self.dest_region_params.keys():
            self.network.regions["class_" + name].setParameter(
                "learningMode", enable)

    def enable_learning_mode(self, enable, layer_name=None):
        """
        Toggle learningMode on each layer's SP, TP, and Classifier.
        """
        if layer_name is None:
            for name in self.dest_region_params.keys():
                self.network.regions["sp_" + name].setParameter(
                    "learningMode", enable)
                if self.tp_enable:
                    self.network.regions["tp_" + name].setParameter(
                        "learningMode", enable)
                self.network.regions["class_" + name].setParameter(
                    "learningMode", enable)
        else:
            for name in self.dest_region_params.keys():
                self.network.regions["sp_" + name].setParameter(
                    "learningMode", not enable)
                if self.tp_enable:
                    self.network.regions["tp_" + name].setParameter(
                        "learningMode", not enable)
                self.network.regions["class_" + name].setParameter(
                    "learningMode", not enable)
            for name in layer_name:
                self.network.regions["sp_" + name].setParameter(
                    "learningMode", enable)
                if self.tp_enable:
                    self.network.regions["tp_" + name].setParameter(
                        "learningMode", enable)
                self.network.regions["class_" + name].setParameter(
                    "learningMode", enable)

    def print_inferences(self, input_data, inferences):
        """
        Print the inference results.
        """

        # print "%10s, %10s, %1s" % (
        #         int(input_data['xy_value'][0]),
        #         int(input_data['xy_value'][1]),
        #         input_data['label'][:1]),
        print "%5s" % (input_data['label']),

        try:
            for name in sorted(self.dest_region_params.keys()):
                print "%5s" % (inferences['classifier_' +
                                          name]['best']['value']),

            for name in sorted(self.dest_region_params.keys()):
                print "%6.4f," % (inferences['classifier_' +
                                             name]['likelihoodsDict']
                                  [input_data[self.predict_value]]),
        except KeyError:
            # this layer's inference may be missing; skip it
            pass

        # for name in sorted(self.dest_region_params.keys()):
        #     print "%3.2f," % (inferences["anomaly"][name]),

        # for name in sorted(self.dest_region_params.keys()):
        #     print "%5s," % name,

        print

    def layer_output(self, input_data, region_name=None):
        if region_name is not None:
            Region = self.network.regions[region_name]
            print Region.getOutputData("bottomUpOut").nonzero()[0]
            return

        for name in self.dest_region_params.keys():
            SPRegion = self.network.regions["sp_" + name]
            if self.tp_enable:
                TPRegion = self.network.regions["tp_" + name]

            print "#################################### ", name
            print
            print "==== SP layer ===="
            print "input:  ", SPRegion.getInputData(
                "bottomUpIn").nonzero()[0][:20]
            print "output: ", SPRegion.getOutputData(
                "bottomUpOut").nonzero()[0][:20]
            print
            if self.tp_enable:
                print "==== TP layer ===="
                print "input:  ", TPRegion.getInputData(
                    "bottomUpIn").nonzero()[0][:20]
                print "output: ", TPRegion.getOutputData(
                    "bottomUpOut").nonzero()[0][:20]
                print
            print "==== Predict ===="
            print TPRegion.getSelf()._tfdr.topDownCompute().copy().nonzero(
            )[0][:20]
            print

    def save(self, path):
        import pickle
        with open(path, 'wb') as modelPickleFile:
            pickle.dump(self, modelPickleFile)
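
A minimal usage sketch for this class follows. The original pushes an 'xy_value' vector, which presumably relies on a custom vector encoder from the same project, so this sketch substitutes two plain ScalarEncoder fields to stay self-contained; all parameter values here are hypothetical.

import math
from collections import OrderedDict

# Hierarchy: sensor1 -> region1 -> region2
net_structure = OrderedDict([('sensor1', ['region1']),
                             ('region1', ['region2'])])

sensor_params = {
    'sensor1': {
        'x': {'type': 'ScalarEncoder', 'fieldname': 'x', 'name': 'x',
              'w': 21, 'n': 200, 'minval': -7.0, 'maxval': 7.0},
        'y': {'type': 'ScalarEncoder', 'fieldname': 'y', 'name': 'y',
              'w': 21, 'n': 200, 'minval': -7.0, 'maxval': 7.0},
    }
}
class_encoder_params = {
    'ftype': {'type': 'CategoryEncoder', 'fieldname': 'ftype', 'name': 'ftype',
              'w': 21, 'categoryList': ['sin', 'cos']}
}
dest_region_params = {'region1': {}, 'region2': {}}  # keep the defaults

model = ClaClassifier(net_structure, sensor_params,
                      dest_region_params, class_encoder_params)

for i in xrange(200):
    x = 2 * math.pi * (i % 50) / 50.0
    inferences = model.run({'x': x, 'y': math.sin(x), 'ftype': 'sin'},
                           learn=True)
    print inferences['classifier_region2']['best']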
예제 #42
0
    def __init__(self,
                 numColumns,
                 L2Params,
                 L4Params,
                 L6aParams,
                 repeat,
                 logCalls=False):
        """
    Create a network consisting of multiple columns. Each column contains one
    L2, one L4, and one L6a layer. In addition, all the L2 columns are fully
    connected to each other through their lateral inputs.

    :param numColumns: Number of columns to create
    :type numColumns: int
    :param L2Params: constructor parameters for :class:`ColumnPoolerRegion`
    :type L2Params: dict
    :param L4Params:  constructor parameters for :class:`ApicalTMPairRegion`
    :type L4Params: dict
    :param L6aParams: constructor parameters for :class:`GridCellLocationRegion`
    :type L6aParams: dict
    :param repeat: Number of times each pair should be seen to be learned
    :type repeat: int
    :param logCalls: If true, calls to main functions will be logged internally.
                     The log can then be saved with saveLogs(). This allows us
                     to recreate the complete network behavior using
                     rerunExperimentFromLogfile which is very useful for
                     debugging.
    :type logCalls: bool
    """
        # Handle logging - this has to be done first
        self.logCalls = logCalls

        self.numColumns = numColumns
        self.repeat = repeat

        network = Network()
        self.network = createMultipleL246aLocationColumn(
            network=network,
            numberOfColumns=self.numColumns,
            L2Params=L2Params,
            L4Params=L4Params,
            L6aParams=L6aParams)
        network.initialize()

        self.sensorInput = []
        self.motorInput = []
        self.L2Regions = []
        self.L4Regions = []
        self.L6aRegions = []
        for i in xrange(self.numColumns):
            col = str(i)
            self.sensorInput.append(network.regions["sensorInput_" +
                                                    col].getSelf())
            self.motorInput.append(network.regions["motorInput_" +
                                                   col].getSelf())
            self.L2Regions.append(network.regions["L2_" + col])
            self.L4Regions.append(network.regions["L4_" + col])
            self.L6aRegions.append(network.regions["L6a_" + col])

        if L6aParams is not None and "dimensions" in L6aParams:
            self.dimensions = L6aParams["dimensions"]
        else:
            self.dimensions = 2

        self.sdrSize = L2Params["sdrSize"]

        # will be populated during training
        self.learnedObjects = {}
예제 #43
0
    def testSimpleImageNetwork(self):

        # Create the network and get region instances
        net = Network()
        net.addRegion("sensor", "py.ImageSensor", "{width: 32, height: 32}")
        net.addRegion("classifier", "py.KNNClassifierRegion",
                      "{distThreshold: 0.01, maxCategoryCount: 2}")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="dataOut",
                 destInput="bottomUpIn")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="categoryOut",
                 destInput="categoryIn")
        net.initialize()
        sensor = net.regions['sensor']
        classifier = net.regions['classifier']

        # Create a dataset with two categories, one image in each category
        # Each image consists of a unique rectangle
        tmpDir = tempfile.mkdtemp()
        os.makedirs(os.path.join(tmpDir, '0'))
        os.makedirs(os.path.join(tmpDir, '1'))

        im0 = Image.new("L", (32, 32))
        draw = ImageDraw.Draw(im0)
        draw.rectangle((10, 10, 20, 20), outline=255)
        im0.save(os.path.join(tmpDir, '0', 'im0.png'))

        im1 = Image.new("L", (32, 32))
        draw = ImageDraw.Draw(im1)
        draw.rectangle((15, 15, 25, 25), outline=255)
        im1.save(os.path.join(tmpDir, '1', 'im1.png'))

        # Load the dataset
        sensor.executeCommand(["loadMultipleImages", tmpDir])
        numImages = sensor.getParameter('numImages')
        self.assertEqual(numImages, 2)

        # Ensure learning is turned ON
        self.assertEqual(classifier.getParameter('learningMode'), 1)

        # Train the network (by default learning is ON in the classifier)
        # and then turn off learning and turn on inference mode
        net.run(2)
        classifier.setParameter('inferenceMode', 1)
        classifier.setParameter('learningMode', 0)

        # Check to make sure learning is turned OFF and that the classifier learned
        # something
        self.assertEqual(classifier.getParameter('learningMode'), 0)
        self.assertEqual(classifier.getParameter('inferenceMode'), 1)
        self.assertEqual(classifier.getParameter('categoryCount'), 2)
        self.assertEqual(classifier.getParameter('patternCount'), 2)

        # Now test the network to make sure it categorizes the images correctly
        numCorrect = 0
        for i in range(2):
            net.run(1)
            inferredCategory = classifier.getOutputData(
                'categoriesOut').argmax()
            if sensor.getOutputData('categoryOut') == inferredCategory:
                numCorrect += 1

        self.assertEqual(numCorrect, 2)

        # Cleanup the temp files
        os.unlink(os.path.join(tmpDir, '0', 'im0.png'))
        os.unlink(os.path.join(tmpDir, '1', 'im1.png'))
        os.removedirs(os.path.join(tmpDir, '0'))
        os.removedirs(os.path.join(tmpDir, '1'))
예제 #44
0
def _createNetwork(inverseReadoutResolution, anchorInputSize, dualPhase=False):
    """
  Create a simple network connecting sensor and motor inputs to the location
  region. Use :meth:`RawSensor.addDataToQueue` to add sensor input and growth
  candidates. Use :meth:`RawValues.addDataToQueue` to add motor input.
  ::
                        +----------+
    [   sensor*   ] --> |          | --> [     activeCells        ]
    [ candidates* ] --> | location | --> [    learnableCells      ]
    [    motor    ] --> |          | --> [ sensoryAssociatedCells ]
                        +----------+

  :param inverseReadoutResolution:
    Specifies the diameter of the circle of phases in the rhombus encoded by a
    bump.
  :type inverseReadoutResolution: int

  :type anchorInputSize: int
  :param anchorInputSize:
    The number of input bits in the anchor input.

  .. note::
    (*) This function will only add the 'sensor' and 'candidates' regions when
    'anchorInputSize' is greater than zero. This is useful if you would like to
    compute locations ignoring sensor input

  .. seealso::
     - :py:func:`htmresearch.frameworks.location.path_integration_union_narrowing.createRatModuleFromReadoutResolution`

  """
    net = Network()

    # Create simple region to pass motor commands as displacement vectors (dx, dy)
    net.addRegion("motor", "py.RawValues", json.dumps({"outputWidth": 2}))

    if anchorInputSize > 0:
        # Create simple region to pass growth candidates
        net.addRegion("candidates", "py.RawSensor",
                      json.dumps({"outputWidth": anchorInputSize}))

        # Create simple region to pass sensor input
        net.addRegion("sensor", "py.RawSensor",
                      json.dumps({"outputWidth": anchorInputSize}))

    # Initialize region with 5 modules varying scale by sqrt(2) and 4 different
    # random orientations for each scale
    scale = []
    orientation = []
    for i in xrange(5):
        for _ in xrange(4):
            angle = np.radians(random.gauss(7.5, 7.5))
            orientation.append(random.choice([angle, -angle]))
            scale.append(10.0 * (math.sqrt(2)**i))

    # Create location region
    params = computeRatModuleParametersFromReadoutResolution(
        inverseReadoutResolution)
    params.update({
        "moduleCount": len(scale),
        "scale": scale,
        "orientation": orientation,
        "anchorInputSize": anchorInputSize,
        "activationThreshold": 8,
        "initialPermanence": 1.0,
        "connectedPermanence": 0.5,
        "learningThreshold": 8,
        "sampleSize": 10,
        "permanenceIncrement": 0.1,
        "permanenceDecrement": 0.0,
        "dualPhase": dualPhase,
        "bumpOverlapMethod": "probabilistic"
    })
    net.addRegion("location", "py.GridCellLocationRegion", json.dumps(params))

    if anchorInputSize > 0:
        # Link sensor
        net.link("sensor",
                 "location",
                 "UniformLink",
                 "",
                 srcOutput="dataOut",
                 destInput="anchorInput")
        net.link("sensor",
                 "location",
                 "UniformLink",
                 "",
                 srcOutput="resetOut",
                 destInput="resetIn")
        net.link("candidates",
                 "location",
                 "UniformLink",
                 "",
                 srcOutput="dataOut",
                 destInput="anchorGrowthCandidates")

    # Link motor input
    net.link("motor",
             "location",
             "UniformLink",
             "",
             srcOutput="dataOut",
             destInput="displacement")

    # Initialize network objects
    net.initialize()

    return net
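
A short sketch of driving this network, based on the queue signatures visible elsewhere in this document (RawValues.addDataToQueue(displacement) and RawSensor.addDataToQueue(activeBits, reset, sequenceId)); it reuses the module's np import, and the SDR indices below are made up:

net = _createNetwork(inverseReadoutResolution=8, anchorInputSize=1024)

location = net.regions["location"]
sensor = net.regions["sensor"].getSelf()
candidates = net.regions["candidates"].getSelf()
motor = net.regions["motor"].getSelf()

# One movement plus one sensation per step
activeBits = np.array([2, 42, 222, 440, 812])   # made-up anchor input SDR
motor.addDataToQueue(np.array([1.0, 0.0]))      # displacement: move right by 1
sensor.addDataToQueue(activeBits, True, 0)      # reset=True, sequenceId=0
candidates.addDataToQueue(activeBits, True, 0)  # growth candidates
net.run(1)

print location.getOutputData("activeCells").nonzero()[0]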
예제 #45
0
  def reset(self, params, repetition):
    """
    Take the steps necessary to reset the experiment before each repetition:
      - Make sure random seed is different for each repetition
      - Create the L2-L4-L6a network
      - Generate objects used by the experiment
      - Learn all objects used by the experiment
    """
    print params["name"], ":", repetition

    self.debug = params.get("debug", False)

    L2Params = json.loads('{' + params["l2_params"] + '}')
    L4Params = json.loads('{' + params["l4_params"] + '}')
    L6aParams = json.loads('{' + params["l6a_params"] + '}')

    # Make sure random seed is different for each repetition
    seed = params.get("seed", 42)
    np.random.seed(seed + repetition)
    random.seed(seed + repetition)
    L2Params["seed"] = seed + repetition
    L4Params["seed"] = seed + repetition
    L6aParams["seed"] = seed + repetition

    # Configure L6a params
    numModules = params["num_modules"]
    L6aParams["scale"] = [params["scale"]] * numModules
    angle = params["angle"] / numModules
    orientation = range(angle / 2, angle * numModules, angle)
    L6aParams["orientation"] = np.radians(orientation).tolist()

    # Create multi-column L2-L4-L6a network
    self.numColumns = params["num_cortical_columns"]
    network = Network()
    network = createMultipleL246aLocationColumn(network=network,
                                                numberOfColumns=self.numColumns,
                                                L2Params=L2Params,
                                                L4Params=L4Params,
                                                L6aParams=L6aParams)
    network.initialize()

    self.network = network
    self.sensorInput = []
    self.motorInput = []
    self.L2Regions = []
    self.L4Regions = []
    self.L6aRegions = []
    for i in xrange(self.numColumns):
      col = str(i)
      self.sensorInput.append(network.regions["sensorInput_" + col].getSelf())
      self.motorInput.append(network.regions["motorInput_" + col].getSelf())
      self.L2Regions.append(network.regions["L2_" + col])
      self.L4Regions.append(network.regions["L4_" + col])
      self.L6aRegions.append(network.regions["L6a_" + col])

    # Use the number of iterations as the number of objects. This will allow us
    # to execute one iteration per object and use the "iteration" parameter as
    # the object index
    numObjects = params["iterations"]

    # Generate feature SDRs
    numFeatures = params["num_features"]
    numOfMinicolumns = L4Params["columnCount"]
    numOfActiveMinicolumns = params["num_active_minicolumns"]
    self.featureSDR = [{
      str(f): sorted(np.random.choice(numOfMinicolumns, numOfActiveMinicolumns))
      for f in xrange(numFeatures)
    } for _ in xrange(self.numColumns)]

    # Generate objects used in the experiment
    self.objects = generateObjects(numObjects=numObjects,
                                   featuresPerObject=params["features_per_object"],
                                   objectWidth=params["object_width"],
                                   numFeatures=numFeatures,
                                   distribution=params["feature_distribution"])

    # Make sure the objects are unique
    uniqueObjs = np.unique([{"features": obj["features"]}
                            for obj in self.objects])
    assert len(uniqueObjs) == len(self.objects)

    self.sdrSize = L2Params["sdrSize"]

    # Learn objects
    self.numLearningPoints = params["num_learning_points"]
    self.numOfSensations = params["num_sensations"]
    self.learnedObjects = {}
    self.learn()
예제 #46
0
def createNetwork(dataSource):
  """Create the Network instance.

  :param dataSource: a RecordStream instance to get data from
  :returns: a Network instance ready to run
  """
  
  network = Network()

  #----- SENSOR REGION -----#
  
  # Input data comes from a CSV file (scalar values, labels). The RecordSensor region
  # allows us to specify a file record stream as the input source via the
  # dataSource attribute.
  network.addRegion("sensor", "py.RecordSensor",
                    json.dumps({"verbosity": _VERBOSITY}))
  sensor = network.regions["sensor"].getSelf()

  # The RecordSensor needs to know how to encode the input values
  sensor.encoder = createScalarEncoder()

  # Specify the dataSource as a file record stream instance
  sensor.dataSource = dataSource

  # Region width
  prevRegionWidth = sensor.encoder.getWidth()


  #----- SPATIAL POOLER -----#

  # Create the spatial pooler region
  SP_PARAMS["inputWidth"] = prevRegionWidth
  network.addRegion("SP", "py.SPRegion", json.dumps(SP_PARAMS))

  # Link the SP region to the sensor input
  network.link("sensor", "SP", "UniformLink", "")
  
  # Forward the sensor region sequence reset to the SP
  network.link("sensor", "SP", "UniformLink", "", srcOutput="resetOut", destInput="resetIn")
  
  # Make sure learning is ON
  spatialPoolerRegion = network.regions["SP"]
  spatialPoolerRegion.setParameter("learningMode", True)
  
  # Inference mode outputs the current inference (e.g. active columns). 
  # It's ok to always leave inference mode on - it's only there for some corner cases.
  spatialPoolerRegion.setParameter('inferenceMode', True)

  # Region width
  prevRegionWidth = SP_PARAMS['columnCount']

  
  #----- TEMPORAL MEMORY -----#
  
  # Make sure region widths fit
  assert TM_PARAMS['columnCount'] == prevRegionWidth
  TM_PARAMS['inputWidth'] = TM_PARAMS['columnCount']
  
  # Create the TM region
  network.addRegion("TM", "py.TPRegion", json.dumps(TM_PARAMS))

  # Feed forward link from SP to TM
  network.link("SP", "TM", "UniformLink", "", srcOutput="bottomUpOut", destInput="bottomUpIn")
  
  # Feedback links (unnecessary ?)
  network.link("TM", "SP", "UniformLink", "", srcOutput="topDownOut", destInput="topDownIn")
  network.link("TM", "sensor", "UniformLink", "", srcOutput="topDownOut", destInput="temporalTopDownIn")

  # Forward the sensor region sequence reset to the TM
  network.link("sensor", "TM", "UniformLink", "", srcOutput="resetOut", destInput="resetIn")

  # Disable learning for now (like the classifier below, it will be enabled
  # in a later training phase)
  temporalMemoryRegion = network.regions["TM"]
  temporalMemoryRegion.setParameter("learningMode", False)
  
  # Inference mode outputs the current inference (e.g. active cells). 
  # It's ok to always leave inference mode on - it's only there for some corner cases.
  temporalMemoryRegion.setParameter('inferenceMode', True)
  
  # Region width
  prevRegionWidth = TM_PARAMS['inputWidth']


  #----- CLASSIFIER REGION -----#

  # create classifier region
  network.addRegion('classifier', 'py.CLAClassifierRegion', json.dumps(CLA_CLASSIFIER_PARAMS))

  # feed the TM states to the classifier
  network.link("TM", "classifier", "UniformLink", "", srcOutput = "bottomUpOut", destInput = "bottomUpIn")
  
  
  # create a link from the sensor to the classifier to send in category labels.
  # TODO: this link is actually useless right now because the CLAClassifier
  # region's compute() function doesn't work, so we feed TM states & categories
  # manually to the classifier via the customCompute() function.
  network.link("sensor", "classifier", "UniformLink", "", srcOutput = "categoryOut", destInput = "categoryIn")

  # Disable learning for now (it will be enabled in a later training phase)
  classifier = network.regions["classifier"]
  classifier.setParameter('learningMode', False)

  # Inference mode outputs the current inference. We can always leave it on.
  classifier.setParameter('inferenceMode', True)

  

  #------ INITIALIZE -----#  
  
  # The network is not initialized until you try to run it. Make sure it's initialized right away.
  network.initialize()

  return network
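
The training phase hinted at by the TODO above would feed the classifier manually, along the lines of this sketch (the customCompute() call signature matches the other examples in this document; the encoder and the recordNum bookkeeping are the caller's responsibility):

def feedClassifier(network, encoder, recordNum, actValue):
    # Grab the TM state that the preceding network.run(1) produced
    patternNZ = network.regions["TM"].getOutputData("bottomUpOut").nonzero()[0]

    classifier = network.regions["classifier"].getSelf()
    bucketIdx = encoder.getBucketIndices(actValue)[0]
    return classifier.customCompute(
        recordNum=recordNum,
        patternNZ=patternNZ,
        classification={"bucketIdx": bucketIdx, "actValue": actValue})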
예제 #47
0
def createNetwork(dataSource):
  """Create the Network instance.

  The network has a sensor region reading data from `dataSource` and passing
  the encoded representation to an SPRegion. The SPRegion output is passed to
  a TPRegion.

  :param dataSource: a RecordStream instance to get data from
  :returns: a Network instance ready to run
  """
  network = Network()

  # Our input is sensor data from the gym file. The RecordSensor region
  # allows us to specify a file record stream as the input source via the
  # dataSource attribute.
  network.addRegion("sensor", "py.RecordSensor",
                    json.dumps({"verbosity": _VERBOSITY}))
  sensor = network.regions["sensor"].getSelf()
  # The RecordSensor needs to know how to encode the input values
  sensor.encoder = createEncoder()
  # Specify the dataSource as a file record stream instance
  sensor.dataSource = dataSource

  # Create the spatial pooler region
  SP_PARAMS["inputWidth"] = sensor.encoder.getWidth()
  network.addRegion("spatialPoolerRegion", "py.SPRegion", json.dumps(SP_PARAMS))

  # Link the SP region to the sensor input
  network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
  network.link("sensor", "spatialPoolerRegion", "UniformLink", "",
               srcOutput="resetOut", destInput="resetIn")
  network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
               srcOutput="spatialTopDownOut", destInput="spatialTopDownIn")
  network.link("spatialPoolerRegion", "sensor", "UniformLink", "",
               srcOutput="temporalTopDownOut", destInput="temporalTopDownIn")

  # Add the TPRegion on top of the SPRegion
  # TODO: Needs TMRegion
  network.addRegion("temporalMemoryRegion", "py.TPRegion",
                    json.dumps(TP_PARAMS))

  network.link("spatialPoolerRegion", "temporalMemoryRegion", "UniformLink", "")
  network.link("temporalMemoryRegion", "spatialPoolerRegion", "UniformLink", "",
               srcOutput="topDownOut", destInput="topDownIn")

  # Register the union temporal pooling region package, since it is not part of nupic
  curDirectory = os.path.dirname(os.path.abspath(__file__))
  # directory containing the union pooler directory is 2 directories above this file
  unionTemporalPoolerDirectory = os.path.split((os.path.split(curDirectory))[0])[0]
  sys.path.append(unionTemporalPoolerDirectory)
  Network.registerRegionPackage("union_temporal_pooling")

  # Add the union temporal pooler region on top of the TM region
  temporal = network.regions["temporalMemoryRegion"].getSelf()
  UP_PARAMS["inputWidth"] = temporal.getOutputElementCount("bottomUpOut")
  network.addRegion("unionTemporalPoolerRegion", "py.TemporalPoolerRegion", json.dumps(UP_PARAMS))

  network.link("temporalMemoryRegion", "unionTemporalPoolerRegion", "UniformLink", "",
               srcOutput="activeCells", destInput="activeCells")
  network.link("temporalMemoryRegion", "unionTemporalPoolerRegion", "UniformLink", "",
               srcOutput="predictedActiveCells", destInput="predictedActiveCells")

  network.initialize()

  spatial = network.regions["spatialPoolerRegion"].getSelf()
  # Make sure learning is enabled (this is the default)
  spatial.setParameter("learningMode", 1, True)
  # We want temporal anomalies so disable anomalyMode in the SP. This mode is
  # used for computing anomalies in a non-temporal model.
  spatial.setParameter("anomalyMode", 1, False)

  # Enable topDownMode to get the predicted columns output
  temporal.setParameter("topDownMode", 1, True)
  # Make sure learning is enabled (this is the default)
  temporal.setParameter("learningMode", 1, True)
  # Enable inference mode so we get predictions
  temporal.setParameter("inferenceMode", 1, True)
  temporal.setParameter("computePredictedActiveCellIndices", 1, True)

  union = network.regions["unionTemporalPoolerRegion"].getSelf()
  # Make sure learning is enabled (this is the default)
  union.setParameter("learningMode", 1, True)

  return network
예제 #48
0
    def testCreateL4L6aLocationColumn(self):
        """
    Test 'createL4L6aLocationColumn' by inferring a set of hand crafted objects
    """
        scale = []
        orientation = []
        # Initialize L6a location region with 5 modules varying scale by sqrt(2) and
        # 4 different random orientations for each scale
        for i in xrange(5):
            for _ in xrange(4):
                angle = np.radians(random.gauss(7.5, 7.5))
                orientation.append(random.choice([angle, -angle]))
                scale.append(10.0 * (math.sqrt(2)**i))

        net = Network()
        createL4L6aLocationColumn(
            net, {
                "inverseReadoutResolution": 8,
                "sensorInputSize": NUM_OF_CELLS,
                "L4Params": {
                    "columnCount": NUM_OF_COLUMNS,
                    "cellsPerColumn": CELLS_PER_COLUMN,
                    "activationThreshold": 15,
                    "minThreshold": 15,
                    "initialPermanence": 1.0,
                    "implementation": "ApicalTiebreak",
                    "maxSynapsesPerSegment": -1
                },
                "L6aParams": {
                    "moduleCount": len(scale),
                    "scale": scale,
                    "orientation": orientation,
                    "anchorInputSize": NUM_OF_CELLS,
                    "activationThreshold": 8,
                    "initialPermanence": 1.0,
                    "connectedPermanence": 0.5,
                    "learningThreshold": 8,
                    "sampleSize": 10,
                    "permanenceIncrement": 0.1,
                    "permanenceDecrement": 0.0,
                    "bumpOverlapMethod": "probabilistic"
                }
            })
        net.initialize()

        L4 = net.regions['L4']
        L6a = net.regions['L6a']
        sensor = net.regions['sensorInput'].getSelf()
        motor = net.regions['motorInput'].getSelf()

        # Keeps a list of learned objects
        learnedRepresentations = defaultdict(list)

        # Learn Objects
        self._setLearning(net, True)

        for objectDescription in OBJECTS:
            reset = True
            previousLocation = None
            L6a.executeCommand(["activateRandomLocation"])

            for iFeature, feature in enumerate(objectDescription["features"]):
                # Move the sensor to the center of the object
                locationOnObject = np.array([
                    feature["top"] + feature["height"] / 2.,
                    feature["left"] + feature["width"] / 2.
                ])

                # Calculate displacement from previous location
                if previousLocation is not None:
                    motor.addDataToQueue(locationOnObject - previousLocation)
                previousLocation = locationOnObject

                # Sense feature at location
                sensor.addDataToQueue(FEATURE_ACTIVE_COLUMNS[feature["name"]],
                                      reset, 0)
                net.run(1)
                reset = False

                # Save learned representations
                representation = L6a.getOutputData("sensoryAssociatedCells")
                representation = representation.nonzero()[0]
                learnedRepresentations[(objectDescription["name"],
                                        iFeature)] = representation

        # Infer objects
        self._setLearning(net, False)

        for objectDescription in OBJECTS:
            reset = True
            previousLocation = None
            inferred = False

            features = objectDescription["features"]
            touchSequence = range(len(features))
            random.shuffle(touchSequence)
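            # Touch the features in a random order so inference does not
            # depend on the order in which the object was learned.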

            for iFeature in touchSequence:
                feature = features[iFeature]

                # Move the sensor to the center of the object
                locationOnObject = np.array([
                    feature["top"] + feature["height"] / 2.,
                    feature["left"] + feature["width"] / 2.
                ])

                # Calculate displacement from previous location
                if previousLocation is not None:
                    motor.addDataToQueue(locationOnObject - previousLocation)
                previousLocation = locationOnObject

                # Sense feature at location
                sensor.addDataToQueue(FEATURE_ACTIVE_COLUMNS[feature["name"]],
                                      reset, 0)
                net.run(1)
                reset = False

                representation = L6a.getOutputData("sensoryAssociatedCells")
                representation = representation.nonzero()[0]
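                # The object counts as inferred once every active location
                # cell falls inside the representation learned for this
                # feature, i.e. set inclusion in the check below.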
                target_representations = set(
                    learnedRepresentations[(objectDescription["name"],
                                            iFeature)])

                inferred = (set(representation) <= target_representations)
                if inferred:
                    break

            self.assertTrue(inferred)
Example #49
0
  def testSimpleImageNetwork(self):

    # Create the network and get region instances
    net = Network()
    net.addRegion("sensor", "py.ImageSensor", "{width: 32, height: 32}")
    net.addRegion("classifier","py.KNNClassifierRegion",
                  "{distThreshold: 0.01, maxCategoryCount: 2}")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput = "dataOut", destInput = "bottomUpIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput = "categoryOut", destInput = "categoryIn")
    net.initialize()
    sensor = net.regions['sensor']
    classifier = net.regions['classifier']

    # Create a dataset with two categories, one image in each category
    # Each image consists of a unique rectangle
    tmpDir = tempfile.mkdtemp()
    os.makedirs(os.path.join(tmpDir,'0'))
    os.makedirs(os.path.join(tmpDir,'1'))

    im0 = Image.new("L",(32,32))
    draw = ImageDraw.Draw(im0)
    draw.rectangle((10,10,20,20), outline=255)
    im0.save(os.path.join(tmpDir,'0','im0.png'))

    im1 = Image.new("L",(32,32))
    draw = ImageDraw.Draw(im1)
    draw.rectangle((15,15,25,25), outline=255)
    im1.save(os.path.join(tmpDir,'1','im1.png'))

    # Load the dataset
    sensor.executeCommand(["loadMultipleImages", tmpDir])
    numImages = sensor.getParameter('numImages')
    self.assertEqual(numImages, 2)

    # Ensure learning is turned ON
    self.assertEqual(classifier.getParameter('learningMode'), 1)

    # Train the network (by default learning is ON in the classifier)
    # and then turn off learning and turn on inference mode
    net.run(2)
    classifier.setParameter('inferenceMode', 1)
    classifier.setParameter('learningMode', 0)

    # Check to make sure learning is turned OFF and that the classifier learned
    # something
    self.assertEqual(classifier.getParameter('learningMode'), 0)
    self.assertEqual(classifier.getParameter('inferenceMode'), 1)
    self.assertEqual(classifier.getParameter('categoryCount'),2)
    self.assertEqual(classifier.getParameter('patternCount'),2)

    # Now test the network to make sure it categorizes the images correctly
    numCorrect = 0
    for i in range(2):
      net.run(1)
      inferredCategory = classifier.getOutputData('categoriesOut').argmax()
      if sensor.getOutputData('categoryOut')[0] == inferredCategory:
        numCorrect += 1

    self.assertEqual(numCorrect,2)

    # Cleanup the temp files
    os.unlink(os.path.join(tmpDir,'0','im0.png'))
    os.unlink(os.path.join(tmpDir,'1','im1.png'))
    os.removedirs(os.path.join(tmpDir,'0'))
    os.removedirs(os.path.join(tmpDir,'1'))
Example #50
0
class ClaClassifier(object):

    def __init__(self, net_structure, sensor_params, dest_region_params, class_encoder_params):

        self.run_number = 0

        # for classifier
        self.classifier_encoder_list = {}
        self.classifier_input_list   = {}
        self.prevPredictedColumns    = {}

        # TODO: parameter we would like to remove
        self.predict_value = class_encoder_params.keys()[0]
        self.predict_step  = 0


        # default param
        self.default_params = {
            'SP_PARAMS':  {
                "spVerbosity": 0,
                "spatialImp": "cpp",
                "globalInhibition": 1,
                "columnCount": 2024,
                "inputWidth": 0,             # set later
                "numActiveColumnsPerInhArea": 20,
                "seed": 1956,
                "potentialPct": 0.8,
                "synPermConnected": 0.1,
                "synPermActiveInc": 0.05,
                "synPermInactiveDec": 0.0005,
                "maxBoost": 2.0,
                },
            'TP_PARAMS': {
                "verbosity": 0,
                "columnCount": 2024,
                "cellsPerColumn": 32,
                "inputWidth": 2024,
                "seed": 1960,
                "temporalImp": "cpp",
                "newSynapseCount": 20,
                "maxSynapsesPerSegment": 32,
                "maxSegmentsPerCell": 128,
                "initialPerm": 0.21,
                "permanenceInc": 0.2,
                "permanenceDec": 0.1,
                "globalDecay": 0.0,
                "maxAge": 0,
                "minThreshold": 12,
                "activationThreshold": 16,
                "outputType": "normal",
                "pamLength": 1,
                },
            'CLASSIFIER_PARAMS':  {
                "clVerbosity": 0,
                "alpha": 0.005,
                "steps": "0"
                }
            }

        # tp
        self.tp_enable = True

        # network structure, e.g. OrderedDict([('sensor3', ['region1']),
        #                                      ('region1', ['region2'])])
        self.net_structure = net_structure

        # region change params
        self.dest_region_params = dest_region_params

        # sensor change params
        self.sensor_params = sensor_params

        self.class_encoder_params = class_encoder_params

        self._createNetwork()


    def _makeRegion(self, name, params):
        sp_name    = "sp_" + name
        if self.tp_enable:
            tp_name    = "tp_" + name
        class_name = "class_" + name

        # addRegion
        self.network.addRegion(sp_name, "py.SPRegion", json.dumps(params['SP_PARAMS']))
        if self.tp_enable:
            self.network.addRegion(tp_name, "py.TPRegion", json.dumps(params['TP_PARAMS']))
        self.network.addRegion( class_name, "py.CLAClassifierRegion", json.dumps(params['CLASSIFIER_PARAMS']))

        encoder = MultiEncoder()
        encoder.addMultipleEncoders(self.class_encoder_params)
        self.classifier_encoder_list[class_name]  = encoder
        if self.tp_enable:
            self.classifier_input_list[class_name]    = tp_name
        else:
            self.classifier_input_list[class_name]    = sp_name

    def _linkRegion(self, src_name, dest_name):
        sensor     =  src_name
        sp_name    = "sp_" + dest_name
        tp_name    = "tp_" + dest_name
        class_name = "class_" + dest_name

        if self.tp_enable:
            self.network.link(sensor, sp_name, "UniformLink", "")
            self.network.link(sp_name, tp_name, "UniformLink", "")
            self.network.link(tp_name, class_name, "UniformLink", "")
        else:
            self.network.link(sensor, sp_name, "UniformLink", "")
            self.network.link(sp_name, class_name, "UniformLink", "")


    def _initRegion(self, name):
        sp_name = "sp_"+ name
        tp_name = "tp_"+ name
        class_name = "class_"+ name

        # setting sp
        SP = self.network.regions[sp_name]
        SP.setParameter("learningMode", True)
        SP.setParameter("anomalyMode", True)

        # # setting tp
        if self.tp_enable:
            TP = self.network.regions[tp_name]
            TP.setParameter("topDownMode", False)
            TP.setParameter("learningMode", True)
            TP.setParameter("inferenceMode", True)
            TP.setParameter("anomalyMode", False)

        # configure the classifier region
        classifier = self.network.regions[class_name]
        classifier.setParameter('inferenceMode', True)
        classifier.setParameter('learningMode', True)


    def _createNetwork(self):

        def deepupdate(original, update):
            """
            Recursively update a dict.
            Subdict's won't be overwritten but also updated.
            """
            if update is None:
                return None
            for key, value in original.iteritems():
                if not key in update:
                    update[key] = value
                elif isinstance(value, dict):
                    deepupdate(value, update[key])
            return update
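        # e.g. deepupdate({'a': {'x': 1, 'y': 2}}, {'a': {'y': 3}})
        # returns {'a': {'x': 1, 'y': 3}}: defaults fill in missing keys.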


        self.network = Network()

        # check
        # if self.selectivity not in self.dest_region_params.keys():
        #     raise Exception, "There is no selected region : " + self.selectivity
        if not len(self.net_structure.keys()) == len(set(self.net_structure.keys())):
            raise Exception, "There are duplicated net_structure keys: %s" % self.net_structure.keys()

        # sensor
        for sensor_name, params in self.sensor_params.items():
            self.network.addRegion(sensor_name, "py.RecordSensor", json.dumps({"verbosity": 0}))
            sensor = self.network.regions[sensor_name].getSelf()

            # set encoder
            #params = deepupdate(cn.SENSOR_PARAMS, params)
            encoder = MultiEncoder()
            encoder.addMultipleEncoders( params )
            sensor.encoder         = encoder
            sensor.dataSource      = DataBuffer()


        # network
        print 'creating regions ...'
        for name in self.dest_region_params.keys():
            change_params = self.dest_region_params[name]
            params = deepupdate(self.default_params, change_params)
            # input width
            input_width = 0
            for source in [s for s,d in self.net_structure.items() if name in d]:
                if source in self.sensor_params.keys():
                    sensor = self.network.regions[source].getSelf()
                    input_width += sensor.encoder.getWidth()
                else:
                    input_width += params['TP_PARAMS']['cellsPerColumn'] * params['TP_PARAMS']['columnCount']

            params['SP_PARAMS']['inputWidth'] = input_width
            self._makeRegion(name, params)

        # link
        print 'link network ...'
        for source, dest_list in self.net_structure.items():
            for dest in dest_list:
                if source in self.sensor_params.keys():
                    self._linkRegion(source, dest)
                else:
                    if self.tp_enable:
                        self._linkRegion("tp_" + source, dest)
                    else:
                        self._linkRegion("sp_" + source, dest)

        # initialize
        print 'initializing network ...'
        self.network.initialize()
        for name in self.dest_region_params.keys():
            self._initRegion(name)

        return


    #@profile
    def run(self, input_data, learn=True, class_learn=True,learn_layer=None):
        """
        networkの実行.
        学習したいときは, learn=True, ftypeを指定する.
        予測したいときは, learn=False, ftypeはNoneを指定する.
        学習しているときも, 予測はしているがな.

        input_data = {'xy_value': [1.0, 2.0], 'ftype': 'sin'}
        """

        self.enable_learning_mode(learn, learn_layer)
        self.enable_class_learning_mode(class_learn)

        self.run_number += 1

        # calc encoder, SP, TP
        for sensor_name in self.sensor_params.keys():
            self.network.regions[sensor_name].getSelf().dataSource.push(input_data)
        self.network.run(1)
        #self.layer_output(input_data)
        #self.debug(input_data)


        # learn classifier
        inferences = {}
        for name in self.dest_region_params.keys():
            class_name = "class_" + name
            inferences['classifier_'+name]   = self._learn_classifier_multi(class_name, actValue=input_data[self.predict_value], pstep=self.predict_step)



        # anomaly
        #inferences["anomaly"] = self._calc_anomaly()

        return inferences


    def _learn_classifier_multi(self, region_name, actValue=None, pstep=0):
        """
        classifierの計算を行う.

        直接customComputeを呼び出さずに, network.runの中でやりたいところだけど,
        計算した内容の取り出し方法がわからない.
        """

        # TODO: networkとclassifierを完全に切り分けたいな.
        #       networkでは, sensor,sp,tpまで計算を行う.
        #       その計算結果の評価/利用は外に出す.

        classifier     = self.network.regions[region_name]
        encoder        = self.classifier_encoder_list[region_name].getEncoderList()[0]
        class_input    = self.classifier_input_list[region_name]
        tp_bottomUpOut = self.network.regions[class_input].getOutputData("bottomUpOut").nonzero()[0]
        #tp_bottomUpOut = self.network.regions["TP"].getSelf()._tfdr.infActiveState['t'].reshape(-1).nonzero()[0]

        if actValue is not None:
            bucketIdx = encoder.getBucketIndices(actValue)[0]
            classificationIn = {
                    'bucketIdx': bucketIdx,
                    'actValue': actValue
                    }
        else:
            classificationIn = {'bucketIdx': 0,'actValue': 'no'}
        clResults = classifier.getSelf().customCompute(
                recordNum=self.run_number,
                patternNZ=tp_bottomUpOut,
                classification=classificationIn
                )

        inferences = self._get_inferences(clResults, pstep, summary_type='sum')

        return inferences

    def _get_inferences(self, clResults, steps, summary_type='sum'):
        """
        Reshape the classifier results into an easier-to-use form.
        """

        likelihoodsVec = clResults[steps]
        bucketValues   = clResults['actualValues']

        likelihoodsDict = defaultdict(int)
        bestActValue = None
        bestProb = None

        if summary_type == 'sum':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                likelihoodsDict[actValue] += prob
                if bestProb is None or likelihoodsDict[actValue] > bestProb:
                    bestProb = likelihoodsDict[actValue]
                    bestActValue = actValue

        elif summary_type == 'best':
            for (actValue, prob) in zip(bucketValues, likelihoodsVec):
                if bestProb is None or prob > bestProb:
                    likelihoodsDict[actValue] = prob
                    bestProb = prob
                    bestActValue = actValue

        return {'likelihoodsDict': likelihoodsDict, 'best': {'value': bestActValue, 'prob':bestProb}}


    def _calc_anomaly(self):
        """
        各層のanomalyを計算
        """

        score = 0
        anomalyScore = {}
        for name in self.dest_region_params.keys():
            #sp_bottomUpOut = self.network.regions["sp_"+name].getOutputData("bottomUpOut").nonzero()[0]
            sp_bottomUpOut = self.network.regions["tp_"+name].getInputData("bottomUpIn").nonzero()[0]

            if self.prevPredictedColumns.has_key(name):
                score = computeAnomalyScore(sp_bottomUpOut, self.prevPredictedColumns[name])
            #topdown_predict = self.network.regions["TP"].getSelf()._tfdr.topDownCompute().copy().nonzero()[0]
            topdown_predict = self.network.regions["tp_"+name].getSelf()._tfdr.topDownCompute().nonzero()[0]
            self.prevPredictedColumns[name] = copy.deepcopy(topdown_predict)

            anomalyScore[name] = score

        return anomalyScore

    def reset(self):
        """
        reset sequence
        """
        # for name in self.dest_region_params.keys():
        #     self.network.regions["tp_"+name].getSelf().resetSequenceStates()
        return

        # for sensor_name in self.sensor_params.keys():
        #     sensor = self.network.regions[sensor_name].getSelf()
        #     sensor.dataSource = DataBuffer()

    def enable_class_learning_mode(self, enable):
        for name in self.dest_region_params.keys():
            self.network.regions["class_"+name].setParameter("learningMode", enable)

    def enable_learning_mode(self, enable, layer_name = None):
        """
        各層のSP, TP, ClassifierのlearningModeを変更
        """
        if layer_name is None:
            for name in self.dest_region_params.keys():
                self.network.regions["sp_"+name].setParameter("learningMode", enable)
                if self.tp_enable:
                    self.network.regions["tp_"+name].setParameter("learningMode", enable)
                self.network.regions["class_"+name].setParameter("learningMode", enable)
        else:
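            # Disable learning everywhere first, then re-enable it only for
            # the layers listed in layer_name.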
            for name in self.dest_region_params.keys():
                self.network.regions["sp_"+name].setParameter("learningMode", not enable)
                if self.tp_enable:
                    self.network.regions["tp_"+name].setParameter("learningMode", not enable)
                self.network.regions["class_"+name].setParameter("learningMode", not enable)
            for name in layer_name:
                self.network.regions["sp_"+name].setParameter("learningMode", enable)
                if self.tp_enable:
                    self.network.regions["tp_"+name].setParameter("learningMode", enable)
                self.network.regions["class_"+name].setParameter("learningMode", enable)


    def print_inferences(self, input_data, inferences):
        """
        計算結果を出力する
        """

        # print "%10s, %10s, %1s" % (
        #         int(input_data['xy_value'][0]),
        #         int(input_data['xy_value'][1]),
        #         input_data['label'][:1]),
        print "%5s" % (
                input_data['label']),

        try:
            for name in sorted(self.dest_region_params.keys()):
                print "%5s" % (inferences['classifier_'+name]['best']['value']),

            for name in sorted(self.dest_region_params.keys()):
                print "%6.4f," % (inferences['classifier_'+name]['likelihoodsDict'][input_data[self.predict_value]]),
        except KeyError:
            pass

        # for name in sorted(self.dest_region_params.keys()):
        #     print "%3.2f," % (inferences["anomaly"][name]),

        # for name in sorted(self.dest_region_params.keys()):
        #     print "%5s," % name,

        print

    def layer_output(self, input_data, region_name=None):
        if region_name is not None:
            Region = self.network.regions[region_name]
            print Region.getOutputData("bottomUpOut").nonzero()[0]
            return

        for name in self.dest_region_params.keys():
            SPRegion = self.network.regions["sp_"+name]
            if self.tp_enable:
                TPRegion = self.network.regions["tp_"+name]

            print "#################################### ", name
            print
            print "==== SP layer ===="
            print "input:  ", SPRegion.getInputData("bottomUpIn").nonzero()[0][:20]
            print "output: ", SPRegion.getOutputData("bottomUpOut").nonzero()[0][:20]
            print
            if self.tp_enable:
                print "==== TP layer ===="
                print "input:  ", TPRegion.getInputData("bottomUpIn").nonzero()[0][:20]
                print "output: ", TPRegion.getOutputData("bottomUpOut").nonzero()[0][:20]
                print
            print "==== Predict ===="
            print TPRegion.getSelf()._tfdr.topDownCompute().copy().nonzero()[0][:20]
            print

    def save(self, path):
        import pickle
        with open(path, 'wb') as modelPickleFile:
            pickle.dump(self, modelPickleFile)
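
# A minimal usage sketch (hypothetical argument values; the encoder and
# region parameter dicts must match your data):
#
#   clf = ClaClassifier(net_structure={'sensor1': ['region1']},
#                       sensor_params={'sensor1': {...}},
#                       dest_region_params={'region1': {}},
#                       class_encoder_params={'xy_value': {...}})
#   inferences = clf.run({'xy_value': [1.0, 2.0], 'ftype': 'sin'})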
Example #51
0
class HTM(object):
    def __init__(self, dataSource, rdse_resolution, params=None, verbosity=3):
        """Create the Network instance.

        The network has a sensor region reading data from `dataSource` and passing
        the encoded representation to an SPRegion. The SPRegion output is passed to
        a TMRegion.

        :param dataSource: a RecordStream instance to get data from
        :param rdse_resolution: float, resolution of Random Distributed Scalar Encoder
        :param params: optional dict of overrides for spParams/tmParams/clParams
        :param verbosity: int, logging verbosity (see setVerbosity)
        """
        DATE = '{}'.format(strftime('%Y-%m-%d_%H:%M:%S', localtime()))
        self.log_file = join(
            '../logs/', 'HTM-{}-({}RDSEres)-datasource-{}.log'.format(
                DATE, rdse_resolution, str(dataSource)))
        log.basicConfig(format='[%(asctime)s] %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S %p',
                        filename=self.log_file,
                        level=log.DEBUG)
        self.streaming = False
        self.setVerbosity(verbosity)

        self.modelParams = {}
        log.debug("...loading params from {}...".format(_PARAMS_PATH))
        try:
            with open(_PARAMS_PATH, "r") as f:
                self.modelParams = yaml.safe_load(f)["modelParams"]
        except IOError:
            with open(os.path.join("..", _PARAMS_PATH), "r") as f:
                self.modelParams = yaml.safe_load(f)["modelParams"]
        # Create a network that will hold the regions.
        self.network = Network()
        # Add a sensor region.
        self.network.addRegion("sensor", "py.RecordSensor", '{}')
        # Set the encoder and data source of the sensor region.
        self.sensorRegion = self.network.regions["sensor"].getSelf()
        #sensorRegion.encoder = createEncoder(modelParams["sensorParams"]["encoders"])
        self.encoder = RDSEEncoder(rdse_resolution)
        self.sensorRegion.encoder = self.encoder.get_encoder()
        self.sensorRegion.dataSource = TimeSeriesStream(dataSource)
        self.network.regions["sensor"].setParameter("predictedField", "series")

        # Adjust params
        # Make sure the SP input width matches the sensor region output width.
        self.modelParams["spParams"][
            "inputWidth"] = self.sensorRegion.encoder.getWidth()
        if params is not None:
            for key, value in params.iteritems():
                if key == "clParams" or key == "spParams" or key == "tmParams":
                    for vkey, vvalue in value.iteritems():
                        #print(key, vkey, vvalue)
                        self.modelParams[key][vkey] = vvalue
        log.debug("xxx HTM Params: xxx\n{}\n".format(
            json.dumps(self.modelParams, sort_keys=True, indent=4)))
        # Add SP and TM regions.
        self.network.addRegion("spatialPoolerRegion", "py.SPRegion",
                               json.dumps(self.modelParams["spParams"]))
        self.network.addRegion("temporalPoolerRegion", "py.TMRegion",
                               json.dumps(self.modelParams["tmParams"]))
        # Add a classifier region.
        clName = "py.%s" % self.modelParams["clParams"].pop("regionName")
        self.network.addRegion("classifier", clName,
                               json.dumps(self.modelParams["clParams"]))
        # link regions
        self.linkSensorToClassifier()
        self.linkSensorToSpatialPooler()
        self.linkSpatialPoolerToTemporalPooler()
        self.linkTemporalPoolerToClassifier()
        self.linkResets()
        # (reset links are optional; linkResets() above already created them)
        self.network.initialize()
        self.turnInferenceOn()
        self.turnLearningOn()

    def __del__(self):
        """ closes all loggers """
        try:
            logger = log.getLogger()
            handlers = logger.handlers[:]
            for handler in handlers:
                try:
                    handler.close()
                    logger.removeHandler(handler)
                except:
                    pass
        except:
            pass

    def __str__(self):
        spRegion = self.network.getRegionsByType(SPRegion)[0]
        sp = spRegion.getSelf().getAlgorithmInstance()
        _str = "spatial pooler region inputs: {0}\n".format(
            spRegion.getInputNames())
        _str += "spatial pooler region outputs: {0}\n".format(
            spRegion.getOutputNames())
        _str += "# spatial pooler columns: {0}\n\n".format(sp.getNumColumns())

        tmRegion = self.network.getRegionsByType(TMRegion)[0]
        tm = tmRegion.getSelf().getAlgorithmInstance()
        _str += "temporal memory region inputs: {0}\n".format(
            tmRegion.getInputNames())
        _str += "temporal memory region outputs: {0}\n".format(
            tmRegion.getOutputNames())
        _str += "# temporal memory columns: {0}\n".format(tm.numberOfCols)
        return _str

    def getClassifierResults(self):
        """Helper function to extract results for all prediction steps."""
        classifierRegion = self.network.regions["classifier"]
        actualValues = classifierRegion.getOutputData("actualValues")
        probabilities = classifierRegion.getOutputData("probabilities")
        steps = classifierRegion.getSelf().stepsList
        N = classifierRegion.getSelf().maxCategoryCount
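        # `probabilities` is a flat array holding N entries per prediction
        # step, concatenated in the order given by stepsList.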
        results = {step: {} for step in steps}
        for i in range(len(steps)):
            # stepProbabilities are probabilities for this prediction step only.
            stepProbabilities = probabilities[i * N:(i + 1) * N]
            mostLikelyCategoryIdx = stepProbabilities.argmax()
            predictedValue = actualValues[mostLikelyCategoryIdx]
            predictionConfidence = stepProbabilities[mostLikelyCategoryIdx]
            results[steps[i]]["predictedValue"] = float(predictedValue)
            results[steps[i]]["predictionConfidence"] = float(
                predictionConfidence)
        log.debug("Classifier Reults:\n{}".format(
            json.dumps(results, sort_keys=True, indent=4)))
        return results

    def getCurrSeries(self):
        return self.network.regions["sensor"].getOutputData("sourceOut")[0]

    def getStepsList(self):
        return self.network.regions["classifier"].getSelf().stepsList

    def getTimeSeriesStream(self):
        return self.network.regions["sensor"].getSelf().dataSource

    def setDatasource(self, new_source):
        self.network.regions["sensor"].getSelf().dataSource = TimeSeriesStream(
            new_source)

    def linkResets(self):
        """createResetLink(network, "sensor", "spatialPoolerRegion")
        createResetLink(network, "sensor", "temporalPoolerRegion")"""
        self.network.link("sensor",
                          "spatialPoolerRegion",
                          "UniformLink",
                          "",
                          srcOutput="resetOut",
                          destInput="resetIn")
        self.network.link("sensor",
                          "temporalPoolerRegion",
                          "UniformLink",
                          "",
                          srcOutput="resetOut",
                          destInput="resetIn")

    def linkSensorToClassifier(self):
        """Create required links from a sensor region to a classifier region."""
        self.network.link("sensor",
                          "classifier",
                          "UniformLink",
                          "",
                          srcOutput="bucketIdxOut",
                          destInput="bucketIdxIn")
        self.network.link("sensor",
                          "classifier",
                          "UniformLink",
                          "",
                          srcOutput="actValueOut",
                          destInput="actValueIn")
        self.network.link("sensor",
                          "classifier",
                          "UniformLink",
                          "",
                          srcOutput="categoryOut",
                          destInput="categoryIn")

    def linkSensorToSpatialPooler(self):
        self.network.link("sensor",
                          "spatialPoolerRegion",
                          "UniformLink",
                          "",
                          srcOutput="dataOut",
                          destInput="bottomUpIn")

    def linkSpatialPoolerToTemporalPooler(self):
        """Create a feed-forward link between 2 regions: spatialPoolerRegion -> temporalPoolerRegion"""
        self.network.link("spatialPoolerRegion",
                          "temporalPoolerRegion",
                          "UniformLink",
                          "",
                          srcOutput="bottomUpOut",
                          destInput="bottomUpIn")

    def linkTemporalPoolerToClassifier(self):
        """Create a feed-forward link between 2 regions: temporalPoolerRegion -> classifier"""
        self.network.link("temporalPoolerRegion",
                          "classifier",
                          "UniformLink",
                          "",
                          srcOutput="bottomUpOut",
                          destInput="bottomUpIn")

    def setVerbosity(self, level):
        """
        Sets the level of print statements/logging (verbosity)
        * 3 == DEBUG
        * 2 == INFO
        * 1 == WARNING
        """
        if self.log_file is None:  # if there's no log file, make one
            DATE = '{}'.format(strftime('%Y-%m-%d_%H:%M:%S', localtime()))
            self.log_file = join(
                '../logs/',
                'HTM-{}-({}CPMC-{}RDSEres)-datasource-{}.log'.format(
                    DATE, self.modelParams["tmParams"]["cellsPerColumn"],
                    self.encoder.get_resolution(),
                    str(self.sensorRegion.dataSource)))
            log.basicConfig(format='[%(asctime)s] %(message)s',
                            datefmt='%m/%d/%Y %H:%M:%S %p',
                            filename=self.log_file,
                            level=log.DEBUG)

        if level >= 4 and not self.streaming:
            log.getLogger().addHandler(log.StreamHandler())
            self.streaming = True
        if level >= 3:
            log.getLogger().setLevel(log.DEBUG)
        elif level >= 2:
            log.getLogger().setLevel(log.INFO)  # stdlib logging has no VERBOSE level
        elif level >= 1:
            log.getLogger().setLevel(log.WARNING)

    def runNetwork(self, learning=True):
        DATE = '{}'.format(strftime('%Y-%m-%d_%H:%M:%S', localtime()))
        _OUTPUT_PATH = "../outputs/HTMOutput-{}-{}.csv".format(
            DATE, self.network.regions["sensor"].getSelf().dataSource)
        self.sensorRegion.dataSource.rewind()

        # Set predicted field
        self.network.regions["sensor"].setParameter("predictedField", "series")

        if learning is True:
            # Enable learning for all regions.
            self.turnLearningOn()
        elif learning is False:
            # Disable learning for all regions.
            self.turnLearningOff()
        else:
            # `learning` is a segment string (see turnLearningOn): disable
            # everything, then enable only the requested segments.
            self.turnLearningOff()
            self.turnLearningOn(learning)
        self.turnInferenceOn()

        _model = self.network.regions["sensor"].getSelf().dataSource

        with open(_OUTPUT_PATH, "w") as outputFile:
            writer = csv.writer(outputFile)
            log.info("Writing output to {}".format(_OUTPUT_PATH))
            steps = self.getStepsList()
            header_row = ["Time Step", "Series"]
            for step in steps:
                header_row.append("{} Step Pred".format(step))
                header_row.append("{} Step Pred Conf".format(step))
            writer.writerow(header_row)
            results = []
            one_preds = []
            for i in range(len(_model)):
                # Run the network for a single iteration
                self.network.run(1)

                series = self.network.regions["sensor"].getOutputData(
                    "sourceOut")[0]
                predictionResults = self.getClassifierResults()
                result = [_model.getBookmark(), series]
                one_preds.append(predictionResults[1]["predictedValue"])
                for step in steps:
                    result.append(predictionResults[step]["predictedValue"])
                    result.append(predictionResults[step]["predictionConfidence"] * 100)
                #print "{:6}: 1-step: {:16} ({:4.4}%)\t 5-step: {:16} ({:4.4}%)".format(*result)
                results.append(result)
                writer.writerow(result)
                outputFile.flush()
            return one_preds, results

    def runWithMode(self,
                    mode,
                    error_method="rmse",
                    weights=None,
                    normalize_error=False):
        """
        Modes:
        * "strain" - Learning on spatial pooler only, on training set
        * "train" - Learning, on training set
        * "test" - No learning, on test set
        * "eval" - Learning, on eval set
        """
        if weights is None:
            weights = {1: 1.0, 5: 1.0}  # avoid a shared mutable default
        mode = mode.lower()
        error_method = error_method.lower()
        log.debug(
            "entered `runWithMode` with:\n  mode: {}\n  error_method: {}"
            .format(mode, error_method))

        _model = self.getTimeSeriesStream()

        if mode == "strain":
            self.turnLearningOff("ct")
            self.turnLearningOn("s")
        else:
            self.turnLearningOn()
        self.turnInferenceOn()

        results = {}

        steps = self.getStepsList()
        for step in steps:
            results[step] = 0
        predictions = {}
        for step in steps:
            predictions[step] = [None] * step

        if mode == "strain" or mode == "train":
            _model.set_to_train_theta()
            while _model.in_train_set():
                temp = self.run_with_mode_one_iter(error_method, results,
                                                   predictions)
                results = temp[0]
                predictions = temp[1]
        elif mode == "test":
            _model.set_to_test_theta()
            while _model.in_test_set():
                temp = self.run_with_mode_one_iter(error_method, results,
                                                   predictions)
                results = temp[0]
                predictions = temp[1]
        elif mode == "eval":
            _model.set_to_eval_theta()
            while _model.in_eval_set():
                temp = self.run_with_mode_one_iter(error_method, results,
                                                   predictions)
                results = temp[0]
                predictions = temp[1]
            steps = self.getStepsList()
            for step in steps:
                weights[step] = 0
            weights[1] = 1  # eval is hard-coded to weight only the one-step prediction for now

            # normalize result over length of evaluation set
            for key in results:
                results[key] /= (self.sensorRegion.dataSource.len_eval_set() -
                                 1)

        # default any step without an explicit weight to zero
        for key in results:
            weights.setdefault(key, 0)

        for key, value in results.iteritems():
            results[key] = results[key] * weights[key]

        if normalize_error:
            _range = self.getTimeSeriesStream().get_range()
            if _range is not None:
                for key, value in results.iteritems():
                    results[key] = value / _range

        return results
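
    # Hedged usage sketch (assumes a constructed HTM instance `htm` whose
    # data source defines train/test/eval splits):
    #
    #   test_error = htm.runWithMode("test", error_method="rmse",
    #                                weights={1: 1.0, 5: 0.5})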

    def run_with_mode_one_iter(self, error_method, results, predictions=None):
        self.network.run(1)
        series = self.getCurrSeries()

        for key, value in results.iteritems():
            if predictions[key][0] is None:
                pass
            elif error_method == "rmse":
                results[key] += sqrt((series - predictions[key][0])**2)
            elif error_method == "binary":
                if not series == predictions[key][0]:
                    results[key] += 1

        # update predictions
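        # predictions[key] is a length-`key` shift register: the value that
        # falls out at index 0 was predicted `key` steps ago, which is what
        # the current observation is compared against.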
        classRes = self.getClassifierResults()
        for key, value in predictions.iteritems():
            for i in range(key - 1):
                value[i] = value[i + 1]  # shift predictions down one
            value[key - 1] = classRes[key]["predictedValue"]

        return (results, predictions)

    def setRDSEResolution(self, new_res):
        self.encoder = RDSEEncoder(new_res)

    def train(self,
              error_method="rmse",
              sibt=0,
              iter_per_cycle=1,
              max_cycles=20,
              weights=None,
              normalize_error=False):
        """
        Trains the HTM on `dataSource`

        :param error_method: error metric, "rmse" (root mean squared error) or "binary"
        :param sibt: spatial (pooler) iterations before temporal (pooler) training
        """
        if weights is None:
            weights = {1: 1.0, 5: 1.0}  # avoid a shared mutable default
        for i in range(sibt):
            log.debug(
                "\nxxxxx Iteration {}/{} of the Spatial Pooler Training xxxxx".
                format(i + 1, sibt))
            # train on spatial pooler
            log.debug(
                "Error for spatial training iteration {} was {} with {} error method"
                .format(
                    i,
                    self.runWithMode("strain", error_method, weights,
                                     normalize_error), error_method))
        log.info("\nExited spatial pooler only training loop")
        last_error = 0  # paired with curr_error = -1 so the first loop test passes
        curr_error = -1
        counter = 0
        log.info("Entering full training loop")
        while (fcompare(curr_error, last_error) == -1
               and counter < max_cycles):
            log.debug(
                "\n++++++++++ Cycle {} of the full training loop +++++++++\n".
                format(counter))
            last_error = curr_error
            curr_error = 0
            for i in range(int(iter_per_cycle)):
                log.debug("\n----- Iteration {}/{} of Cycle {} -----\n".format(
                    i + 1, iter_per_cycle, counter))
                log.debug(
                    "Error for full training cycle {}, iteration {} was {} with {} error method"
                    .format(
                        counter, i,
                        self.runWithMode("train", error_method, weights,
                                         normalize_error), error_method))
                result = self.runWithMode("test", error_method, weights,
                                          normalize_error)
                for key, value in result.iteritems():
                    curr_error += value
            log.debug("Cycle {} - last: {}    curr: {}".format(
                counter, last_error, curr_error))
            counter += 1
            if last_error == -1:
                last_error = float("inf")
        self.sensorRegion.dataSource.rewind()
        final_error = self.runWithMode("eval", error_method, weights,
                                       normalize_error)
        log.info("FINAL ERROR: {}".format(final_error[1]))
        return final_error[1]
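
    # Hedged usage sketch (assumes the TimeSeriesStream/RDSEEncoder helpers
    # used by __init__ are importable):
    #
    #   htm = HTM(dataSource, rdse_resolution=0.5)
    #   final_rmse = htm.train(error_method="rmse", sibt=2, max_cycles=10)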

    def turnInferenceOn(self):
        log.debug("Inference enabled for all regions")
        self.network.regions["spatialPoolerRegion"].setParameter(
            "inferenceMode", 1)
        self.network.regions["temporalPoolerRegion"].setParameter(
            "inferenceMode", 1)
        self.network.regions["classifier"].setParameter("inferenceMode", 1)

    def turnLearningOn(self, turnOn="cst"):
        """
        Turns learning on for certain segments

        :param turnOn - a string of characters representing the segments you'd like to turn on
        * c ---> classifier
        * s ---> spatial pooler
        * t ---> temporal pooler
        """
        for target in turnOn.lower():
            if target == "c":
                log.debug("Learning enabled for classifier")
                self.network.regions["classifier"].setParameter(
                    "learningMode", 1)
            elif target == "s":
                log.debug("Learning enabled for spatial pooler region")
                self.network.regions["spatialPoolerRegion"].setParameter(
                    "learningMode", 1)
            elif target == "t":
                log.debug("Learning enabled for temporal pooler region")
                self.network.regions["temporalPoolerRegion"].setParameter(
                    "learningMode", 1)

    def turnLearningOff(self, turnOff="cst"):
        """
        Turns learning off for certain segments

        :param turnOff - a string of characters representing the segments you'd like to turn off
        * c ---> classifier
        * s ---> spatial pooler
        * t ---> temporal pooler
        """
        for target in turnOff.lower():
            if target == "c":
                log.debug("Learning disabled for classifier")
                self.network.regions["classifier"].setParameter(
                    "learningMode", 0)
            elif target == "s":
                log.debug("Learning disabled for spatial pooler region")
                self.network.regions["spatialPoolerRegion"].setParameter(
                    "learningMode", 0)
            elif target == "t":
                log.debug("Learning disabled for temporal pooler region")
                self.network.regions["temporalPoolerRegion"].setParameter(
                    "learningMode", 0)