def _addRegion(self, src_name, dest_name, params):

        sensor     =  src_name
        sp_name    = "sp_" + dest_name
        tp_name    = "tp_" + dest_name
        class_name = "class_" + dest_name

        try:
            # If the destination regions already exist, only the new link from
            # this sensor to the existing SP region needs to be added.
            self.network.regions[sp_name]
            self.network.regions[tp_name]
            self.network.regions[class_name]
            self.network.link(sensor, sp_name, "UniformLink", "")

        except Exception:
            # sp
            self.network.addRegion(sp_name, "py.SPRegion", json.dumps(params['SP_PARAMS']))
            self.network.link(sensor, sp_name, "UniformLink", "")

            # tp
            self.network.addRegion(tp_name, "py.TPRegion", json.dumps(params['TP_PARAMS']))
            self.network.link(sp_name, tp_name, "UniformLink", "")

            # class
            self.network.addRegion(class_name, "py.CLAClassifierRegion", json.dumps(params['CLASSIFIER_PARAMS']))
            self.network.link(tp_name, class_name, "UniformLink", "")

            encoder = MultiEncoder()
            encoder.addMultipleEncoders(params['CLASSIFIER_ENCODE_PARAMS'])
            self.classifier_encoder_list[class_name]  = encoder
            self.classifier_input_list[class_name]    = tp_name
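The params argument consumed by _addRegion (and by _makeRegion further down) is assumed to be a dict of per-region sub-dicts. The sketch below is illustrative only: it shows just the keys these examples actually read (SP_PARAMS['inputWidth'], TP_PARAMS['columnCount'] and ['cellsPerColumn'], CLASSIFIER_ENCODE_PARAMS); the CLAClassifierRegion values are assumed placeholders, not a complete NuPIC configuration.

EXAMPLE_REGION_PARAMS = {
    'SP_PARAMS':                {'inputWidth': 2048, 'columnCount': 2048},
    'TP_PARAMS':                {'columnCount': 2048, 'cellsPerColumn': 32},
    'CLASSIFIER_PARAMS':        {'steps': '1', 'alpha': 0.005},  # assumed, not from this file
    'CLASSIFIER_ENCODE_PARAMS': {},  # MultiEncoder field encodings, as used above
}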
def createEncoder():
    """
    Creates and returns a MultiEncoder including a ScalarEncoder for
    energy consumption and a DateEncoder for the time of the day.

    @see nupic/encoders/__init__.py for type to file-name mapping
    @see nupic/encoders for encoder source files
    """
    encoder = MultiEncoder()
    encoder.addMultipleEncoders({
        "consumption": {
            "fieldname": u"consumption",
            "type": "ScalarEncoder",
            "name": u"consumption",
            "minval": 0.0,
            "maxval": 100.0,
            "clipInput": True,
            "w": 21,
            "n": 500
        },
        "timestamp_timeOfDay": {
            "fieldname": u"timestamp",
            "type": "DateEncoder",
            "name": u"timestamp_timeOfDay",
            "timeOfDay": (21, 9.5)
        }
    })
    return encoder
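A minimal usage sketch for the encoder above, assuming the standard MultiEncoder.encode() call that takes a dict keyed by field name (the sample values are made up, and MultiEncoder is assumed to be imported as in the other snippets here):

import datetime

encoder = createEncoder()
sdr = encoder.encode({"consumption": 42.0,
                      "timestamp": datetime.datetime(2014, 4, 1, 12, 30)})
print int(sdr.sum()), "active bits out of", encoder.getWidth()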
Example #3
def _createNetwork():
    """Create network with one RecordSensor region."""
    network = Network()
    network.addRegion('sensor', 'py.RecordSensor', '{}')
    sensorRegion = network.regions['sensor'].getSelf()

    # Add an encoder.
    encoderParams = {
        'consumption': {
            'fieldname': 'consumption',
            'resolution': 0.88,
            'seed': 1,
            'name': 'consumption',
            'type': 'RandomDistributedScalarEncoder'
        }
    }

    encoder = MultiEncoder()
    encoder.addMultipleEncoders(encoderParams)
    sensorRegion.encoder = encoder

    # Add a data source.
    testDir = os.path.dirname(os.path.abspath(__file__))
    inputFile = os.path.join(testDir, 'fixtures', 'gymdata-test.csv')
    dataSource = FileRecordStream(streamID=inputFile)
    sensorRegion.dataSource = dataSource

    # Get and set what field index we want to predict.
    predictedIdx = dataSource.getFieldNames().index('consumption')
    network.regions['sensor'].setParameter('predictedFieldIdx', predictedIdx)

    return network
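An assumed usage sketch for the network above: initialize it and step it through a few records from the attached FileRecordStream. The record count and the 'sourceOut' output name follow common RecordSensor usage and are not taken from this test.

if __name__ == '__main__':
    network = _createNetwork()
    network.initialize()
    network.run(10)  # feed 10 rows of gymdata-test.csv through the sensor
    print network.regions['sensor'].getOutputData('sourceOut')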
def createEncoder(newEncoders):
  """
  Creates and returns a MultiEncoder.

  @param newEncoders    (dict)          Keys are the encoders' names, values are
      dicts of the params; an example is shown below.

  @return encoder       (MultiEncoder)  See nupic.encoders.multi.py.

  Example input:
    {"energy": {"fieldname": u"energy",
                "type": "ScalarEncoder",
                "name": u"consumption",
                "minval": 0.0,
                "maxval": 100.0,
                "w": 21,
                "n": 500},
     "timestamp": {"fieldname": u"timestamp",
                   "type": "DateEncoder",
                   "name": u"timestamp_timeOfDay",
                   "timeOfDay": (21, 9.5)},
    }
  """
  encoder = MultiEncoder()
  encoder.addMultipleEncoders(newEncoders)
  return encoder
Example #5
def _createEncoder():
    """Create the encoder instance for our test and return it."""
    encoder = MultiEncoder()
    encoder.addMultipleEncoders(
        {
            "timestamp": dict(fieldname="timestamp", type="DateEncoder", timeOfDay=(5, 5)),
            "attendeeCount": dict(
                fieldname="attendeeCount",
                type="ScalarEncoder",
                name="attendeeCount",
                minval=0,
                maxval=270,
                clipInput=True,
                w=5,
                resolution=10,
            ),
            "consumption": dict(
                fieldname="consumption",
                type="ScalarEncoder",
                name="consumption",
                minval=0,
                maxval=115,
                clipInput=True,
                w=5,
                resolution=5,
            ),
        }
    )

    return encoder
def _createNetwork():
  """Create a network with a RecordSensor region and a SDRClassifier region"""

  network = Network()
  network.addRegion('sensor', 'py.RecordSensor', '{}')
  network.addRegion('classifier', 'py.SDRClassifierRegion', '{}')
  _createSensorToClassifierLinks(network, 'sensor', 'classifier')

  # Add encoder to sensor region.
  sensorRegion = network.regions['sensor'].getSelf()
  encoderParams = {'consumption': {'fieldname': 'consumption',
                                   'resolution': 0.88,
                                   'seed': 1,
                                   'name': 'consumption',
                                   'type': 'RandomDistributedScalarEncoder'}}

  encoder = MultiEncoder()
  encoder.addMultipleEncoders(encoderParams)
  sensorRegion.encoder = encoder

  # Add data source.
  testDir = os.path.dirname(os.path.abspath(__file__))
  inputFile = os.path.join(testDir, 'fixtures', 'gymdata-test.csv')
  dataSource = FileRecordStream(streamID=inputFile)
  sensorRegion.dataSource = dataSource

  # Get and set what field index we want to predict.
  predictedIdx = dataSource.getFieldNames().index('consumption')
  network.regions['sensor'].setParameter('predictedFieldIdx', predictedIdx)

  return network
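_createSensorToClassifierLinks is called above but not included in this excerpt. A plausible sketch, modeled on the usual NuPIC network-API pattern for wiring a RecordSensor to an SDRClassifierRegion; the output/input names below are assumptions, not taken from this file:

def _createSensorToClassifierLinks(network, sensorName, classifierName):
  """Hypothetical helper: wire the sensor's outputs into the classifier."""
  network.link(sensorName, classifierName, 'UniformLink', '',
               srcOutput='bucketIdxOut', destInput='bucketIdxIn')
  network.link(sensorName, classifierName, 'UniformLink', '',
               srcOutput='actValueOut', destInput='actValueIn')
  network.link(sensorName, classifierName, 'UniformLink', '',
               srcOutput='categoryOut', destInput='categoryIn')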
def createEncoder(newEncoders):
  """
  Creates and returns a MultiEncoder.

  @param newEncoders    (dict)          Keys are the encoders' names, values are
      dicts of the params; an example is shown below.

  @return encoder       (MultiEncoder)  See nupic.encoders.multi.py.

  Example input:
    {"energy": {"fieldname": u"energy",
                "type": "ScalarEncoder",
                "name": u"consumption",
                "minval": 0.0,
                "maxval": 100.0,
                "w": 21,
                "n": 500},
     "timestamp": {"fieldname": u"timestamp",
                   "type": "DateEncoder",
                   "name": u"timestamp_timeOfDay",
                   "timeOfDay": (21, 9.5)},
    }
  """
  encoder = MultiEncoder()
  encoder.addMultipleEncoders(newEncoders)
  return encoder
def _createNetwork():
  """Create network with one RecordSensor region."""
  network = Network()
  network.addRegion('sensor', 'py.RecordSensor', '{}')
  sensorRegion = network.regions['sensor'].getSelf()

  # Add an encoder.
  encoderParams = {'consumption': {'fieldname': 'consumption',
                                   'resolution': 0.88,
                                   'seed': 1,
                                   'name': 'consumption',
                                   'type': 'RandomDistributedScalarEncoder'}}

  encoder = MultiEncoder()
  encoder.addMultipleEncoders(encoderParams)
  sensorRegion.encoder = encoder

  # Add a data source.
  testDir = os.path.dirname(os.path.abspath(__file__))
  inputFile = os.path.join(testDir, 'fixtures', 'gymdata-test.csv')
  dataSource = FileRecordStream(streamID=inputFile)
  sensorRegion.dataSource = dataSource

  # Get and set what field index we want to predict.
  network.regions['sensor'].setParameter('predictedField', 'consumption')

  return network
Example #9
def _createNetwork():
    """Create a network with a RecordSensor region and a SDRClassifier region"""

    network = Network()
    network.addRegion('sensor', 'py.RecordSensor', '{}')
    network.addRegion('classifier', 'py.SDRClassifierRegion', '{}')
    _createSensorToClassifierLinks(network, 'sensor', 'classifier')

    # Add encoder to sensor region.
    sensorRegion = network.regions['sensor'].getSelf()
    encoderParams = {
        'consumption': {
            'fieldname': 'consumption',
            'resolution': 0.88,
            'seed': 1,
            'name': 'consumption',
            'type': 'RandomDistributedScalarEncoder'
        }
    }

    encoder = MultiEncoder()
    encoder.addMultipleEncoders(encoderParams)
    sensorRegion.encoder = encoder

    # Add data source.
    testDir = os.path.dirname(os.path.abspath(__file__))
    inputFile = os.path.join(testDir, 'fixtures', 'gymdata-test.csv')
    dataSource = FileRecordStream(streamID=inputFile)
    sensorRegion.dataSource = dataSource

    # Get and set what field index we want to predict.
    network.regions['sensor'].setParameter('predictedField', 'consumption')

    return network
def _createEncoder(encoders):
  """
  Creates and returns a MultiEncoder.

  @param encoders: (dict) Keys are the encoders' names, values are dicts of
  the params; an example is shown below.
  @return encoder: (MultiEncoder) See nupic.encoders.multi.py. Example input:
    {"energy": {"fieldname": u"energy",
                "type": "ScalarEncoder",
                "name": u"consumption",
                "minval": 0.0,
                "maxval": 100.0,
                "w": 21,
                "n": 500},
     "timestamp": {"fieldname": u"timestamp",
                   "type": "DateEncoder",
                   "name": u"timestamp_timeOfDay",
                   "timeOfDay": (21, 9.5)},
    }
  """
  if not isinstance(encoders, dict):
    raise TypeError("Encoders specified in incorrect format.")

  encoder = MultiEncoder()
  encoder.addMultipleEncoders(encoders)

  return encoder
def _createEncoder(encoders):
    """
    Creates and returns a MultiEncoder.

    @param encoders: (dict) Keys are the encoders' names, values are dicts of
    the params; an example is shown below.
    @return encoder: (MultiEncoder) See nupic.encoders.multi.py. Example input:
      {"energy": {"fieldname": u"energy",
                  "type": "ScalarEncoder",
                  "name": u"consumption",
                  "minval": 0.0,
                  "maxval": 100.0,
                  "w": 21,
                  "n": 500},
       "timestamp": {"fieldname": u"timestamp",
                     "type": "DateEncoder",
                     "name": u"timestamp_timeOfDay",
                     "timeOfDay": (21, 9.5)},
      }
    """
    if not isinstance(encoders, dict):
        raise TypeError("Encoders specified in incorrect format.")

    encoder = MultiEncoder()
    encoder.addMultipleEncoders(encoders)

    return encoder
Example #12
def _createEncoder():
    """Create the encoder instance for our test and return it."""
    encoder = MultiEncoder()
    encoder.addMultipleEncoders({
        'timestamp':
        dict(fieldname='timestamp',
             type='DateEncoder',
             timeOfDay=(5, 5),
             forced=True),
        'attendeeCount':
        dict(fieldname='attendeeCount',
             type='ScalarEncoder',
             name='attendeeCount',
             minval=0,
             maxval=270,
             clipInput=True,
             w=5,
             resolution=10,
             forced=True),
        'consumption':
        dict(fieldname='consumption',
             type='ScalarEncoder',
             name='consumption',
             minval=0,
             maxval=115,
             clipInput=True,
             w=5,
             resolution=5,
             forced=True),
    })

    return encoder
Example #13
def createEncoder(encoderParams):
    """Create a multi-encoder from params."""

    encoder = MultiEncoder()
    encoder.addMultipleEncoders(encoderParams)
    return encoder
def createSensors(network, sensors):
    for sensor in sensors:
        dataSource = FileRecordStream(streamID=sensor["source"])
        dataSource.setAutoRewind(True)
        encoder = MultiEncoder()
        encoder.addMultipleEncoders(fieldEncodings=sensor["encodings"])
        s = createRegion(network, sensor)
        s = s.getSelf()
        s.dataSource = dataSource
        s.encoder = encoder
    return network
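createRegion is referenced above but not defined in this excerpt. A hypothetical sketch of what it might do, assuming each sensor dict also carries a 'name' key and that the region is a py.RecordSensor (both are assumptions; json is assumed to be imported, as in the other snippets here):

def createRegion(network, sensor):
    # Hypothetical helper: add a RecordSensor region for this sensor and
    # return it, so the caller can call getSelf() on it as above.
    return network.addRegion(sensor["name"], "py.RecordSensor",
                             json.dumps({"verbosity": 0}))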
Example #15
def createCategoryEncoder():
    encoder = MultiEncoder()
    encoder.addMultipleEncoders({
        "gym": {
            "type": "CategoryEncoder",
            "fieldname": u"gym",
            "name": u"gym",
            "categoryList": ['Hornsby', 'Melbourne', 'Epping', 'Chadstone', 'North', 'Bondi', 'Pitt', 'Park', 'Canberra', 'Darlinghurst'],
            "w": 21,
            },
        })
    return encoder
Example #16
def createClassifierEncoder():
    """Create the encoder instance for our test and return it."""
    encoder = MultiEncoder()
    encoder.addMultipleEncoders({
        "y": {
            "type": "CategoryEncoder",
            "categoryList": ['label_1', 'label_2'],
            "fieldname": u"y",
            "name": u"y",
            "w": 21,
        },
    })

    return encoder
def createClassifierEncoder():
    """Create the encoder instance for our test and return it."""
    encoder = MultiEncoder()
    encoder.addMultipleEncoders({
            "y": {
                    "type": "CategoryEncoder",
                    "categoryList": ['label_1', 'label_2'],
                    "fieldname": u"y",
                    "name": u"y",
                    "w": 21,
            },
    })

    return encoder
Example #18
def _createEncoder():
  """Create the encoder instance for our test and return it."""
  encoder = MultiEncoder()
  encoder.addMultipleEncoders({
      'timestamp': dict(fieldname='timestamp', type='DateEncoder',
                        timeOfDay=(5,5), forced=True),
      'attendeeCount': dict(fieldname='attendeeCount', type='ScalarEncoder',
                            name='attendeeCount', minval=0, maxval=270,
                            clipInput=True, w=5, resolution=10, forced=True),
      'consumption': dict(fieldname='consumption',type='ScalarEncoder',
                          name='consumption', minval=0,maxval=115,
                          clipInput=True, w=5, resolution=5, forced=True),
  })

  return encoder
def createSensorEncoder():
    """Create the encoder instance for our test and return it."""
    encoder = MultiEncoder()
    encoder.addMultipleEncoders({
            "x": {
                    "type": "ScalarEncoder",
                    "fieldname": u"x",
                    "name": u"x",
                    "maxval": 100.0,
                    "minval": 0.0,
                    "n": 100,
                    "w": 21,
                    "clipInput": True,
            },
    })

    return encoder
Example #20
def createSensorEncoder():
    """Create the encoder instance for our test and return it."""
    encoder = MultiEncoder()
    encoder.addMultipleEncoders({
        "x": {
            "type": "ScalarEncoder",
            "fieldname": u"x",
            "name": u"x",
            "maxval": 100.0,
            "minval": 0.0,
            "n": 100,
            "w": 21,
            "clipInput": True,
        },
    })

    return encoder
Example #21
    def _makeRegion(self, name, params):
        sp_name    = "sp_" + name
        if self.tp_enable:
            tp_name    = "tp_" + name
        class_name = "class_" + name

        # addRegion
        self.network.addRegion(sp_name, "py.SPRegion", json.dumps(params['SP_PARAMS']))
        if self.tp_enable:
            self.network.addRegion(tp_name, "py.TPRegion", json.dumps(params['TP_PARAMS']))
        self.network.addRegion(class_name, "py.CLAClassifierRegion", json.dumps(params['CLASSIFIER_PARAMS']))

        encoder = MultiEncoder()
        encoder.addMultipleEncoders(self.class_encoder_params)
        self.classifier_encoder_list[class_name]  = encoder
        if self.tp_enable:
            self.classifier_input_list[class_name]    = tp_name
        else:
            self.classifier_input_list[class_name]    = sp_name
Example #23
def createEncoder(multilevelAnomaly=False):
    encoder = MultiEncoder()
    if not multilevelAnomaly:
        encoder.addMultipleEncoders({
            "cpu": {
                "fieldname": u"cpu",
                "type": "ScalarEncoder",
                "name": u"cpu",
                "minval": 0.0,
                "maxval": 100.0,
                "clipInput": True,
                "w": 21,
                "n": 500
            }
        })
    else:
        encoder.addMultipleEncoders({
            "cpu": {
                "fieldname": u"cpu",
                "type": "ScalarEncoder",
                "name": u"cpu",
                "minval": 0.0,
                "maxval": 100.0,
                "clipInput": True,
                "w": 21,
                "n": 500
            },
            "mem": {
                "fieldname": u"mem",
                "type": "ScalarEncoder",
                "name": u"mem",
                "minval": 0.0,
                "maxval": 100.0,
                "clipInput": True,
                "w": 21,
                "n": 500
            }
        })

    return encoder
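A small usage sketch (assumed): with multilevelAnomaly=True the returned MultiEncoder covers both the cpu and mem fields, so its total width doubles.

cpuOnly = createEncoder()
cpuAndMem = createEncoder(multilevelAnomaly=True)
print cpuOnly.getWidth(), cpuAndMem.getWidth()  # expected: 500 and 1000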
Example #24
def createEncoder():
    # TODO: vector
    encoder = MultiEncoder()
    encoder.addMultipleEncoders({
        "consumption": {
            "clipInput": True,
            "type": "ScalarEncoder",
            "fieldname": u"consumption",
            "name": u"consumption",
            "maxval": 100.0,
            "minval": 0.0,
            "n": 50,
            "w": 21,
            },
        "timestamp_timeOfDay": {
            "fieldname": u"timestamp",
            "name": u"timestamp_timeOfDay",
            "timeOfDay": (21, 9.5),
            "type": "DateEncoder",
            },
        })
    return encoder
Example #25
def createEncoder():
    """Create the encoder instance for our test and return it."""
    encoder = MultiEncoder()
    encoder.addMultipleEncoders({
        "consumption": {
            "clipInput": True,
            "fieldname": u"consumption",
            "maxval": 100.0,
            "minval": 0.0,
            "n": 50,
            "name": u"consumption",
            "type": "ScalarEncoder",
            "w": 21,
        },
        "timestamp_timeOfDay": {
            "fieldname": u"timestamp",
            "name": u"timestamp_timeOfDay",
            "timeOfDay": (21, 9.5),
            "type": "DateEncoder",
        },
    })

    return encoder
Example #26
def createEncoder():
  """Create the encoder instance for our test and return it."""
  encoder = MultiEncoder()
  encoder.addMultipleEncoders({
      "consumption": {
          "clipInput": True,
          "fieldname": u"consumption",
          "maxval": 100.0,
          "minval": 0.0,
          "n": 50,
          "name": u"consumption",
          "type": "ScalarEncoder",
          "w": 21,
      },
      "timestamp_timeOfDay": {
          "fieldname": u"timestamp",
          "name": u"timestamp_timeOfDay",
          "timeOfDay": (21, 9.5),
          "type": "DateEncoder",
      },
  })

  return encoder
def createEncoder():
  """
  Creates and returns a MultiEncoder including a ScalarEncoder for
  energy consumption and a DateEncoder for the time of the day.

  @see nupic/encoders/__init__.py for type to file-name mapping
  @see nupic/encoders for encoder source files
  """
  encoder = MultiEncoder()
  encoder.addMultipleEncoders({
      "consumption": {"fieldname": u"consumption",
                      "type": "ScalarEncoder",
                      "name": u"consumption",
                      "minval": 0.0,
                      "maxval": 100.0,
                      "clipInput": True,
                      "w": 21,
                      "n": 500},
      "timestamp_timeOfDay": {"fieldname": u"timestamp",
                              "type": "DateEncoder",
                              "name": u"timestamp_timeOfDay",
                              "timeOfDay": (21, 9.5)}
  })
  return encoder
    def _createNetwork(self):

        def deepupdate(original, update):
            """
            Recursively update a dict.
            Nested dicts are not overwritten; they are merged recursively.
            """
            if update is None:
                return None
            for key, value in original.iteritems():
                if key not in update:
                    update[key] = value
                elif isinstance(value, dict):
                    deepupdate(value, update[key])
            return update
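        # Illustrative behaviour of deepupdate (values are made up):
        #   deepupdate({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}})
        #   returns {'a': 1, 'b': {'x': 1, 'y': 2}}
        # i.e. keys missing from 'update' are copied over from 'original',
        # and nested dicts are merged rather than replaced.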


        self.network = Network()

        # check
        # if self.selectivity not in self.dest_region_params.keys():
        #     raise Exception, "There is no selected region : " + self.selectivity
        if len(self.net_structure.keys()) != len(set(self.net_structure.keys())):
            raise Exception, "There are duplicated net_structure keys : " + str(self.net_structure.keys())

        # sensor
        for sensor_name, params in self.sensor_params.items():
            self.network.addRegion(sensor_name, "py.RecordSensor", json.dumps({"verbosity": 0}))
            sensor = self.network.regions[sensor_name].getSelf()

            # set encoder
            #params = deepupdate(cn.SENSOR_PARAMS, params)
            encoder = MultiEncoder()
            encoder.addMultipleEncoders( params )
            sensor.encoder         = encoder
            sensor.dataSource      = DataBuffer()


        # network
        print 'create element ...'
        for name in self.dest_region_params.keys():
            change_params = self.dest_region_params[name]
            params = deepupdate(self.default_params, change_params)
            # input width
            input_width = 0
            for source in [s for s,d in self.net_structure.items() if name in d]:
                if source in self.sensor_params.keys():
                    sensor = self.network.regions[source].getSelf()
                    input_width += sensor.encoder.getWidth()
                else:
                    input_width += params['TP_PARAMS']['cellsPerColumn'] * params['TP_PARAMS']['columnCount']

            params['SP_PARAMS']['inputWidth'] = input_width
            self._makeRegion(name, params)

        # link
        print 'link network ...'
        for source, dest_list in self.net_structure.items():
            for dest in dest_list:
                if source in self.sensor_params.keys():
                    self._linkRegion(source, dest)
                else:
                    if self.tp_enable:
                        self._linkRegion("tp_" + source, dest)
                    else:
                        self._linkRegion("sp_" + source, dest)

        # initialize
        print 'initializing network ...'
        self.network.initialize()
        for name in self.dest_region_params.keys():
            self._initRegion(name)

        return
    def _createNetwork(self):

        def deepupdate(original, update):
            """
            Recursively update a dict.
            Nested dicts are not overwritten; they are merged recursively.
            """
            for key, value in original.iteritems():
                if key not in update:
                    update[key] = value
                elif isinstance(value, dict):
                    deepupdate(value, update[key])
            return update


        from nupic.algorithms.anomaly import computeAnomalyScore
        from nupic.encoders import MultiEncoder
        from nupic.engine import Network
        import create_network as cn
        import json
        import itertools


        self.network = Network()

        # sensor
        for sensor_name, change_params in self.sensor_params.items():
            self.network.addRegion(sensor_name, "py.RecordSensor", json.dumps({"verbosity": 0}))
            sensor = self.network.regions[sensor_name].getSelf()

            # set encoder
            params = deepupdate(cn.SENSOR_PARAMS, change_params)
            encoder = MultiEncoder()
            encoder.addMultipleEncoders( params )
            sensor.encoder         = encoder

            # set datasource
            sensor.dataSource      = cn.DataBuffer()


        # network
        print 'create network ...'
        for source, dest_list in self.net_structure.items():
            for dest in dest_list:
                change_params = self.dest_resgion_data[dest]
                params = deepupdate(cn.PARAMS, change_params)

                if source in self.sensor_params.keys():
                    sensor = self.network.regions[source].getSelf()
                    params['SP_PARAMS']['inputWidth'] = sensor.encoder.getWidth()
                    self._addRegion(source, dest, params)
                else:
                    self._addRegion("tp_" + source, dest, params)

        # initialize
        print 'initializing network ...'
        self.network.initialize()
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            self._initRegion(name)


        # TODO: With a 1-3-1 structure, might blindly increasing the number of TP cells be counterproductive?

        return
def createEncoder(encoderParams):
  encoder = MultiEncoder()
  encoder.addMultipleEncoders(encoderParams)
  return encoder
def createEncoder(encoderParams):
    encoder = MultiEncoder()
    encoder.addMultipleEncoders(encoderParams)
    return encoder
Example #32
    def _createNetwork(self):
        def deepupdate(original, update):
            """
            Recursively update a dict.
            Nested dicts are not overwritten; they are merged recursively.
            """
            if update is None:
                return None
            for key, value in original.iteritems():
                if key not in update:
                    update[key] = value
                elif isinstance(value, dict):
                    deepupdate(value, update[key])
            return update

        self.network = Network()

        # check
        # if self.selectivity not in self.dest_region_params.keys():
        #     raise Exception, "There is no selected region : " + self.selectivity
        if len(self.net_structure.keys()) != len(set(self.net_structure.keys())):
            raise Exception, "There are duplicated net_structure keys : " + str(self.net_structure.keys())

        # sensor
        for sensor_name, params in self.sensor_params.items():
            self.network.addRegion(sensor_name, "py.RecordSensor",
                                   json.dumps({"verbosity": 0}))
            sensor = self.network.regions[sensor_name].getSelf()

            # set encoder
            #params = deepupdate(cn.SENSOR_PARAMS, params)
            encoder = MultiEncoder()
            encoder.addMultipleEncoders(params)
            sensor.encoder = encoder
            sensor.dataSource = DataBuffer()

        # network
        print 'create element ...'
        for name in self.dest_region_params.keys():
            change_params = self.dest_region_params[name]
            params = deepupdate(self.default_params, change_params)
            # input width
            input_width = 0
            for source in [
                    s for s, d in self.net_structure.items() if name in d
            ]:
                if source in self.sensor_params.keys():
                    sensor = self.network.regions[source].getSelf()
                    input_width += sensor.encoder.getWidth()
                else:
                    input_width += params['TP_PARAMS'][
                        'cellsPerColumn'] * params['TP_PARAMS']['columnCount']

            params['SP_PARAMS']['inputWidth'] = input_width
            self._makeRegion(name, params)

        # link
        print 'link network ...'
        for source, dest_list in self.net_structure.items():
            for dest in dest_list:
                if source in self.sensor_params.keys():
                    self._linkRegion(source, dest)
                else:
                    if self.tp_enable:
                        self._linkRegion("tp_" + source, dest)
                    else:
                        self._linkRegion("sp_" + source, dest)

        # initialize
        print 'initializing network ...'
        self.network.initialize()
        for name in self.dest_region_params.keys():
            self._initRegion(name)

        return
Example #33
class Entity():
    def __init__(self, columnCount, InputEncoderParams, toL4ConnectorParamsI,
                 toL4ConnectorParamsII, toL5ConnectorParams,
                 toD1ConnectorParams, toD2ConnectorParams, L4Params, L5Params,
                 k, D1Params, D2Params):
        self.columnCount = columnCount
        self.toL4ConnectorParamsI = toL4ConnectorParamsI
        self.toL4ConnectorParamsII = toL4ConnectorParamsII
        self.toL5ConnectorParams = toL5ConnectorParams
        self.toD1ConnectorParams = toD1ConnectorParams
        self.toD2ConnectorParams = toD2ConnectorParams
        self.L4Params = L4Params
        self.L5Params = L5Params
        self.k = k
        self.D1Params = D1Params
        self.D2Params = D2Params
        self.learning = False

        #encoder
        from nupic.encoders import MultiEncoder
        self.InputEncoder = MultiEncoder()
        self.InputEncoder.addMultipleEncoders(InputEncoderParams)
        print "Encoder Online"

        #spatialPoolers
        from nupic.algorithms.spatial_pooler import SpatialPooler
        self.toL4ConnectorI = SpatialPooler(
            inputDimensions=(toL4ConnectorParamsI["inputDimensions"], ),
            columnDimensions=(columnCount, ),
            potentialPct=toL4ConnectorParamsI["potentialPct"],
            globalInhibition=toL4ConnectorParamsI["globalInhibition"],
            localAreaDensity=toL4ConnectorParamsI["localAreaDensity"],
            numActiveColumnsPerInhArea=toL4ConnectorParamsI[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL4ConnectorParamsI["synPermInactiveDec"],
            synPermActiveInc=toL4ConnectorParamsI["synPermActiveInc"],
            synPermConnected=toL4ConnectorParamsI["synPermConnected"],
            boostStrength=toL4ConnectorParamsI["boostStrength"],
            seed=toL4ConnectorParamsI["seed"],
            wrapAround=toL4ConnectorParamsI["wrapAround"])  #this part sucks
        self.toL4ConnectorII = SpatialPooler(
            inputDimensions=(columnCount * 3, ),
            columnDimensions=(columnCount, ),
            potentialPct=toL4ConnectorParamsII["potentialPct"],
            globalInhibition=toL4ConnectorParamsII["globalInhibition"],
            localAreaDensity=toL4ConnectorParamsII["localAreaDensity"],
            numActiveColumnsPerInhArea=toL4ConnectorParamsII[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL4ConnectorParamsII["synPermInactiveDec"],
            synPermActiveInc=toL4ConnectorParamsII["synPermActiveInc"],
            synPermConnected=toL4ConnectorParamsII["synPermConnected"],
            boostStrength=toL4ConnectorParamsII["boostStrength"],
            seed=toL4ConnectorParamsII["seed"],
            wrapAround=toL4ConnectorParamsII["wrapAround"])
        print "toL4Connector Online"
        self.toL5Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toL5ConnectorParams["potentialPct"],
            globalInhibition=toL5ConnectorParams["globalInhibition"],
            localAreaDensity=toL5ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toL5ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL5ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toL5ConnectorParams["synPermActiveInc"],
            synPermConnected=toL5ConnectorParams["synPermConnected"],
            boostStrength=toL5ConnectorParams["boostStrength"],
            seed=toL5ConnectorParams["seed"],
            wrapAround=toL5ConnectorParams["wrapAround"])
        print "toL5Connector Online"
        self.toD1Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toD1ConnectorParams["potentialPct"],
            globalInhibition=toD1ConnectorParams["globalInhibition"],
            localAreaDensity=toD1ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toD1ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toD1ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toD1ConnectorParams["synPermActiveInc"],
            synPermConnected=toD1ConnectorParams["synPermConnected"],
            boostStrength=toD1ConnectorParams["boostStrength"],
            seed=toD1ConnectorParams["seed"],
            wrapAround=toD1ConnectorParams["wrapAround"])
        print "toD1Connector Online"
        self.toD2Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toD2ConnectorParams["potentialPct"],
            globalInhibition=toD2ConnectorParams["globalInhibition"],
            localAreaDensity=toD2ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toD2ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toD2ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toD2ConnectorParams["synPermActiveInc"],
            synPermConnected=toD2ConnectorParams["synPermConnected"],
            boostStrength=toD2ConnectorParams["boostStrength"],
            seed=toD2ConnectorParams["seed"],
            wrapAround=toD2ConnectorParams["wrapAround"])
        print "toD2Connector Online"

        #HTM Layers
        from nupic.algorithms.temporal_memory import TemporalMemory
        self.L4ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.L4 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
        )
        print "L4 Online"
        self.L5ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.L5 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
        )
        print "L5 Online"
        self.D1ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.D1 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
            initialPermanence=0.21,
            connectedPermanence=0.5,
        )
        print "D1 Online"
        self.D2ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.D2 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
            initialPermanence=0.21,
            connectedPermanence=0.5,
        )
        print "D2 Online"

    def encode_input(self, sine1, sine2, angularSpeed1, angularSpeed2,
                     efferenceCopy):
        return self.InputEncoder.encode({
            "sine1": sine1,
            "sine2": sine2,
            "angularSpeed1": angularSpeed1,
            "angularSpeed2": angularSpeed2,
            "efferenceCopy": efferenceCopy
        })

    def reset(self):
        self.action = 0
        self.L4.reset()
        self.L5.reset()
        self.D1.reset()
        self.D2.reset()

    def mimic(self, observation, action):
        # Mimicking only requires remembering the given obs-act pattern, so the striatum is not involved in this function.
        self.learning = True
        self.action = action
        encodedInput = self.encode_input(observation[0], observation[2],
                                         observation[4], observation[5],
                                         str(action))

        self.toL4ConnectorI.compute(encodedInput, self.learning,
                                    self.L4ActiveColumns)
        self.L4.compute(self.L4ActiveColumns, learn=self.learning)
        L4activeColumnIndices = numpy.nonzero(self.L4ActiveColumns)[0]

        L5Temp = numpy.zeros(self.columnCount, dtype=int)
        for column in L4activeColumnIndices:
            L5Temp[column] = 1
        self.toL5Connector.compute(L5Temp, self.learning, self.L5ActiveColumns)
        self.L5.compute(self.L5ActiveColumns, learn=self.learning)
        L5activeColumnIndices = numpy.nonzero(self.L5ActiveColumns)[0]

        #no action generation is needed in this func

    def learn(self, env, observation, expectedReaction):
        # We humans learn by trial and error, and so does an AI agent. Neural networks have
        # backpropagation, but HTM has no clear way to do reinforcement learning (where would
        # rewards be fed in?). Here I try something new.
        self.learning = False  #...trial
        encodedInput = self.encode_input(observation[0], observation[2],
                                         observation[4], observation[5],
                                         str(self.action))

        self.toL4ConnectorI.compute(encodedInput, self.learning,
                                    self.L4ActiveColumns)
        L4Temp = numpy.zeros(
            self.columnCount * 3, dtype=int
        )  #ready to receive D1's disinhibition and D2's inhibition
        L4activeColumnIndices = numpy.nonzero(self.L4ActiveColumns)[0]
        for column in L4activeColumnIndices:
            L4Temp[int(column) * 3] = 1
        D1ActiveColumnsIndices = numpy.nonzero(self.D1ActiveColumns)[0]
        for column in D1ActiveColumnsIndices:
            L4Temp[int(column) * 3 + 1] = 1
        D2ActiveColumnsIndices = numpy.nonzero(self.D2ActiveColumns)[0]
        for i in range(self.columnCount - 1):
            L4Temp[i * 3 + 2] = 1
        for column in D2ActiveColumnsIndices:  # achieve inhibition in this way
            L4Temp[int(column) * 3 + 2] = 0
        self.toL4ConnectorII.compute(L4Temp, self.learning,
                                     self.L4ActiveColumns)
        self.L4.compute(self.L4ActiveColumns, learn=self.learning)
        L4activeColumnIndices = numpy.nonzero(self.L4ActiveColumns)[0]

        L5Temp = numpy.zeros(self.columnCount, dtype=int)
        for column in L4activeColumnIndices:
            L5Temp[column] = 1
        self.toL5Connector.compute(L5Temp, self.learning, self.L5ActiveColumns)
        self.L5.compute(self.L5ActiveColumns, learn=self.learning)
        L5activeColumnIndices = numpy.nonzero(self.L5ActiveColumns)[0]

        #Action Generation
        p = 84  # there are 84 bits in the SDR representing the action fed into the agent; this is the "efference copy"
        count0 = 0
        count1 = 0
        count2 = 0
        for activeIndice in L5activeColumnIndices:
            convertedIndice = (activeIndice + 1) * 1126 / self.columnCount
            if convertedIndice <= 1126 - p / 4 and convertedIndice > 1126 - p / 2:
                count2 = count2 + 1
            if convertedIndice <= 1126 - p / 2 and convertedIndice > 1126 - 3 * p / 4:
                count1 = count1 + 1
            if convertedIndice <= 1126 - 3 * p / 4 and convertedIndice > 1126 - p:
                count0 = count0 + 1

        if count2 == max(count0, count1, count2):
            self.action = 2
        if count1 == max(count0, count1, count2):
            self.action = 1
        if count0 == max(count0, count1, count2):
            self.action = 0

        #...and error
        if self.action == expectedReaction:
            reward = 0.1
        else:
            reward = -0.1

        self.D1.setConnectedPermanence(self.D1.getConnectedPermanence() *
                                       (self.k**(-reward)))  #reward
        self.D2.setConnectedPermanence(self.D2.getConnectedPermanence() *
                                       (self.k**reward))  #punishment

        # Learn to correct mistakes (remember what's right and what's wrong)
        self.learning = True
        DTemp = numpy.zeros(self.columnCount, dtype=int)
        for column in L5activeColumnIndices:
            DTemp[column] = 1
        self.toD1Connector.compute(DTemp, self.learning, self.D1ActiveColumns)
        self.toD2Connector.compute(DTemp, self.learning, self.D2ActiveColumns)
        self.D1.compute(self.D1ActiveColumns, learn=self.learning)
        self.D2.compute(self.D2ActiveColumns, learn=self.learning)

        return reward

    def react(self, observation):
        self.learning = False
        encodedInput = self.encode_input(observation[0], observation[2],
                                         observation[4], observation[5],
                                         str(self.action))

        self.toL4ConnectorI.compute(encodedInput, self.learning,
                                    self.L4ActiveColumns)
        L4Temp = numpy.zeros(self.columnCount * 3, dtype=int)
        L4activeColumnIndices = numpy.nonzero(self.L4ActiveColumns)[0]
        for column in L4activeColumnIndices:
            L4Temp[int(column) * 3] = 1
        D1ActiveColumnsIndices = numpy.nonzero(self.D1ActiveColumns)[0]
        for column in D1ActiveColumnsIndices:
            L4Temp[int(column) * 3 + 1] = 1
        D2ActiveColumnsIndices = numpy.nonzero(self.D2ActiveColumns)[0]
        for i in range(self.columnCount - 1):
            L4Temp[i * 3 + 2] = 1
        for column in D2ActiveColumnsIndices:
            L4Temp[int(column) * 3 + 2] = 0
        self.toL4ConnectorII.compute(L4Temp, self.learning,
                                     self.L4ActiveColumns)
        self.L4.compute(self.L4ActiveColumns, learn=self.learning)
        L4activeColumnIndices = numpy.nonzero(self.L4ActiveColumns)[0]

        L5Temp = numpy.zeros(self.columnCount, dtype=int)
        for column in L4activeColumnIndices:
            L5Temp[column] = 1
        self.toL5Connector.compute(L5Temp, self.learning, self.L5ActiveColumns)
        self.L5.compute(self.L5ActiveColumns, learn=self.learning)
        L5activeColumnIndices = numpy.nonzero(self.L5ActiveColumns)[0]

        p = 84
        count0 = 0
        count1 = 0
        count2 = 0
        for activeIndice in L5activeColumnIndices:
            convertedIndice = (activeIndice + 1) * 1126 / self.columnCount
            if convertedIndice <= 1126 - p / 4 and convertedIndice > 1126 - p / 2:
                count2 = count2 + 1
            if convertedIndice <= 1126 - p / 2 and convertedIndice > 1126 - 3 * p / 4:
                count1 = count1 + 1
            if convertedIndice <= 1126 - 3 * p / 4 and convertedIndice > 1126 - p:
                count0 = count0 + 1

        if count2 == max(count0, count1, count2):
            self.action = 2
        if count1 == max(count0, count1, count2):
            self.action = 1
        if count0 == max(count0, count1, count2):
            self.action = 0

        return self.action
def createEncoder(encoderParams):
  """Create a multi-encoder from params."""
  encoder = MultiEncoder()
  encoder.addMultipleEncoders(encoderParams)
  return encoder
    def _createNetwork(self):

        def deepupdate(original, update):
            """
            Recursively update a dict.
            Nested dicts are not overwritten; they are merged recursively.
            """
            if update is None:
                return None
            for key, value in original.iteritems():
                if key not in update:
                    update[key] = value
                elif isinstance(value, dict):
                    deepupdate(value, update[key])
            return update




        self.network = Network()

        # check
        if self.selectivity not in self.dest_resgion_data.keys():
            raise Exception, "There is no selected region : " + self.selectivity
        if len(self.net_structure.keys()) != len(set(self.net_structure.keys())):
            raise Exception, "There are duplicated net_structure keys : " + str(self.net_structure.keys())

        # sensor
        for sensor_name, change_params in self.sensor_params.items():
            self.network.addRegion(sensor_name, "py.RecordSensor", json.dumps({"verbosity": 0}))
            sensor = self.network.regions[sensor_name].getSelf()

            # set encoder
            params = deepupdate(cn.SENSOR_PARAMS, change_params)
            encoder = MultiEncoder()
            encoder.addMultipleEncoders( params )
            sensor.encoder         = encoder

            # set datasource
            sensor.dataSource      = cn.DataBuffer()


        # network
        print 'create network ...'
        for source, dest_list in self.net_structure.items():
            for dest in dest_list:
                change_params = self.dest_resgion_data[dest]
                params = deepupdate(cn.PARAMS, change_params)

                if source in self.sensor_params.keys():
                    sensor = self.network.regions[source].getSelf()
                    params['SP_PARAMS']['inputWidth'] = sensor.encoder.getWidth()
                    self._addRegion(source, dest, params)
                else:
                    #self._addRegion("sp_" + source, dest, params)
                    self._addRegion("tp_" + source, dest, params)

        # initialize
        print 'initializing network ...'
        self.network.initialize()
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            self._initRegion(name)


        # TODO: With a 1-3-1 structure, might blindly increasing the number of TP cells be counterproductive?

        return