Example 1
def _createEncoder(encoders):
    """
  Creates and returns a MultiEncoder.

  @param encoders: (dict) Keys are the encoders' names, values are dicts of
  the params; an example is shown below.
  @return encoder: (MultiEncoder) See nupic.encoders.multi.py. Example input:
    {"energy": {"fieldname": u"energy",
                "type": "ScalarEncoder",
                "name": u"consumption",
                "minval": 0.0,
                "maxval": 100.0,
                "w": 21,
                "n": 500},
     "timestamp": {"fieldname": u"timestamp",
                   "type": "DateEncoder",
                   "name": u"timestamp_timeOfDay",
                   "timeOfDay": (21, 9.5)},
    }
  """
    if not isinstance(encoders, dict):
        raise TypeError("Encoders specified in incorrect format.")

    encoder = MultiEncoder()
    encoder.addMultipleEncoders(encoders)

    return encoder
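A minimal usage sketch for the helper above. It assumes nupic is installed, that the _createEncoder defined just above is in scope, and that MultiEncoder.encode accepts a dict keyed by each sub-encoder's fieldname; the record values are hypothetical:

from datetime import datetime

# Parameters mirroring the docstring example above.
params = {
    "energy": {"fieldname": u"energy", "type": "ScalarEncoder",
               "name": u"consumption", "minval": 0.0, "maxval": 100.0,
               "w": 21, "n": 500},
    "timestamp": {"fieldname": u"timestamp", "type": "DateEncoder",
                  "name": u"timestamp_timeOfDay", "timeOfDay": (21, 9.5)},
}
encoder = _createEncoder(params)

# Encode one record; the keys match the "fieldname" entries above.
sdr = encoder.encode({"energy": 42.0,
                      "timestamp": datetime(2014, 5, 2, 13, 0)})
print("width=%d, active bits=%d" % (encoder.getWidth(), sdr.sum()))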
Example 2
def _createEncoder():
    """Create the encoder instance for our test and return it."""
    encoder = MultiEncoder()
    encoder.addMultipleEncoders({
        'timestamp':
        dict(fieldname='timestamp', type='DateEncoder', timeOfDay=(5, 5)),
        'attendeeCount':
        dict(fieldname='attendeeCount',
             type='ScalarEncoder',
             name='attendeeCount',
             minval=0,
             maxval=270,
             clipInput=True,
             w=5,
             resolution=10),
        'consumption':
        dict(fieldname='consumption',
             type='ScalarEncoder',
             name='consumption',
             minval=0,
             maxval=115,
             clipInput=True,
             w=5,
             resolution=5),
    })

    return encoder
Example 3
def createEncoder():
    """
  Creates and returns a #MultiEncoder including a ScalarEncoder for
  energy consumption and a DateEncoder for the time of the day.

  @see nupic/encoders/__init__.py for type to file-name mapping
  @see nupic/encoders for encoder source files
  """
    encoder = MultiEncoder()
    encoder.addMultipleEncoders({
        "consumption": {
            "fieldname": u"consumption",
            "type": "ScalarEncoder",
            "name": u"consumption",
            "minval": 0.0,
            "maxval": 100.0,
            "clipInput": True,
            "w": 21,
            "n": 500
        },
        "timestamp_timeOfDay": {
            "fieldname": u"timestamp",
            "type": "DateEncoder",
            "name": u"timestamp_timeOfDay",
            "timeOfDay": (21, 9.5)
        }
    })
    return encoder
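A small inspection sketch for the encoder above, assuming the createEncoder() just defined is in scope and that MultiEncoder keeps its sub-encoders in an encoders list of (name, encoder, offset) tuples:

encoder = createEncoder()

# Print how the combined SDR is laid out across the two sub-encoders.
for name, subEncoder, offset in encoder.encoders:
    print("%-12s offset=%-4d width=%d" % (name, offset, subEncoder.getWidth()))
print("total width=%d" % encoder.getWidth())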
Example 4
    def initialize(self):
        """
        Initialize this node.
        """
        Node.initialize(self)

        # Initialize input bits
        self.bits = []
        for x in range(self.width):
            for y in range(self.height):
                bit = Bit()
                bit.x = x
                bit.y = y
                self.bits.append(bit)

        if self.data_source_type == DataSourceType.FILE:
            # Initialize this node by opening the file and placing the cursor
            # on the first record.

            # If the file name provided is a relative path, resolve it against
            # the project file path
            if self.file_name != '' and os.path.dirname(self.file_name) == '':
                full_file_name = os.path.dirname(Global.project.file_name) + '/' + self.file_name
            else:
                full_file_name = self.file_name

            # Check that the file really exists
            if not os.path.isfile(full_file_name):
                QtWidgets.QMessageBox.warning(None, "Warning", "Input stream file '" + full_file_name + "' was not found or specified.", QtWidgets.QMessageBox.Ok)
                return

            # Create a data source to read the file
            self.data_source = FileRecordStream(full_file_name)

        elif self.data_source_type == DataSourceType.DATABASE:
            pass

        self.encoder = MultiEncoder()
        for encoding in self.encodings:
            encoding.initialize()

            # Create an encoder instance given its module, class, and constructor params
            encoding.encoder = getInstantiatedClass(encoding.encoder_module, encoding.encoder_class, encoding.encoder_params)

            # Take the first part of encoder field name as encoder name
            # Ex: timestamp_weekend.weekend => timestamp_weekend
            encoding.encoder.name = encoding.encoder_field_name.split('.')[0]

            # Add sub-encoder to multi-encoder list
            self.encoder.addEncoder(encoding.data_source_field_name, encoding.encoder)

        # If the encoder is larger than the sensor, warn and abort initialization
        encoder_size = self.encoder.getWidth()
        sensor_size = self.width * self.height
        if encoder_size > sensor_size:
            QtWidgets.QMessageBox.warning(None, "Warning", "'" + self.name + "': Encoder size (" + str(encoder_size) + ") is different from sensor size (" + str(self.width) + " x " + str(self.height) + " = " + str(sensor_size) + ").", QtWidgets.QMessageBox.Ok)
            return

        return True
Example 5
def createEncoder(encoderParams):
    '''
    Create a multi-encoder from params.
    '''

    encoder = MultiEncoder()
    encoder.addMultipleEncoders(encoderParams)
    return encoder
Example 6
def createEncoder(rdse_resolution):
    """Create the encoder instance for our test and return it."""
    series_rdse = RandomDistributedScalarEncoder(
        rdse_resolution,
        name="rdse with resolution {}".format(rdse_resolution))
    encoder = MultiEncoder()
    encoder.addEncoder("series", series_rdse)
    return encoder
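A usage sketch for the RDSE-backed encoder above, assuming the createEncoder(rdse_resolution) just defined is in scope; it encodes a few values and reports how many active bits each shares with the first one (overlap should drop as values move apart):

import numpy

encoder = createEncoder(0.5)  # hypothetical resolution

values = [10.0, 10.5, 12.0, 20.0]
sdrs = [encoder.encode({"series": v}) for v in values]

# Overlap = number of active bits two encodings have in common.
for v, sdr in zip(values[1:], sdrs[1:]):
    print("overlap(%.1f, %.1f) = %d"
          % (values[0], v, int(numpy.dot(sdrs[0], sdr))))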
Example 7
    def __init__(self, resolution=.5):
        """Create the encoder instances used by this test."""
        self.resolution = resolution
        self.series_encoder = RandomDistributedScalarEncoder(
            self.resolution, name="RDSE-(res={})".format(self.resolution))
        self.encoder = MultiEncoder()
        self.encoder.addEncoder("series", self.series_encoder)
        self.last_m_encode = np.zeros(1)
Example 8
def createEncoder():
  """Create the encoder instance for our test and return it."""
  consumption_encoder = ScalarEncoder(21, 0.0, 100.0, n=50, name="consumption",
      clipInput=True)
  time_encoder = DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay")

  encoder = MultiEncoder()
  encoder.addEncoder("consumption", consumption_encoder)
  encoder.addEncoder("timestamp", time_encoder)

  return encoder
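For comparison, a sketch that builds the same two fields through addMultipleEncoders (the dict style used elsewhere on this page) and checks that the result lines up with the hand-built encoder above. It assumes createEncoder() from this example is in scope and that the dict-based path registers sub-encoders under their fieldname in sorted key order; the differing-bit count is printed to verify that assumption:

import numpy
from datetime import datetime

from nupic.encoders import MultiEncoder

dict_encoder = MultiEncoder()
dict_encoder.addMultipleEncoders({
    "consumption": {"fieldname": u"consumption", "type": "ScalarEncoder",
                    "name": u"consumption", "minval": 0.0, "maxval": 100.0,
                    "n": 50, "w": 21, "clipInput": True},
    "timestamp_timeOfDay": {"fieldname": u"timestamp", "type": "DateEncoder",
                            "name": u"timestamp_timeOfDay",
                            "timeOfDay": (21, 9.5)},
})

manual_encoder = createEncoder()  # the function defined just above

record = {"consumption": 42.0, "timestamp": datetime(2014, 5, 2, 13, 0)}
a = manual_encoder.encode(record)
b = dict_encoder.encode(record)
print("widths: %d vs %d, differing bits: %d"
      % (manual_encoder.getWidth(), dict_encoder.getWidth(),
         int(numpy.sum(a != b))))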
Example 9
def createClassifierEncoder():
    """Create the encoder instance for our test and return it."""
    encoder = MultiEncoder()
    encoder.addMultipleEncoders({
        "y": {
            "type": "CategoryEncoder",
            "categoryList": ['label_1', 'label_2'],
            "fieldname": u"y",
            "name": u"y",
            "w": 21,
        },
    })

    return encoder
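A usage sketch for the classifier encoder above, assuming the createClassifierEncoder() just defined is in scope and that CategoryEncoder reserves disjoint bit ranges per category (plus a bucket for unknown values):

import numpy

encoder = createClassifierEncoder()  # defined just above

sdr1 = encoder.encode({"y": "label_1"})
sdr2 = encoder.encode({"y": "label_2"})

# Distinct categories should map to non-overlapping sets of active bits.
print("width=%d" % encoder.getWidth())
print("active bits per label: %d / %d" % (sdr1.sum(), sdr2.sum()))
print("overlap between labels: %d" % int(numpy.dot(sdr1, sdr2)))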
Example 10
def createEncoder():

    diagCoorA_encoder = ScalarEncoder(55, 0.0, 200.0, n=200, name="diagCoorA")
    diagCoorB_encoder = ScalarEncoder(55, 0.0, 200.0, n=200, name="diagCoorB")
    diagCoorC_encoder = ScalarEncoder(55, 0.0, 200.0, n=200, name="diagCoorC")

    global encoder
    encoder = MultiEncoder()

    encoder.addEncoder("diagCoorA", diagCoorA_encoder)
    encoder.addEncoder("diagCoorB", diagCoorB_encoder)
    encoder.addEncoder("diagCoorC", diagCoorC_encoder)

    return encoder
Example 11
def createSensorEncoder():
    """Create the encoder instance for our test and return it."""
    encoder = MultiEncoder()
    encoder.addMultipleEncoders({
        "x": {
            "type": "ScalarEncoder",
            "fieldname": u"x",
            "name": u"x",
            "maxval": 100.0,
            "minval": 0.0,
            "n": 100,
            "w": 21,
            "clipInput": True,
        },
    })

    return encoder
Example 12
    def _makeRegion(self, name, params):
        sp_name    = "sp_" + name
        if self.tp_enable:
            tp_name    = "tp_" + name
        class_name = "class_" + name

        # addRegion
        self.network.addRegion(sp_name, "py.SPRegion", json.dumps(params['SP_PARAMS']))
        if self.tp_enable:
            self.network.addRegion(tp_name, "py.TPRegion", json.dumps(params['TP_PARAMS']))
        self.network.addRegion( class_name, "py.CLAClassifierRegion", json.dumps(params['CLASSIFIER_PARAMS']))

        encoder = MultiEncoder()
        encoder.addMultipleEncoders(self.class_encoder_params)
        self.classifier_encoder_list[class_name]  = encoder
        if self.tp_enable:
            self.classifier_input_list[class_name]    = tp_name
        else:
            self.classifier_input_list[class_name]    = sp_name
Example 13
def createEncoder(multilevelAnomaly=False):
    encoder = MultiEncoder()
    if not multilevelAnomaly:
        encoder.addMultipleEncoders({
            "cpu": {
                "fieldname": u"cpu",
                "type": "ScalarEncoder",
                "name": u"cpu",
                "minval": 0.0,
                "maxval": 100.0,
                "clipInput": True,
                "w": 21,
                "n": 500
            }
        })
    else:
        encoder.addMultipleEncoders({
            "cpu": {
                "fieldname": u"cpu",
                "type": "ScalarEncoder",
                "name": u"cpu",
                "minval": 0.0,
                "maxval": 100.0,
                "clipInput": True,
                "w": 21,
                "n": 500
            },
            "mem": {
                "fieldname": u"mem",
                "type": "ScalarEncoder",
                "name": u"mem",
                "minval": 0.0,
                "maxval": 100.0,
                "clipInput": True,
                "w": 21,
                "n": 500
            }
        })

    return encoder
Example 14
def createEncoder():
    #volume_encoder = ScalarEncoder(7, 0.0, 70.0, n=200, name="volume", clipInput=False, forced=True)
    #floorheight_encoder = ScalarEncoder(1, 0.0, 70.0, n=25, name="floorheight", clipInput=False, forced=True)

    diagCoorA_encoder = ScalarEncoder(257,
                                      0.0,
                                      200.0,
                                      n=2048,
                                      name="diagCoorA",
                                      clipInput=False,
                                      forced=True)
    #diagCoorB_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorB", clipInput=False, forced=True)
    #diagCoorC_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorC", clipInput=False, forced=True)
    #diagCoorD_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorD", clipInput=False, forced=True)
    #diagCoorE_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorE", clipInput=False, forced=True)
    #diagCoorF_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorF", clipInput=False, forced=True)
    #diagCoorG_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorG", clipInput=False, forced=True)
    #diagCoorH_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorH", clipInput=False, forced=True)
    #diagCoorI_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorI", clipInput=False, forced=True)
    #diagCoorJ_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorJ", clipInput=False, forced=True)

    global encoder
    encoder = MultiEncoder()

    #encoder.addEncoder("volume", volume_encoder)
    #encoder.addEncoder("floorheight", floorheight_encoder)
    encoder.addEncoder("diagCoorA", diagCoorA_encoder)
    #encoder.addEncoder("diagCoorB", diagCoorB_encoder)
    #encoder.addEncoder("diagCoorC", diagCoorC_encoder)
    #encoder.addEncoder("diagCoorD", diagCoorD_encoder)
    #encoder.addEncoder("diagCoorE", diagCoorE_encoder)
    #encoder.addEncoder("diagCoorF", diagCoorF_encoder)
    #encoder.addEncoder("diagCoorG", diagCoorG_encoder)
    #encoder.addEncoder("diagCoorH", diagCoorH_encoder)
    #encoder.addEncoder("diagCoorI", diagCoorI_encoder)
    #encoder.addEncoder("diagCoorJ", diagCoorJ_encoder)

    return encoder
Example 15
def createEncoder():
    """Create the encoder instance for our test and return it."""
    encoder = MultiEncoder()
    encoder.addMultipleEncoders({
        "consumption": {
            "clipInput": True,
            "fieldname": u"consumption",
            "maxval": 100.0,
            "minval": 0.0,
            "n": 50,
            "name": u"consumption",
            "type": "ScalarEncoder",
            "w": 21,
        },
        "timestamp_timeOfDay": {
            "fieldname": u"timestamp",
            "name": u"timestamp_timeOfDay",
            "timeOfDay": (21, 9.5),
            "type": "DateEncoder",
        },
    })

    return encoder
Example 16
def createEncoder():

    diagCoorA_encoder = ScalarEncoder(105,
                                      0.0,
                                      200.0,
                                      n=1324,
                                      name="diagCoorA",
                                      clipInput=False,
                                      forced=True)
    diagCoorB_encoder = ScalarEncoder(105,
                                      0.0,
                                      200.0,
                                      n=1324,
                                      name="diagCoorB",
                                      clipInput=False,
                                      forced=True)

    global encoder
    encoder = MultiEncoder()

    encoder.addEncoder("diagCoorA", diagCoorA_encoder)
    encoder.addEncoder("diagCoorB", diagCoorB_encoder)

    return encoder
Example 17
    def testDeltaFilter(self):
        """
    data looks like:        should generate deltas
      "t"   "s"               "dt"     "ds"

      t     10                 X
      t+1s  20                 1s      10
      t+1d  50                 86399   30

    r t+1d+1s  60              X
      r+1d+3s  65              2s       5

    """
        r = RecordSensor()
        filename = findDataset("extra/qa/delta.csv")
        datasource = FileRecordStream(filename)
        r.dataSource = datasource
        n = 50
        encoder = MultiEncoder({
            'blah':
            dict(fieldname="s",
                 type='ScalarEncoder',
                 n=n,
                 w=11,
                 minval=0,
                 maxval=100)
        })

        r.encoder = encoder

        # Test #1 -- no deltas
        # Make sure we get a reset when the gym changes
        resetOut = numpy.zeros((1, ), dtype='float')
        sequenceIdOut = numpy.zeros((1, ), dtype='float')
        dataOut = numpy.zeros((n, ), dtype='float')
        sourceOut = numpy.zeros((1, ), dtype='float')
        categoryOut = numpy.zeros((1, ), dtype='float')

        outputs = dict(resetOut=resetOut,
                       sourceOut=sourceOut,
                       sequenceIdOut=sequenceIdOut,
                       dataOut=dataOut,
                       categoryOut=categoryOut)
        inputs = dict()
        r.verbosity = 0

        r.compute(inputs, outputs)
        lr = r.lastRecord
        self.assertEqual(
            lr['t'],
            datetime(year=2011, month=2, day=24, hour=16, minute=8, second=0))
        self.assertEqual(lr['s'], 10)
        self.assertEqual(lr['_reset'], 1)
        self.assertTrue('dt' not in lr)
        self.assertTrue('ds' not in lr)

        r.compute(inputs, outputs)
        lr = r.lastRecord
        self.assertEqual(
            lr['t'],
            datetime(year=2011, month=2, day=24, hour=16, minute=8, second=1))
        self.assertEqual(lr['s'], 20)
        self.assertEqual(lr['_reset'], 0)

        r.compute(inputs, outputs)
        lr = r.lastRecord
        self.assertEqual(
            lr['t'],
            datetime(year=2011, month=2, day=25, hour=16, minute=8, second=0))
        self.assertEqual(lr['s'], 50)
        self.assertEqual(lr['_reset'], 0)

        r.compute(inputs, outputs)
        lr = r.lastRecord
        self.assertEqual(
            lr['t'],
            datetime(year=2011, month=2, day=25, hour=16, minute=8, second=1))
        self.assertEqual(lr['s'], 60)
        self.assertEqual(lr['_reset'], 1)

        r.compute(inputs, outputs)
        lr = r.lastRecord
        self.assertEqual(
            lr['t'],
            datetime(year=2011, month=2, day=25, hour=16, minute=8, second=3))
        self.assertEqual(lr['s'], 65)
        self.assertEqual(lr['_reset'], 0)

        # Add filters

        r.preEncodingFilters = [DeltaFilter("s", "ds"), DeltaFilter("t", "dt")]
        r.rewind()

        # skip first record, which has a reset

        r.compute(inputs, outputs)
        lr = r.lastRecord
        self.assertEqual(
            lr['t'],
            datetime(year=2011, month=2, day=24, hour=16, minute=8, second=1))
        self.assertEqual(lr['s'], 20)
        # This record should have a reset since it is the first of a sequence.
        self.assertEqual(lr['_reset'], 1)
        self.assertEqual(lr['dt'], 1)
        self.assertEqual(lr['ds'], 10)

        r.compute(inputs, outputs)
        lr = r.lastRecord
        self.assertEqual(
            lr['t'],
            datetime(year=2011, month=2, day=25, hour=16, minute=8, second=0))
        self.assertEqual(lr['s'], 50)
        self.assertEqual(lr['_reset'], 0)
        self.assertEqual(lr['dt'], 3600 * 24 - 1)
        self.assertEqual(lr['ds'], 30)

        # next reset record is skipped

        r.compute(inputs, outputs)
        lr = r.lastRecord
        self.assertEqual(
            lr['t'],
            datetime(year=2011, month=2, day=25, hour=16, minute=8, second=3))
        self.assertEqual(lr['s'], 65)
        self.assertEqual(lr['_reset'], 1)
        self.assertEqual(lr['dt'], 2)
        self.assertEqual(lr['ds'], 5)
Example 18
    def testSimpleMulticlassNetworkPY(self):
        # Setup data record stream of fake data (with three categories)
        filename = _getTempFileName()
        fields = [("timestamp", "datetime", "T"), ("value", "float", ""),
                  ("reset", "int", "R"), ("sid", "int", "S"),
                  ("categories", "list", "C")]
        records = ([datetime(day=1, month=3, year=2010), 0.0, 1, 0, "0"], [
            datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1"
        ], [datetime(day=3, month=3, year=2010), 0.0, 0, 0,
            "0"], [datetime(day=4, month=3, year=2010), 1.0, 0, 0,
                   "1"], [datetime(day=5, month=3, year=2010), 0.0, 0, 0, "0"],
                   [datetime(day=6, month=3, year=2010), 1.0, 0, 0, "1"
                    ], [datetime(day=7, month=3, year=2010), 0.0, 0, 0, "0"],
                   [datetime(day=8, month=3, year=2010), 1.0, 0, 0, "1"])
        dataSource = FileRecordStream(streamID=filename,
                                      write=True,
                                      fields=fields)
        for r in records:
            dataSource.appendRecord(list(r))

        # Create the network and get region instances.
        net = Network()
        net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
        net.addRegion("classifier", "py.SDRClassifierRegion",
                      "{steps: '0', alpha: 0.001, implementation: 'py'}")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="dataOut",
                 destInput="bottomUpIn")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="categoryOut",
                 destInput="categoryIn")
        sensor = net.regions["sensor"]
        classifier = net.regions["classifier"]

        # Setup sensor region encoder and data stream.
        dataSource.close()
        dataSource = FileRecordStream(filename)
        sensorRegion = sensor.getSelf()
        sensorRegion.encoder = MultiEncoder()
        sensorRegion.encoder.addEncoder(
            "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
        sensorRegion.dataSource = dataSource

        # Get ready to run.
        net.initialize()

        # Train the network (by default learning is ON in the classifier, but assert
        # anyway) and then turn off learning and turn on inference mode.
        self.assertEqual(classifier.getParameter("learningMode"), 1)
        net.run(8)

        # Test the network on the same data as it trained on; should classify with
        # 100% accuracy.
        classifier.setParameter("inferenceMode", 1)
        classifier.setParameter("learningMode", 0)

        # Assert learning is OFF and that the classifier learned the dataset.
        self.assertEqual(classifier.getParameter("learningMode"), 0,
                         "Learning mode is not turned off.")
        self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                         "Inference mode is not turned on.")

        # make sure we can access all the parameters with getParameter
        self.assertEqual(classifier.getParameter("maxCategoryCount"), 2000)
        self.assertAlmostEqual(float(classifier.getParameter("alpha")), 0.001)
        self.assertEqual(int(classifier.getParameter("steps")), 0)
        self.assertTrue(classifier.getParameter("implementation") == "py")
        self.assertEqual(classifier.getParameter("verbosity"), 0)

        expectedCats = (
            [0.0],
            [1.0],
            [0.0],
            [1.0],
            [0.0],
            [1.0],
            [0.0],
            [1.0],
        )
        dataSource.rewind()
        for i in xrange(8):
            net.run(1)
            inferredCats = classifier.getOutputData("categoriesOut")
            self.assertSequenceEqual(
                expectedCats[i], inferredCats.tolist(),
                "Classififer did not infer expected category "
                "for record number {}.".format(i))
        # Close data stream, delete file.
        dataSource.close()
        os.remove(filename)
Example 19
    def _createNetwork(self):

        def deepupdate(original, update):
            """
            Recursively update a dict.
            Sub-dicts won't be overwritten but updated as well.
            """
            if update is None:
                return None
            for key, value in original.iteritems():
                if not key in update:
                    update[key] = value
                elif isinstance(value, dict):
                    deepupdate(value, update[key])
            return update

        self.network = Network()

        # check
        if self.selectivity not in self.dest_resgion_data.keys():
            raise Exception("There is no selected region : " + self.selectivity)
        if not len(self.net_structure.keys()) == len(set(self.net_structure.keys())):
            raise Exception("There are duplicated net_structure keys : " + str(self.net_structure.keys()))

        # sensor
        for sensor_name, change_params in self.sensor_params.items():
            self.network.addRegion(sensor_name, "py.RecordSensor", json.dumps({"verbosity": 0}))
            sensor = self.network.regions[sensor_name].getSelf()

            # set encoder
            params = deepupdate(cn.SENSOR_PARAMS, change_params)
            encoder = MultiEncoder()
            encoder.addMultipleEncoders( params )
            sensor.encoder         = encoder

            # set datasource
            sensor.dataSource      = cn.DataBuffer()


        # network
        print 'create network ...'
        for source, dest_list in self.net_structure.items():
            for dest in dest_list:
                change_params = self.dest_resgion_data[dest]
                params = deepupdate(cn.PARAMS, change_params)

                if source in self.sensor_params.keys():
                    sensor = self.network.regions[source].getSelf()
                    params['SP_PARAMS']['inputWidth'] = sensor.encoder.getWidth()
                    self._addRegion(source, dest, params)
                else:
                    #self._addRegion("sp_" + source, dest, params)
                    self._addRegion("tp_" + source, dest, params)

        # initialize
        print 'initializing network ...'
        self.network.initialize()
        for name in set( itertools.chain.from_iterable( self.net_structure.values() )):
            self._initRegion(name)


        # TODO: with a 1-3-1 structure, wouldn't blindly increasing the TP cell count be counterproductive?

        return
Example 20
def getDescription(datasets):

    # ========================================================================
    # Network definition

    # Encoder for the sensor
    encoder = MultiEncoder()
    if 'filenameCategory' in datasets:
        categories = [x.strip() for x in open(datasets['filenameCategory'])]
    else:
        categories = [chr(x + ord('a')) for x in range(26)]

    if config['overlappingPatterns']:
        encoder.addEncoder(
            "name",
            SDRCategoryEncoder(n=200,
                               w=config['spNumActivePerInhArea'],
                               categoryList=categories,
                               name="name"))
    else:
        encoder.addEncoder(
            "name",
            CategoryEncoder(w=config['spNumActivePerInhArea'],
                            categoryList=categories,
                            name="name"))

    # ------------------------------------------------------------------
    # Node params
    # The inputs are long, horizontal vectors
    inputDimensions = (1, encoder.getWidth())

    # Layout the coincidences vertically stacked on top of each other, each
    # looking at the entire input field.
    columnDimensions = (config['spCoincCount'], 1)

    # If we have disableSpatial, then set the number of "coincidences" to be the
    #  same as the encoder width
    if config['disableSpatial']:
        columnDimensions = (encoder.getWidth(), 1)
        config['trainSP'] = 0

    sensorParams = dict(
        # encoder/datasource are not parameters so don't include here
        verbosity=config['sensorVerbosity'])

    CLAParams = dict(
        # SP params
        disableSpatial=config['disableSpatial'],
        inputDimensions=inputDimensions,
        columnDimensions=columnDimensions,
        potentialRadius=inputDimensions[1] / 2,
        potentialPct=1.00,
        gaussianDist=0,
        commonDistributions=0,  # should be False if possibly not training
        localAreaDensity=-1,  #0.05, 
        numActiveColumnsPerInhArea=config['spNumActivePerInhArea'],
        dutyCyclePeriod=1000,
        stimulusThreshold=1,
        synPermInactiveDec=0.11,
        synPermActiveInc=0.11,
        synPermActiveSharedDec=0.0,
        synPermOrphanDec=0.0,
        minPctDutyCycleBeforeInh=0.001,
        minPctDutyCycleAfterInh=0.001,
        spVerbosity=config['spVerbosity'],
        spSeed=1,
        printPeriodicStats=int(config['spPrintPeriodicStats']),

        # TM params
        tpSeed=1,
        disableTemporal=0 if config['trainTP'] else 1,
        temporalImp=config['temporalImp'],
        nCellsPerCol=config['tpNCellsPerCol'] if config['trainTP'] else 1,
        collectStats=1,
        burnIn=2,
        verbosity=config['tpVerbosity'],
        newSynapseCount=config['spNumActivePerInhArea'],
        minThreshold=config['spNumActivePerInhArea'],
        activationThreshold=config['spNumActivePerInhArea'],
        initialPerm=config['tpInitialPerm'],
        connectedPerm=0.5,
        permanenceInc=config['tpPermanenceInc'],
        permanenceDec=config['tpPermanenceDec'],  # perhaps tune this
        globalDecay=config['tpGlobalDecay'],
        pamLength=config['tpPAMLength'],
        maxSeqLength=config['tpMaxSeqLength'],
        maxAge=config['tpMaxAge'],

        # General params
        computeTopDown=config['computeTopDown'],
        trainingStep='spatial',
    )

    dataSource = FileRecordStream(datasets['filenameTrain'])

    description = dict(
        options=dict(logOutputsDuringInference=False, ),
        network=dict(sensorDataSource=dataSource,
                     sensorEncoder=encoder,
                     sensorParams=sensorParams,
                     CLAType='py.CLARegion',
                     CLAParams=CLAParams,
                     classifierType=None,
                     classifierParams=None),
    )

    if config['trainSP']:
        description['spTrain'] = dict(
            iterationCount=config['iterationCountTrain'],
            #iter=displaySPCoincidences(50),
            #finish=printSPCoincidences()
        )
    else:
        description['spTrain'] = dict(
            # need to train with one iteration just to initialize data structures
            iterationCount=1)

    if config['trainTP']:
        description['tpTrain'] = []
        for i in range(config['trainTPRepeats']):
            stepDict = dict(
                name='step_%d' % (i),
                setup=sensorRewind,
                iterationCount=config['iterationCountTrain'],
            )
            if config['tpTimingEvery'] > 0:
                stepDict['iter'] = printTPTiming(config['tpTimingEvery'])
                stepDict['finish'] = [printTPTiming(), printTPCells]

            description['tpTrain'].append(stepDict)

    # ----------------------------------------------------------------------------
    # Inference tests
    inferSteps = []

    if config['evalTrainingSetNumIterations'] > 0:
        # The training set. Used to train the n-grams.
        inferSteps.append(
            dict(
                name='confidenceTrain_baseline',
                iterationCount=min(config['evalTrainingSetNumIterations'],
                                   config['iterationCountTrain']),
                ppOptions=dict(
                    verbosity=config['ppVerbosity'],
                    printLearnedCoincidences=True,
                    nGrams='train',
                    #ipsDetailsFor = "name,None,2",
                ),
                #finish=printTPCells,
            ))

        # Testing the training set on both the TM and n-grams.
        inferSteps.append(
            dict(
                name='confidenceTrain_nonoise',
                iterationCount=min(config['evalTrainingSetNumIterations'],
                                   config['iterationCountTrain']),
                setup=[sensorOpen(datasets['filenameTrain'])],
                ppOptions=dict(
                    verbosity=config['ppVerbosity'],
                    printLearnedCoincidences=False,
                    nGrams='test',
                    burnIns=[1, 2, 3, 4],
                    #ipsDetailsFor = "name,None,2",
                    #ipsAt = [1,2,3,4],
                ),
            ))

    # The test set
    if True:
        if datasets['filenameTest'] != datasets['filenameTrain']:
            inferSteps.append(
                dict(
                    name='confidenceTest_baseline',
                    iterationCount=config['iterationCountTest'],
                    setup=[sensorOpen(datasets['filenameTest'])],
                    ppOptions=dict(
                        verbosity=config['ppVerbosity'],
                        printLearnedCoincidences=False,
                        nGrams='test',
                        burnIns=[1, 2, 3, 4],
                        #ipsAt = [1,2,3,4],
                        ipsDetailsFor="name,None,2",
                    ),
                ))

    description['infer'] = inferSteps

    return description
Example 21
    def __init__(self, columnCount, InputEncoderParams, toL4ConnectorParamsI,
                 toL4ConnectorParamsII, toL5ConnectorParams,
                 toD1ConnectorParams, toD2ConnectorParams, L4Params, L5Params,
                 k, D1Params, D2Params):
        self.columnCount = columnCount
        self.toL4ConnectorParamsI = toL4ConnectorParamsI
        self.toL4ConnectorParamsII = toL4ConnectorParamsII
        self.toL5ConnectorParams = toL5ConnectorParams
        self.toD1ConnectorParams = toD1ConnectorParams
        self.toD2ConnectorParams = toD2ConnectorParams
        self.L4Params = L4Params
        self.L5Params = L5Params
        self.k = k
        self.D1Params = D1Params
        self.D2Params = D2Params
        self.learning = False

        #encoder
        from nupic.encoders import MultiEncoder
        self.InputEncoder = MultiEncoder()
        self.InputEncoder.addMultipleEncoders(InputEncoderParams)
        print "Encoder Online"

        #spatialPoolers
        from nupic.algorithms.spatial_pooler import SpatialPooler
        self.toL4ConnectorI = SpatialPooler(
            inputDimensions=(toL4ConnectorParamsI["inputDimensions"], ),
            columnDimensions=(columnCount, ),
            potentialPct=toL4ConnectorParamsI["potentialPct"],
            globalInhibition=toL4ConnectorParamsI["globalInhibition"],
            localAreaDensity=toL4ConnectorParamsI["localAreaDensity"],
            numActiveColumnsPerInhArea=toL4ConnectorParamsI[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL4ConnectorParamsI["synPermInactiveDec"],
            synPermActiveInc=toL4ConnectorParamsI["synPermActiveInc"],
            synPermConnected=toL4ConnectorParamsI["synPermConnected"],
            boostStrength=toL4ConnectorParamsI["boostStrength"],
            seed=toL4ConnectorParamsI["seed"],
            wrapAround=toL4ConnectorParamsI["wrapAround"])  # NOTE: this parameter plumbing is verbose
        self.toL4ConnectorII = SpatialPooler(
            inputDimensions=(columnCount * 3, ),
            columnDimensions=(columnCount, ),
            potentialPct=toL4ConnectorParamsII["potentialPct"],
            globalInhibition=toL4ConnectorParamsII["globalInhibition"],
            localAreaDensity=toL4ConnectorParamsII["localAreaDensity"],
            numActiveColumnsPerInhArea=toL4ConnectorParamsII[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL4ConnectorParamsII["synPermInactiveDec"],
            synPermActiveInc=toL4ConnectorParamsII["synPermActiveInc"],
            synPermConnected=toL4ConnectorParamsII["synPermConnected"],
            boostStrength=toL4ConnectorParamsII["boostStrength"],
            seed=toL4ConnectorParamsII["seed"],
            wrapAround=toL4ConnectorParamsII["wrapAround"])
        print "toL4Connector Online"
        self.toL5Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toL5ConnectorParams["potentialPct"],
            globalInhibition=toL5ConnectorParams["globalInhibition"],
            localAreaDensity=toL5ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toL5ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL5ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toL5ConnectorParams["synPermActiveInc"],
            synPermConnected=toL5ConnectorParams["synPermConnected"],
            boostStrength=toL5ConnectorParams["boostStrength"],
            seed=toL5ConnectorParams["seed"],
            wrapAround=toL5ConnectorParams["wrapAround"])
        print "toL5Connector Online"
        self.toD1Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toD1ConnectorParams["potentialPct"],
            globalInhibition=toD1ConnectorParams["globalInhibition"],
            localAreaDensity=toD1ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toD1ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toD1ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toD1ConnectorParams["synPermActiveInc"],
            synPermConnected=toD1ConnectorParams["synPermConnected"],
            boostStrength=toD1ConnectorParams["boostStrength"],
            seed=toD1ConnectorParams["seed"],
            wrapAround=toD1ConnectorParams["wrapAround"])
        print "toD1Connector Online"
        self.toD2Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toD2ConnectorParams["potentialPct"],
            globalInhibition=toD2ConnectorParams["globalInhibition"],
            localAreaDensity=toD2ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toD2ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toD2ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toD2ConnectorParams["synPermActiveInc"],
            synPermConnected=toD2ConnectorParams["synPermConnected"],
            boostStrength=toD2ConnectorParams["boostStrength"],
            seed=toD2ConnectorParams["seed"],
            wrapAround=toD2ConnectorParams["wrapAround"])
        print "toD2Connector Online"

        #HTM Layers
        from nupic.algorithms.temporal_memory import TemporalMemory
        self.L4ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.L4 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
        )
        print "L4 Online"
        self.L5ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.L5 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
        )
        print "L5 Online"
        self.D1ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.D1 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
            initialPermanence=0.21,
            connectedPermanence=0.5,
        )
        print "D1 Online"
        self.D2ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.D2 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
            initialPermanence=0.21,
            connectedPermanence=0.5,
        )
        print "D2 Online"
Example 22
    half_of_year_enc = ScalarEncoder(w=1,
                                     minval=0,
                                     maxval=2,
                                     radius=1,
                                     periodic=True,
                                     name="halfOfYear",
                                     forced=True)
    year_of_decade_enc = ScalarEncoder(w=3,
                                       minval=0,
                                       maxval=10,
                                       radius=1.5,
                                       periodic=True,
                                       name="yearOfDecade",
                                       forced=True)

    date_enc = MultiEncoder()
    date_enc.addEncoder(day_of_week_enc.name, day_of_week_enc)
    date_enc.addEncoder(day_of_month_enc.name, day_of_month_enc)
    date_enc.addEncoder(first_last_of_month_enc.name, first_last_of_month_enc)
    date_enc.addEncoder(week_of_month_enc.name, week_of_month_enc)
    date_enc.addEncoder(year_of_decade_enc.name, year_of_decade_enc)
    date_enc.addEncoder(month_of_year_enc.name, month_of_year_enc)
    date_enc.addEncoder(quarter_of_year_enc.name, quarter_of_year_enc)
    date_enc.addEncoder(half_of_year_enc.name, half_of_year_enc)

    if os.path.isfile('tp.p'):
        print "loading TP from tp.p and tp.tp"
        with open("tp.p", "r") as f:
            tp = pickle.load(f)
        tp.loadFromFile("tp.tp")
    else:
Example 23
def getDescription(datasets):

  # ========================================================================
  # Encoder for the sensor
  encoder = MultiEncoder()

  if config['encodingFieldStyleA'] == 'contiguous':
    encoder.addEncoder('fieldA', ScalarEncoder(w=config['encodingOnBitsA'],
                        n=config['encodingFieldWidthA'], minval=0,
                        maxval=config['numAValues'], periodic=True, name='fieldA'))
  elif config['encodingFieldStyleA'] == 'sdr':
    encoder.addEncoder('fieldA', SDRCategoryEncoder(w=config['encodingOnBitsA'],
                        n=config['encodingFieldWidthA'],
                        categoryList=range(config['numAValues']), name='fieldA'))
  else:
    assert False


  if config['encodingFieldStyleB'] == 'contiguous':
    encoder.addEncoder('fieldB', ScalarEncoder(w=config['encodingOnBitsB'], 
                      n=config['encodingFieldWidthB'], minval=0, 
                      maxval=config['numBValues'], periodic=True, name='fieldB'))
  elif config['encodingFieldStyleB'] == 'zero':
    encoder.addEncoder('fieldB', SDRRandomEncoder(w=0, n=config['encodingFieldWidthB'], 
                      name='fieldB'))
  elif config['encodingFieldStyleB'] == 'sdr':
    encoder.addEncoder('fieldB', SDRCategoryEncoder(w=config['encodingOnBitsB'], 
                      n=config['encodingFieldWidthB'], 
                      categoryList=range(config['numBValues']), name='fieldB'))
  else:
    assert False



  # ========================================================================
  # Network definition


  # ------------------------------------------------------------------
  # Node params
  # The inputs are long, horizontal vectors
  inputShape = (1, encoder.getWidth())

  # Layout the coincidences vertically stacked on top of each other, each
  # looking at the entire input field. 
  coincidencesShape = (config['spCoincCount'], 1)
  inputBorder = inputShape[1]/2
  if inputBorder*2 >= inputShape[1]:
    inputBorder -= 1

  sensorParams = dict(
    # encoder/datasource are not parameters so don't include here
    verbosity=config['sensorVerbosity']
  )

  CLAParams = dict(
    inputShape = inputShape,
    inputBorder = inputBorder,
    coincidencesShape = coincidencesShape,
    coincInputRadius = inputShape[1]/2,
    coincInputPoolPct = 1.0,
    gaussianDist = 0,
    commonDistributions = 0,    # should be False if possibly not training
    localAreaDensity = -1, #0.05, 
    numActivePerInhArea = config['spNumActivePerInhArea'],
    dutyCyclePeriod = 1000,
    stimulusThreshold = 1,
    synPermInactiveDec = config['spSynPermInactiveDec'],
    synPermActiveInc = 0.02,
    synPermActiveSharedDec=0.0,
    synPermOrphanDec = 0.0,
    minPctDutyCycleBeforeInh = 0.001,
    minPctDutyCycleAfterInh = config['spMinPctDutyCycleAfterInh'],
    minDistance = 0.05,
    computeTopDown = 1,
    spVerbosity = config['spVerbosity'],
    spSeed = 1,
    printPeriodicStats = int(config['spPeriodicStats']),

    # TP params
    disableTemporal = 1,

    # General params
    trainingStep = 'spatial',
    )

  trainingDataSource = FileRecordStream(datasets['trainingFilename'])


  description = dict(
    options = dict(
      logOutputsDuringInference = False,
    ),

    network = dict(
      sensorDataSource = trainingDataSource,
      sensorEncoder = encoder, 
      sensorParams = sensorParams,

      CLAType = 'py.CLARegion',
      CLAParams = CLAParams,

      classifierType = None,
      classifierParams = None),

  )

  if config['trainSP']:
    description['spTrain'] = dict(
      iterationCount=config['iterationCount'], 
      #iter=displaySPCoincidences(50),
      finish=printSPCoincidences()
      )
  else:
    description['spTrain'] = dict(
      # need to train with one iteration just to initialize data structures
      iterationCount=1)



  # ============================================================================
  # Inference tests
  inferSteps = []

  # ----------------------------------------
  # Training dataset
  if True:
    datasetName = 'bothTraining'
    inferSteps.append(
      dict(name = '%s_baseline' % datasetName, 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['trainingFilename'])],
           ppOptions = dict(printLearnedCoincidences=True),
          )
      )

    inferSteps.append(
      dict(name = '%s_acc' % datasetName, 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['trainingFilename'])],
           ppOptions = dict(onlyClassificationAcc=True,
                            tpActivationThresholds=config['tpActivationThresholds'],
                            computeDistances=True,
                            verbosity = 1),
          )
      )

  # ----------------------------------------
  # Testing dataset
  if 'testingFilename' in datasets:
    datasetName = 'bothTesting'
    inferSteps.append(
      dict(name = '%s_baseline' % datasetName, 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['testingFilename'])],
           ppOptions = dict(printLearnedCoincidences=False),
          )
      )

    inferSteps.append(
      dict(name = '%s_acc' % datasetName, 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['testingFilename'])],
           ppOptions = dict(onlyClassificationAcc=True,
                            tpActivationThresholds=config['tpActivationThresholds']),
          )
      )


  description['infer'] = inferSteps

  return description
Example 24
    def _createNetwork(self):
        def deepupdate(original, update):
            """
            Recursively update a dict.
            Sub-dicts won't be overwritten but updated as well.
            """
            if update is None:
                return None
            for key, value in original.iteritems():
                if not key in update:
                    update[key] = value
                elif isinstance(value, dict):
                    deepupdate(value, update[key])
            return update

        self.network = Network()

        # check
        # if self.selectivity not in self.dest_region_params.keys():
        #     raise Exception, "There is no selected region : " + self.selectivity
        if not len(self.net_structure.keys()) == len(
                set(self.net_structure.keys())):
            raise Exception("There are duplicated net_structure keys : "
                            + str(self.net_structure.keys()))

        # sensor
        for sensor_name, params in self.sensor_params.items():
            self.network.addRegion(sensor_name, "py.RecordSensor",
                                   json.dumps({"verbosity": 0}))
            sensor = self.network.regions[sensor_name].getSelf()

            # set encoder
            #params = deepupdate(cn.SENSOR_PARAMS, params)
            encoder = MultiEncoder()
            encoder.addMultipleEncoders(params)
            sensor.encoder = encoder
            sensor.dataSource = DataBuffer()

        # network
        print 'create element ...'
        for name in self.dest_region_params.keys():
            change_params = self.dest_region_params[name]
            params = deepupdate(self.default_params, change_params)
            # input width
            input_width = 0
            for source in [
                    s for s, d in self.net_structure.items() if name in d
            ]:
                if source in self.sensor_params.keys():
                    sensor = self.network.regions[source].getSelf()
                    input_width += sensor.encoder.getWidth()
                else:
                    input_width += params['TP_PARAMS'][
                        'cellsPerColumn'] * params['TP_PARAMS']['columnCount']

            params['SP_PARAMS']['inputWidth'] = input_width
            self._makeRegion(name, params)

        # link
        print 'link network ...'
        for source, dest_list in self.net_structure.items():
            for dest in dest_list:
                if source in self.sensor_params.keys():
                    self._linkRegion(source, dest)
                else:
                    if self.tp_enable:
                        self._linkRegion("tp_" + source, dest)
                    else:
                        self._linkRegion("sp_" + source, dest)

        # initialize
        print 'initializing network ...'
        self.network.initialize()
        for name in self.dest_region_params.keys():
            self._initRegion(name)

        return
Example 25
    def testSimpleMulticlassNetwork(self):

        # Setup data record stream of fake data (with three categories)
        filename = _getTempFileName()
        fields = [("timestamp", "datetime", "T"), ("value", "float", ""),
                  ("reset", "int", "R"), ("sid", "int", "S"),
                  ("categories", "list", "C")]
        records = ([datetime(day=1, month=3, year=2010), 0.0, 1, 0, ""], [
            datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1 2"
        ], [datetime(day=3, month=3, year=2010), 1.0, 0, 0,
            "1 2"], [datetime(day=4, month=3, year=2010), 2.0, 0, 0, "0"], [
                datetime(day=5, month=3, year=2010), 3.0, 0, 0, "1 2"
            ], [datetime(day=6, month=3, year=2010), 5.0, 0, 0,
                "1 2"], [datetime(day=7, month=3, year=2010), 8.0, 0, 0, "0"],
                   [datetime(day=8, month=3, year=2010), 13.0, 0, 0, "1 2"])
        dataSource = FileRecordStream(streamID=filename,
                                      write=True,
                                      fields=fields)
        for r in records:
            dataSource.appendRecord(list(r))

        # Create the network and get region instances.
        net = Network()
        net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
        net.addRegion("classifier", "py.KNNClassifierRegion",
                      "{'k': 2,'distThreshold': 0,'maxCategoryCount': 3}")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="dataOut",
                 destInput="bottomUpIn")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="categoryOut",
                 destInput="categoryIn")
        sensor = net.regions["sensor"]
        classifier = net.regions["classifier"]

        # Setup sensor region encoder and data stream.
        dataSource.close()
        dataSource = FileRecordStream(filename)
        sensorRegion = sensor.getSelf()
        sensorRegion.encoder = MultiEncoder()
        sensorRegion.encoder.addEncoder(
            "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
        sensorRegion.dataSource = dataSource

        # Get ready to run.
        net.initialize()

        # Train the network (by default learning is ON in the classifier, but assert
        # anyway) and then turn off learning and turn on inference mode.
        self.assertEqual(classifier.getParameter("learningMode"), 1)
        net.run(8)
        classifier.setParameter("inferenceMode", 1)
        classifier.setParameter("learningMode", 0)

        # Assert learning is OFF and that the classifier learned the dataset.
        self.assertEqual(classifier.getParameter("learningMode"), 0,
                         "Learning mode is not turned off.")
        self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                         "Inference mode is not turned on.")
        self.assertEqual(
            classifier.getParameter("categoryCount"), 3,
            "The classifier should count three total categories.")
        # The classifier learns 12 patterns because there are 12 categories
        # among the records:
        self.assertEqual(
            classifier.getParameter("patternCount"), 12,
            "The classifier should've learned 12 samples in total.")

        # Test the network on the same data as it trained on; should classify with
        # 100% accuracy.
        expectedCats = ([0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5],
                        [0.5, 0.5, 0.0], [0.0, 0.5,
                                          0.5], [0.0, 0.5,
                                                 0.5], [0.5, 0.5,
                                                        0.0], [0.0, 0.5, 0.5])
        dataSource.rewind()
        for i in xrange(8):
            net.run(1)
            inferredCats = classifier.getOutputData("categoriesOut")
            self.assertSequenceEqual(
                expectedCats[i], inferredCats.tolist(),
                "Classififer did not infer expected category probabilites for record "
                "number {}.".format(i))

        # Close data stream, delete file.
        dataSource.close()
        os.remove(filename)
Example 26
def getDescription(datasets):
    encoder = MultiEncoder()
    encoder.addEncoder("date", DateEncoder(timeOfDay=3))
    encoder.addEncoder("amount", LogEncoder(name="amount", maxval=1000))
    for i in xrange(0, nRandomFields):
        s = ScalarEncoder(name="scalar",
                          minval=0,
                          maxval=randomFieldWidth,
                          resolution=1,
                          w=3)
        encoder.addEncoder("random%d" % i, s)

    dataSource = FunctionSource(
        generateFunction,
        dict(nRandomFields=nRandomFields, randomFieldWidth=randomFieldWidth))

    inputShape = (1, encoder.getWidth())

    # Layout the coincidences vertically stacked on top of each other, each
    # looking at the entire input field.
    coincidencesShape = (nCoincidences, 1)
    # TODO: why do we need input border?
    inputBorder = inputShape[1] / 2
    if inputBorder * 2 >= inputShape[1]:
        inputBorder -= 1

    nodeParams = dict()

    spParams = dict(
        commonDistributions=0,
        inputShape=inputShape,
        inputBorder=inputBorder,
        coincidencesShape=coincidencesShape,
        coincInputRadius=inputShape[1] / 2,
        coincInputPoolPct=0.75,
        gaussianDist=0,
        localAreaDensity=0.10,
        # localAreaDensity = 0.04,
        numActivePerInhArea=-1,
        dutyCyclePeriod=1000,
        stimulusThreshold=5,
        synPermInactiveDec=0.08,
        # synPermInactiveDec=0.02,
        synPermActiveInc=0.02,
        synPermActiveSharedDec=0.0,
        synPermOrphanDec=0.0,
        minPctDutyCycleBeforeInh=0.05,
        # minPctDutyCycleAfterInh = 0.1,
        # minPctDutyCycleBeforeInh = 0.05,
        minPctDutyCycleAfterInh=0.05,
        # minPctDutyCycleAfterInh = 0.4,
        seed=1,
    )

    otherParams = dict(
        disableTemporal=1,
        trainingStep='spatial',
    )

    nodeParams.update(spParams)
    nodeParams.update(otherParams)

    def mySetupCallback(experiment):
        print "Setup function called"

    description = dict(
        options=dict(logOutputsDuringInference=False, ),
        network=dict(sensorDataSource=dataSource,
                     sensorEncoder=encoder,
                     CLAType="py.CLARegion",
                     CLAParams=nodeParams,
                     classifierType=None,
                     classifierParams=None),

        # step
        spTrain=dict(
            name="phase1",
            setup=mySetupCallback,
            iterationCount=5000,
            #iter=displaySPCoincidences(100),
            finish=printSPCoincidences()),
        tpTrain=None,  # same format as sptrain if non-empty
        infer=None,  # same format as sptrain if non-empty
    )

    return description
Example 27
def getDescriptionImpl(datasets, config):
    """ Implementation for description.py getDescription() entry point function.
  Builds an experiment description dictionary as required by LPF (Lightweight
  Prediction Framework).  Hardcoded data that is less likely to vary between
  experiments is augmented with data from the config dictionary.
  See getBaseDatasets() and getDatasets().

    datasets:     a dictionary of input datasets that may have been pre-processed
                   via aggregation.  Keys:
                   'trainDataset'         -- path to the training dataset
                   'inferDataset.N.alias' -- path(s) to the inference dataset

    config:       configuration dictionary from description.py

    returns:      an experiment description dictionary as required by LPF
  """

    # ----------------------------------------------------------------------------
    # Encoder for the sensor
    encoder = MultiEncoder(_getDatasetEncoderConfig(config))

    # ------------------------------------------------------------------
    # Region params
    CLAParams = _getCLAParams(encoder=encoder, config=config)

    sensorParams = dict(
        # encoder/datasource are not parameters so don't include here
        verbosity=config['sensorVerbosity'])

    # Filesource for the sensor. Set the filename in setup functions.
    dataSource = FileRecordStream('foo')

    description = dict(
        options=dict(logOutputsDuringInference=False, ),
        network=dict(

            # Think of the sensor as a shell holding the dataSource and encoder;
            # the sensor also applies pre-encoding and post-encoding filters;
            # the filters are configured elsewhere (TODO: where?)
            sensorDataSource=dataSource,
            sensorEncoder=encoder,

            # LPF converts this dict to a JSON string and uses it as constructor
            # args, so it must contain only simple types (ints, strings, floats)
            sensorParams=sensorParams,

            # CLA region class; the "py." prefix marks class names implemented in
            # Python; older code implemented regions in C++ and used the class
            # name without a prefix.
            CLAType='py.CLARegion',
            # dict converted to JSON string
            CLAParams=CLAParams,

            # classifiers are presently not used (formerly used by vision code); should
            # be okay to leave out Classifier, sensor, CLA
            classifierType=None,
            classifierParams=None),
    )

    # ----------------------------------------------------------------------------
    # Configure Training and Inference phases
    # ----------------------------------------------------------------------------
    #
    # phase is 0 or more steps (a list of dictionaries, each dict corresponds to one step)
    # (see py/nupic/frameworks/prediction/experiment.py docstring)
    #
    # step = dict (name, setup, iter, finish, iterationCount)
    #   setup, iter, finish are callbacks;
    #
    # name: step name string; optional, used for printing messages to console
    # setup: open input file (e.g., via dataSource), print stats, etc.
    # iter: for diagnostics/debugging; called by net.run between iterations.
    # finish: called at the end by net.run; usually prints out stats (e.g., how many
    #   synapses, time taken, etc.)
    # callbacks are almost always reused, so they are not experiment-specific (see
    #   imports at top of file)
    # a callback always has the form c(experiment_obj, iteration_number); from it
    #   you can get, e.g., experiment.network.regions["sensor"].getSelf()
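
    # A minimal illustrative callback (hypothetical, not wired into any step
    # below) showing the (experiment, iteration) signature described above:
    def _exampleIterCallback(experiment, iteration):
        sensorRegion = experiment.network.regions['sensor'].getSelf()
        print 'iteration %d, encoder width %d' % (
            iteration, sensorRegion.encoder.getWidth())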

    spEnable = config['spEnable']
    spTrain = _isSPTrainingEnabled(config)

    tpEnable = config['tpEnable']
    tpTrain = _isTPTrainingEnabled(config)
    # NOTE: presently, we always train TP (during training phase) if TP is enabled
    assert (tpTrain == tpEnable)

    # At least one of SP/TP must be enabled for a meaningful system
    assert (spEnable or tpEnable)

    # NOTE: SP and Spatial regression need to undergo training over the same
    #       set of rows. Since we're not reading the training dataset here to
    #       find out the number of rows, we presently configure both with the
    #       same auto-rewind setting.
    # TODO: this may cause knn training to repeatedly iterate unnecessarily
    #       over the same records in case spTrainIterationCount is larger than the
    #       number of rows in the training dataset. Look into optimizing this to
    #       avoid wasting time on knn training due to unnecessary iterations, but
    #       make sure that both SP and knn train on the exact same rows.
    spTrainMayNeedAutoRewind = config['spTrainIterationCount'] is not None

    # ----------------------------------------------------------------------------
    # SP training
    if spTrain:
        description['spTrain'] = []
        for i in xrange(config['spTrainNPasses']):
            stepDict = dict(
              name='sp.train.pass_%d' % (i),
              iterationCount=config['spTrainIterationCount'],
              setup=[sensorOpen(datasets[_getTrainingDatasetKey(config)]) if i==0 \
                       else sensorRewind,
                     fileSourceAutoRewind(spTrainMayNeedAutoRewind),],
              finish=[fileSourceAutoRewind(False),],
            )

            description['spTrain'].append(stepDict)

    elif spEnable:
        description['spTrain'] = dict(
            # need to train with one iteration just to initialize data structures
            # TODO: seems like a hack; shouldn't CLA framework automatically initialize
            #   the necessary subsystems? (ask Ron)
            iterationCount=1, )

    # ----------------------------------------------------------------------------
    # TP training
    if tpTrain:
        description['tpTrain'] = []
        mayNeedAutoRewind = config['tpTrainIterationCount'] is not None
        for i in xrange(config['tpTrainNPasses']):
            stepDict = dict(
              name='tp.train.pass_%d' % (i),
              iterationCount=config['tpTrainIterationCount'],
              setup=[
                sensorOpen(datasets[_getTrainingDatasetKey(config)]) if i==0 \
                  else sensorRewind,
                fileSourceAutoRewind(mayNeedAutoRewind),
                ],
              finish=[fileSourceAutoRewind(False),],
              )
            if config['tpTrainPrintStatsPeriodIter'] > 0:
                stepDict['iter'] = printTPTiming(
                    config['tpTrainPrintStatsPeriodIter'])
                stepDict['finish'] += [printTPTiming()]  #, printTPCells]

            description['tpTrain'].append(stepDict)

    # ----------------------------------------------------------------------------
    # Inference tests
    # NOTE: Presently, SP and TP learning is disabled during inference
    description['infer'] = []

    predictionFields = None
    spatialRegrTests = None
    if ('spFieldPredictionSchema' in config
            and config['spFieldPredictionSchema'] is not None):
        if len(config['spFieldPredictionSchema']['predictionFields']) > 0:
            spFieldPredictionSchema = config['spFieldPredictionSchema']
            predictionFields = spFieldPredictionSchema['predictionFields']
            if len(spFieldPredictionSchema['regressionTests']) > 0:
                # presently, our spatial regression modules (knn and linear) don't support
                # multiple fields
                assert (len(predictionFields) == 1)
                spatialRegrTests = spFieldPredictionSchema['regressionTests']

    # Set up test steps for all inference datasets
    for i, ds in enumerate(config['inferDatasets']):

        datasetInfo = config['inferDatasets'][i]

        # NOTE: the path/contents may differ from the corresponding dataset
        #       referenced in config['inferDatasets'] due to preprocessing (e.g.,
        #       aggregation)
        inferenceDatasetKey = \
          _datasetKeyFromInferenceDatasetIndex(index=i, config=config)
        inferenceDatasetPath = datasets[inferenceDatasetKey]

        # ----------------------------------------
        # Step: Temporal inference
        #
        if tpEnable:

            # Turn off plot histograms when running under darwin
            plotTemporalHistograms = True
            if sys.platform.startswith('darwin'):
                plotTemporalHistograms = False
                print "Turning off plotTemporalHistograms under darwin"

            temporalTestingStep = dict(
                name=getTemporalInferenceStepName(datasetInfo['alias'], i),
                iterationCount=ds['iterCount'],
                setup=[sensorOpen(inferenceDatasetPath)],
                ppOptions=dict(
                    verbosity=config['postprocVerbosity'],
                    plotTemporalHistograms=plotTemporalHistograms,
                    printLearnedCoincidences=False,
                    logPredictions=True,
                ))
            description['infer'].append(temporalTestingStep)
        else:
            print 'temporalTestingStep skipped.'

        # ----------------------------------------
        # Step: Non-temporal Regression algorithm training (if enabled)
        #
        if spatialRegrTests:
            # NOTE: we don't need auto-rewind when training spatial regression algorithms
            regrTrainStep = dict(
              name = ('%s_nontemporal.training') % \
                       (_normalizeDatasetAliasNameForStepName(datasetInfo['alias']),),
              iterationCount=config['spTrainIterationCount'],
              setup=[sensorOpen(datasets[_getTrainingDatasetKey(config)]),
                     fileSourceAutoRewind(spTrainMayNeedAutoRewind),],
              ppOptions = dict(verbosity=config['postprocVerbosity'],
                               printLearnedCoincidences=False,)
            )

            # Add Spatial Regression algorithm training requests
            ppOptions = regrTrainStep['ppOptions']
            for test in spatialRegrTests:
                assert (len(predictionFields) == 1)
                ppOptions[
                    test['algorithm']] = 'train,%s' % (predictionFields[0])

            description['infer'].append(regrTrainStep)

        # ----------------------------------------
        # Step: Non-temporal Inference
        #
        nontemporalTestingStep = dict(
            name=getNonTemporalInferenceStepName(datasetInfo['alias'], i),
            iterationCount=ds['iterCount'],
            setup=[
                sensorOpen(inferenceDatasetPath),
                fileSourceAutoRewind(False),
                # TODO Do we need to turn off collectStats in the 'finish' sub-step?
                setTPAttribute('collectStats', 1),
            ],
            # TODO which ppOptions do we want in this template?
            ppOptions=dict(
                verbosity=config['postprocVerbosity'],
                plotTemporalHistograms=False,
                printLearnedCoincidences=False,
                logPredictions=True,
            ),
        )

        # Add Spatial Field Prediction options to inference step
        if predictionFields:
            # Set sparse encodings of prediction fields to zero
            setup = nontemporalTestingStep['setup']
            setup.append(
                setAttribute('sensor', 'postEncodingFilters', [
                    ModifyFields(fields=predictionFields,
                                 operation='setToZero')
                ]))
        if spatialRegrTests:
            # Add regression test requests
            ppOptions = nontemporalTestingStep['ppOptions']
            for test in spatialRegrTests:
                assert (len(predictionFields) == 1)
                ppOptions[
                    test['algorithm']] = 'test,%s' % (predictionFields[0])

        description['infer'].append(nontemporalTestingStep)

    # ----------------------------------------------------------------------------
    # Add auto-reset intervals to the sensor region for tpTrain and Infer phases
    # (if config['sensorAutoReset'] is enabled)
    # ----------------------------------------------------------------------------
    if 'sensorAutoReset' in config and config['sensorAutoReset'] is not None:
        dd = defaultdict(lambda: 0, config['sensorAutoReset'])
        # class timedelta([days[, seconds[, microseconds[, milliseconds[, minutes[,
        #                 hours[, weeks]]]]]]])
        if not (0 == dd['days'] == dd['hours'] == dd['minutes'] == dd['seconds'] \
                == dd['milliseconds'] == dd['microseconds'] == dd['weeks']):
            timeDelta = timedelta(days=dd['days'],
                                  hours=dd['hours'],
                                  minutes=dd['minutes'],
                                  seconds=dd['seconds'],
                                  milliseconds=dd['milliseconds'],
                                  microseconds=dd['microseconds'],
                                  weeks=dd['weeks'])

            tpTrainSteps = description.get('tpTrain', [])
            inferSteps = description.get('infer', [])
            for step in itertools.chain(tpTrainSteps, inferSteps):
                if 'setup' not in step:
                    step['setup'] = []
                step['setup'].append(setAutoResetInterval(timeDelta))

    return description
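
# A minimal sketch of how a concrete description.py might expose the LPF entry
# point by delegating to the helper above; the getDescription signature and the
# module-level `config` dict are assumptions, not taken from this example:
#
#     def getDescription(datasets):
#         return getDescriptionImpl(datasets=datasets, config=config)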
Esempio n. 28
0
def createTemporalAnomaly(recordParams,
                          spatialParams=_SP_PARAMS,
                          temporalParams=_TP_PARAMS,
                          verbosity=_VERBOSITY):
    """Generates a Network with connected RecordSensor, SP, TP.

  This function takes care of generating regions and the canonical links.
  The network has a sensor region reading data from a specified input and
  passing the encoded representation to an SPRegion.
  The SPRegion output is passed to a TPRegion.

  Note: this function returns a network that needs to be initialized. This
  allows the user to extend the network by adding further regions and
  connections.

  :param recordParams: a dict with parameters for creating RecordSensor region.
  :param spatialParams: a dict with parameters for creating SPRegion.
  :param temporalParams: a dict with parameters for creating TPRegion.
  :param verbosity: an integer representing how chatty the network will be.
  """
    inputFilePath = recordParams["inputFilePath"]
    scalarEncoderArgs = recordParams["scalarEncoderArgs"]
    dateEncoderArgs = recordParams["dateEncoderArgs"]

    scalarEncoder = ScalarEncoder(**scalarEncoderArgs)
    dateEncoder = DateEncoder(**dateEncoderArgs)

    encoder = MultiEncoder()
    encoder.addEncoder(scalarEncoderArgs["name"], scalarEncoder)
    encoder.addEncoder(dateEncoderArgs["name"], dateEncoder)

    network = Network()

    network.addRegion("sensor", "py.RecordSensor",
                      json.dumps({"verbosity": verbosity}))

    sensor = network.regions["sensor"].getSelf()
    sensor.encoder = encoder
    sensor.dataSource = FileRecordStream(streamID=inputFilePath)

    # Create the spatial pooler region
    spatialParams["inputWidth"] = sensor.encoder.getWidth()
    network.addRegion("spatialPoolerRegion", "py.SPRegion",
                      json.dumps(spatialParams))

    # Link the SP region to the sensor input
    network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
    network.link("sensor",
                 "spatialPoolerRegion",
                 "UniformLink",
                 "",
                 srcOutput="resetOut",
                 destInput="resetIn")
    network.link("spatialPoolerRegion",
                 "sensor",
                 "UniformLink",
                 "",
                 srcOutput="spatialTopDownOut",
                 destInput="spatialTopDownIn")
    network.link("spatialPoolerRegion",
                 "sensor",
                 "UniformLink",
                 "",
                 srcOutput="temporalTopDownOut",
                 destInput="temporalTopDownIn")

    # Add the TPRegion on top of the SPRegion
    network.addRegion("temporalPoolerRegion", "py.TPRegion",
                      json.dumps(temporalParams))

    network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink",
                 "")
    network.link("temporalPoolerRegion",
                 "spatialPoolerRegion",
                 "UniformLink",
                 "",
                 srcOutput="topDownOut",
                 destInput="topDownIn")

    spatialPoolerRegion = network.regions["spatialPoolerRegion"]

    # Make sure learning is enabled
    spatialPoolerRegion.setParameter("learningMode", True)
    # We want temporal anomalies so disable anomalyMode in the SP. This mode is
    # used for computing anomalies in a non-temporal model.
    spatialPoolerRegion.setParameter("anomalyMode", False)

    temporalPoolerRegion = network.regions["temporalPoolerRegion"]

    # Enable topDownMode to get the predicted columns output
    temporalPoolerRegion.setParameter("topDownMode", True)
    # Make sure learning is enabled (this is the default)
    temporalPoolerRegion.setParameter("learningMode", True)
    # Enable inference mode so we get predictions
    temporalPoolerRegion.setParameter("inferenceMode", True)
    # Enable anomalyMode to compute the anomaly score.
    temporalPoolerRegion.setParameter("anomalyMode", True)

    return network
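
# A minimal usage sketch for createTemporalAnomaly(). The CSV path and the
# encoder parameter values are illustrative assumptions (encoder names are
# expected to match field names in the input file), not taken from the example:
if __name__ == "__main__":
    recordParams = {
        "inputFilePath": "rec-center-hourly.csv",  # hypothetical input CSV
        "scalarEncoderArgs": {
            "name": "consumption",
            "minval": 0.0,
            "maxval": 100.0,
            "w": 21,
            "n": 500,
            "clipInput": True,
        },
        "dateEncoderArgs": {
            "name": "timestamp",
            "timeOfDay": (21, 9.5),
        },
    }
    network = createTemporalAnomaly(recordParams)
    network.initialize()
    network.run(100)  # stream 100 records through sensor -> SP -> TP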
Esempio n. 29
0
def createEncoder():
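    # The ScalarEncoder positional arguments below are (w, minval, maxval);
    # `n` sets the total number of output bits for each field encoding.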
    volume_encoder = ScalarEncoder(21,
                                   0.0,
                                   20.0,
                                   n=200,
                                   name="volume",
                                   clipInput=False)
    floorheight_encoder = ScalarEncoder(21,
                                        0.0,
                                        24.0,
                                        n=125,
                                        name="floorheight",
                                        clipInput=False)

    diagCoorA_encoder = ScalarEncoder(21,
                                      0.0,
                                      200.0,
                                      n=200,
                                      name="diagCoorA",
                                      clipInput=False)
    diagCoorB_encoder = ScalarEncoder(21,
                                      0.0,
                                      200.0,
                                      n=200,
                                      name="diagCoorB",
                                      clipInput=False)
    diagCoorC_encoder = ScalarEncoder(21,
                                      0.0,
                                      200.0,
                                      n=200,
                                      name="diagCoorC",
                                      clipInput=False)
    diagCoorD_encoder = ScalarEncoder(21,
                                      0.0,
                                      200.0,
                                      n=200,
                                      name="diagCoorD",
                                      clipInput=False)
    diagCoorE_encoder = ScalarEncoder(21,
                                      0.0,
                                      200.0,
                                      n=200,
                                      name="diagCoorE",
                                      clipInput=False)
    diagCoorF_encoder = ScalarEncoder(21,
                                      0.0,
                                      200.0,
                                      n=200,
                                      name="diagCoorF",
                                      clipInput=False)
    diagCoorG_encoder = ScalarEncoder(21,
                                      0.0,
                                      200.0,
                                      n=200,
                                      name="diagCoorG",
                                      clipInput=False)
    diagCoorH_encoder = ScalarEncoder(21,
                                      0.0,
                                      200.0,
                                      n=200,
                                      name="diagCoorH",
                                      clipInput=False)
    diagCoorI_encoder = ScalarEncoder(21,
                                      0.0,
                                      200.0,
                                      n=200,
                                      name="diagCoorI",
                                      clipInput=False)
    diagCoorJ_encoder = ScalarEncoder(21,
                                      0.0,
                                      200.0,
                                      n=200,
                                      name="diagCoorJ",
                                      clipInput=False)

    global encoder
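    # Combine the per-field encoders into a single MultiEncoder stored in the
    # module-level `encoder` (declared global above); its total width is the
    # sum of the widths of the individual field encodings.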
    encoder = MultiEncoder()

    encoder.addEncoder("volume", volume_encoder)
    encoder.addEncoder("floorheight", floorheight_encoder)
    encoder.addEncoder("diagCoorA", diagCoorA_encoder)
    encoder.addEncoder("diagCoorB", diagCoorB_encoder)
    encoder.addEncoder("diagCoorC", diagCoorC_encoder)
    encoder.addEncoder("diagCoorD", diagCoorD_encoder)
    encoder.addEncoder("diagCoorE", diagCoorE_encoder)
    encoder.addEncoder("diagCoorF", diagCoorF_encoder)
    encoder.addEncoder("diagCoorG", diagCoorG_encoder)
    encoder.addEncoder("diagCoorH", diagCoorH_encoder)
    encoder.addEncoder("diagCoorI", diagCoorI_encoder)
    encoder.addEncoder("diagCoorJ", diagCoorJ_encoder)

    return encoder
def createTemporalAnomaly_chemical(recordParams, spatialParams, temporalParams,
                                   verbosity):
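    """Generates a Network with connected RecordSensor, SP, TM, and
  AnomalyLikelihoodRegion for a seven-scalar-field "chemical" dataset.

  Analogous to createTemporalAnomaly() above, except that the sensor encodes
  seven scalar fields plus a date field, the temporal region is a py.TMRegion,
  and the raw anomaly score is converted to a likelihood by an
  AnomalyLikelihoodRegion. The returned network still needs to be initialized.
  """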

    inputFilePath = recordParams["inputFilePath"]
    scalarEncoder1Args = recordParams["scalarEncoder1Args"]
    scalarEncoder2Args = recordParams["scalarEncoder2Args"]
    scalarEncoder3Args = recordParams["scalarEncoder3Args"]
    scalarEncoder4Args = recordParams["scalarEncoder4Args"]
    scalarEncoder5Args = recordParams["scalarEncoder5Args"]
    scalarEncoder6Args = recordParams["scalarEncoder6Args"]
    scalarEncoder7Args = recordParams["scalarEncoder7Args"]
    dateEncoderArgs = recordParams["dateEncoderArgs"]

    scalarEncoder1 = ScalarEncoder(**scalarEncoder1Args)
    scalarEncoder2 = ScalarEncoder(**scalarEncoder2Args)
    scalarEncoder3 = ScalarEncoder(**scalarEncoder3Args)
    scalarEncoder4 = ScalarEncoder(**scalarEncoder4Args)
    scalarEncoder5 = ScalarEncoder(**scalarEncoder5Args)
    scalarEncoder6 = ScalarEncoder(**scalarEncoder6Args)
    scalarEncoder7 = ScalarEncoder(**scalarEncoder7Args)
    dateEncoder = DateEncoder(**dateEncoderArgs)

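    # Combine the seven scalar encoders and the date encoder into one
    # MultiEncoder; each encoder is registered under its configured name,
    # which is expected to match the corresponding field in the input file.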
    encoder = MultiEncoder()
    encoder.addEncoder(scalarEncoder1Args["name"], scalarEncoder1)
    encoder.addEncoder(scalarEncoder2Args["name"], scalarEncoder2)
    encoder.addEncoder(scalarEncoder3Args["name"], scalarEncoder3)
    encoder.addEncoder(scalarEncoder4Args["name"], scalarEncoder4)
    encoder.addEncoder(scalarEncoder5Args["name"], scalarEncoder5)
    encoder.addEncoder(scalarEncoder6Args["name"], scalarEncoder6)
    encoder.addEncoder(scalarEncoder7Args["name"], scalarEncoder7)
    encoder.addEncoder(dateEncoderArgs["name"], dateEncoder)

    network = Network()

    network.addRegion("sensor", "py.RecordSensor",
                      json.dumps({"verbosity": verbosity}))

    sensor = network.regions["sensor"].getSelf()
    sensor.encoder = encoder
    sensor.dataSource = FileRecordStream(streamID=inputFilePath)

    # Create the spatial pooler region
    spatialParams["inputWidth"] = sensor.encoder.getWidth()
    network.addRegion("spatialPoolerRegion", "py.SPRegion",
                      json.dumps(spatialParams))

    # Link the SP region to the sensor input
    network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
    network.link("sensor",
                 "spatialPoolerRegion",
                 "UniformLink",
                 "",
                 srcOutput="resetOut",
                 destInput="resetIn")
    network.link("spatialPoolerRegion",
                 "sensor",
                 "UniformLink",
                 "",
                 srcOutput="spatialTopDownOut",
                 destInput="spatialTopDownIn")
    network.link("spatialPoolerRegion",
                 "sensor",
                 "UniformLink",
                 "",
                 srcOutput="temporalTopDownOut",
                 destInput="temporalTopDownIn")

    # Add the TPRegion on top of the SPRegion
    network.addRegion("temporalPoolerRegion", "py.TMRegion",
                      json.dumps(temporalParams))

    network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink",
                 "")
    network.link("temporalPoolerRegion",
                 "spatialPoolerRegion",
                 "UniformLink",
                 "",
                 srcOutput="topDownOut",
                 destInput="topDownIn")

    # Add the AnomalyLikelihoodRegion on top of the TMRegion
    network.addRegion("anomalyLikelihoodRegion", "py.AnomalyLikelihoodRegion",
                      json.dumps({}))
    network.link("temporalPoolerRegion",
                 "anomalyLikelihoodRegion",
                 "UniformLink",
                 "",
                 srcOutput="anomalyScore",
                 destInput="rawAnomalyScore")
    network.link("sensor",
                 "anomalyLikelihoodRegion",
                 "UniformLink",
                 "",
                 srcOutput="sourceOut",
                 destInput="metricValue")

    spatialPoolerRegion = network.regions["spatialPoolerRegion"]

    # Make sure learning is enabled
    spatialPoolerRegion.setParameter("learningMode", True)
    # We want temporal anomalies so disable anomalyMode in the SP. This mode is
    # used for computing anomalies in a non-temporal model.
    spatialPoolerRegion.setParameter("anomalyMode", False)

    temporalPoolerRegion = network.regions["temporalPoolerRegion"]

    # Enable topDownMode to get the predicted columns output
    temporalPoolerRegion.setParameter("topDownMode", True)
    # Make sure learning is enabled (this is the default)
    temporalPoolerRegion.setParameter("learningMode", True)
    # Enable inference mode so we get predictions
    temporalPoolerRegion.setParameter("inferenceMode", True)
    # Enable anomalyMode to compute the anomaly score.
    temporalPoolerRegion.setParameter("anomalyMode", True)

    return network
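
# A minimal sketch (record/SP/TM parameter dicts elided) of consuming the
# anomaly likelihood from the network built above; the output name follows the
# AnomalyLikelihoodRegion link created in this function:
#
#     network = createTemporalAnomaly_chemical(recordParams, spatialParams,
#                                              temporalParams, verbosity=0)
#     network.initialize()
#     network.run(1)
#     likelihood = network.regions["anomalyLikelihoodRegion"] \
#                         .getOutputData("anomalyLikelihood")[0]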