Example #1
class Model(object):


  def __init__(self):
    self.sensorEncoder = ScalarEncoder(n=512, w=21, minval=8.9, maxval=40,
                                       clipInput=True, forced=True)
    self.motorEncoder = ScalarEncoder(n=512, w=21, minval=-400, maxval=400,
                                      clipInput=True, forced=True)

    self.experimentRunner = SensorimotorExperimentRunner(
      tmOverrides={
        "columnDimensions": [512],
        "maxNewSynapseCount": 21*2,
        "minThreshold": 16*2,
        "activationThreshold": 16*2
      },
      tpOverrides={
        "columnDimensions": [512],
        "numActiveColumnsPerInhArea": 20,
        "poolingThreshUnpredicted": 0.5
      }
    )


  def feed(self, sensorValue, motorValue, sequenceLabel=None):    
    sensorSDR = set(self.sensorEncoder.encode(sensorValue).nonzero()[0].tolist())
    motorSDR = set((self.motorEncoder.encode(motorValue).nonzero()[0] +
                    self.sensorEncoder.n).tolist())
    sensorimotorSDR = sensorSDR.union(motorSDR)

    self.experimentRunner.feedTransition(sensorSDR, motorSDR, sensorimotorSDR,
                                         tmLearn=True, tpLearn=True,
                                         sequenceLabel=sequenceLabel)
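
# A short follow-up sketch (not from the original code) of the offset trick
# feed() relies on: motor bit indices are shifted by sensorEncoder.n, so the
# sensor bits occupy 0..511 and the motor bits 512..1023, making the union a
# disjoint sensorimotor SDR. The encoded values below are hypothetical.
from nupic.encoders import ScalarEncoder

sensorEncoder = ScalarEncoder(n=512, w=21, minval=8.9, maxval=40,
                              clipInput=True, forced=True)
motorEncoder = ScalarEncoder(n=512, w=21, minval=-400, maxval=400,
                             clipInput=True, forced=True)
sensorSDR = set(sensorEncoder.encode(12.5).nonzero()[0].tolist())
motorSDR = set((motorEncoder.encode(100).nonzero()[0] +
                sensorEncoder.n).tolist())
assert not sensorSDR & motorSDR  # the two index ranges never collide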
Example #2
  def encodeLetters(self):
    letterEncoder = ScalarEncoder(n=self.numColumns, w=self.numActiveCells, minval=0, maxval=25)

    numLetters = np.shape(self.letters)[0]
    letterArray = np.zeros((numLetters, self.numColumns))
    letterIndices = []
    for k in range(numLetters):
      letterArray[k, :] = letterEncoder.encode(k)
      idxLetters = [i for i, j in izip(count(), letterArray[k]) if j == 1]
      letterIndices.append(idxLetters)

    return letterIndices
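
    # An equivalent sketch (not in the original): because each row of
    # letterArray is binary, numpy can build the same index lists directly:
    #   letterIndices = [np.nonzero(letterEncoder.encode(k))[0].tolist()
    #                    for k in range(numLetters)]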
Example #3
def loadThingData(dataDir="data", n=150, w=11):
  """
  Load Thing sensation data. There is one file per object; each row contains
  one (feature, location) pair. The format is as follows:
    [(-33.6705, 75.5003, 2.4207)/10] => [[list of active bits of location],
                                         [list of active bits of feature]]
  The content before "=>" is the true 3D location / sensation
  We ignore the encoded values after "=>" and use :class:`ScalarEncoder` to
  encode the sensation in a way that is compatible with the experiment network.

  :param dataDir: The directory containing the data files
  :type dataDir: str
  :param n: The number of bits in the feature SDR. Usually L4 column count
  :type n: int
  :param w: Number of 'on' bits in the feature SDR. Usually L4 sample size
  :type w: int
  :return: Dictionary mapping objects to sensations that can be used directly by
           class L246aNetwork 'infer' and 'learn' methods
  :rtype: dict[str,list]
  """
  objects = defaultdict(list)

  # Thing features are scalar values ranging from 1-25 inclusive
  encoder = ScalarEncoder(n=n, w=w, minval=1, maxval=25, forced=True)

  dataPath = os.path.dirname(os.path.realpath(__file__))
  dataPath = os.path.join(dataPath, dataDir)
  objFiles = glob.glob1(dataPath, "*.log")

  for filename in objFiles:
    obj, _ = os.path.splitext(filename)

    # Read raw sensations from log file. Ignore SDRs after "=>"
    sensations = []
    with open(os.path.join(dataPath, filename)) as f:
      for line in f.readlines():
        # Parse raw location/feature values
        line = line.split("=>")[0].translate(None, "[,]()")
        locationStr, featureStr = line.split("/")
        location = map(float, locationStr.split())
        feature = encoder.encode(int(featureStr)).nonzero()[0].tolist()

        sensations.append((location, feature))

    # Assume single column
    objects[obj] = [sensations]

  return objects
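
# A worked sketch of the per-line parsing above, using the sample line from
# the docstring (Python 2 str.translate; the SDRs after "=>" are discarded):
#   line = "[(-33.6705, 75.5003, 2.4207)/10] => [[...], [...]]"
#   line.split("=>")[0].translate(None, "[,]()") -> "-33.6705 75.5003 2.4207/10 "
#   locationStr -> "-33.6705 75.5003 2.4207"    featureStr -> "10 "
#   location -> [-33.6705, 75.5003, 2.4207]     feature -> active bits for 10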
Example #4
  def encodeTime(self):

    timeEncoder = ScalarEncoder(n=self.numTimeColumns,
                                w=self.numActiveTimeCells,
                                minval=0,
                                maxval=self.numTimeSteps,
                                forced=True)

    timeArray = np.zeros((self.numTimeSteps, self.numTimeColumns))
    timeIndices = []
    for k in range(self.numTimeSteps):
      timeArray[k, :] = timeEncoder.encode(k)
      idxTimes = [i for i, j in izip(count(), timeArray[k]) if j == 1]
      timeIndices.append(idxTimes)

    return timeIndices
Example #5
  def __init__(self,
               w=5,
               minval=1e-07,
               maxval=10000,
               periodic=False,
               n=0,
               radius=0,
               resolution=0,
               name="log",
               verbosity=0,
               clipInput=True,
               forced=False):

    # Lower bound for log encoding near machine precision limit
    lowLimit = 1e-07

    # Limit minval as log10(0) is undefined.
    if minval < lowLimit:
      minval = lowLimit

    # Check that minval is still lower than maxval
    if not minval < maxval:
      raise ValueError("Max val must be larger than min val or the lower limit "
                       "for this encoder %.7f" % lowLimit)

    self.encoders = None
    self.verbosity = verbosity

    # Scale values for calculations within the class
    self.minScaledValue = math.log10(minval)
    self.maxScaledValue = math.log10(maxval)

    if not self.maxScaledValue > self.minScaledValue:
      raise ValueError("Max val must be larger, in log space, than min val.")

    self.clipInput = clipInput
    self.minval = minval
    self.maxval = maxval

    self.encoder = ScalarEncoder(w=w,
                                 minval=self.minScaledValue,
                                 maxval=self.maxScaledValue,
                                 periodic=False,
                                 n=n,
                                 radius=radius,
                                 resolution=resolution,
                                 verbosity=self.verbosity,
                                 clipInput=self.clipInput,
                                 forced=forced)
    self.width = self.encoder.getWidth()
    self.description = [(name, 0)]
    self.name = name

    # This list is created by getBucketValues() the first time it is called,
    #  and re-created whenever our buckets would be re-arranged.
    self._bucketValues = None
Example #6
  def __init__(self):
    self.sensorEncoder = ScalarEncoder(n=512, w=21, minval=8.9, maxval=40,
                                       clipInput=True, forced=True)
    self.motorEncoder = ScalarEncoder(n=512, w=21, minval=-400, maxval=400,
                                      clipInput=True, forced=True)

    self.experimentRunner = SensorimotorExperimentRunner(
      tmOverrides={
        "columnDimensions": [512],
        "maxNewSynapseCount": 21*2,
        "minThreshold": 16*2,
        "activationThreshold": 16*2
      },
      tpOverrides={
        "columnDimensions": [512],
        "numActiveColumnsPerInhArea": 20,
        "poolingThreshUnpredicted": 0.5
      }
    )
Example #7
 def read(cls, proto):
   encoder = object.__new__(cls)
   encoder.verbosity = proto.verbosity
   encoder.minScaledValue = round(proto.minScaledValue, EPSILON_ROUND)
   encoder.maxScaledValue = round(proto.maxScaledValue, EPSILON_ROUND)
   encoder.clipInput = proto.clipInput
   encoder.minval = round(proto.minval, EPSILON_ROUND)
   encoder.maxval = round(proto.maxval, EPSILON_ROUND)
   encoder.encoder = ScalarEncoder.read(proto.encoder)
   encoder.name = proto.name
   encoder.width = encoder.encoder.getWidth()
   encoder.description = [(encoder.name, 0)]
   encoder._bucketValues = None
   encoder.encoders = None
   return encoder
Example #9
def test_sp():
    from nupic.encoders import ScalarEncoder
    from nupic.regions import SPRegion
    columns = 128
    se = ScalarEncoder(n=21 + 50, w=3 + 9, minval=0, maxval=100, forced=True)
    queue = cl.CommandQueue(
        cl.Context([cl.get_platforms()[0].get_devices()[0]]))
    sp = SpatialPooler(queue,
                       columnCount=columns,
                       inputWidth=se.n,
                       spVerbosity=1)
    sp_nupic = SPRegion.SPRegion(columnCount=columns, inputWidth=se.n)

    val = 1
    # return
    for _ in range(0, 2):
        for i in range(0, 10):
            encoding = se.encode(val)
            bucketIdx = se.getBucketIndices(val)[0]
            print("Actual Value: {} , Active Bits: {}, BucketIdx: {}".format(
                val, np.where(encoding == 1), bucketIdx))
            sp.compute(encoding, True, method=2)
            val += 0.5
            print("-" * 10)
Example #10
def createEncoder():
    """Create the encoder instance for our test and return it."""
    consumption_encoder = ScalarEncoder(21,
                                        0.0,
                                        100.0,
                                        n=50,
                                        name="consumption",
                                        clipInput=True)
    time_encoder = DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay")

    encoder = MultiEncoder()
    encoder.addEncoder("consumption", consumption_encoder)
    encoder.addEncoder("timestamp", time_encoder)

    return encoder
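
# A hedged usage sketch (the record values are hypothetical): MultiEncoder
# encodes a dict-like record keyed by the names passed to addEncoder, as with
# the DictObj record in the MultiEncoder test further below.
#   import datetime
#   encoder = createEncoder()
#   record = {"consumption": 42.0,
#             "timestamp": datetime.datetime(2014, 1, 1, 12, 0)}
#   output = encoder.encode(record)  # width = 50 + the date encoder's width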
Example #11
def createEncoder():

    diagCoorA_encoder = ScalarEncoder(105,
                                      0.0,
                                      200.0,
                                      n=1324,
                                      name="diagCoorA",
                                      clipInput=False,
                                      forced=True)
    diagCoorB_encoder = ScalarEncoder(105,
                                      0.0,
                                      200.0,
                                      n=1324,
                                      name="diagCoorB",
                                      clipInput=False,
                                      forced=True)

    global encoder
    encoder = MultiEncoder()

    encoder.addEncoder("diagCoorA", diagCoorA_encoder)
    encoder.addEncoder("diagCoorB", diagCoorB_encoder)

    return encoder
Example #12
def compare_overlap():
    print("Using device: ", device)

    from nupic.encoders import ScalarEncoder
    from nupic.regions import SPRegion
    cols = 2048
    se = ScalarEncoder(n=1024,
                       w=33,
                       minval=0,
                       maxval=20,
                       forced=True,
                       clipInput=True,
                       name='testInput')
    queue = cl.CommandQueue(cl.Context([device]))
    potentialPct = 0.25
    sp_nupic = SPRegion.SPRegion(columnCount=cols,
                                 inputWidth=se.n,
                                 spatialImp='py',
                                 spVerbosity=1,
                                 potentialPct=potentialPct)
    sp_cl = SpatialPooler(queue,
                          columnCount=cols,
                          inputWidth=se.n,
                          spVerbosity=1,
                          inputActive=se.w,
                          potentialPct=potentialPct)
    sp_nupic.initialize(None, None)
    lim = 1
    print("\ntesting nupic")
    test_nupic(sp_nupic, se, lim)
    print("testing cl loop all")
    test_cl_loop_all(sp_cl, se, lim)

    # print("testing cl bit idx")
    # test_cl_idx(sp_cl, se, lim)
    #
    # sp_cl.dump_kernel_info()
    print("testing cl column ")
    test_cl_overlap_all_synapse(sp_cl, se, lim)

    print("Testing numpy")
    test_numpy_idx(sp_cl, se, lim)
    print("testing inverse")
    test_input_inverse(sp_cl, se, lim)

    print("testing cl for loop bin search")
    test_cl_loop_bin(sp_cl, se, lim)
Example #13
    def initialize(self, useRandomEncoder):
        """
    Initialize the various data structures.
    """
        self.setRandomSeed(self.seed)

        self.dim = numpy.shape(self.spatialConfig)[-1]

        self.spatialMap = dict(
            zip(map(tuple, list(self.spatialConfig)),
                self.sensoryInputElements))

        self.lengthMotorInput1D = ((2 * self.maxDisplacement + 1) *
                                   self.numActiveBitsMotorInput)

        uniqueSensoryElements = list(set(self.sensoryInputElementsPool))

        if useRandomEncoder:
            self.sensoryEncoder = SDRCategoryEncoder(
                n=1024,
                w=self.numActiveBitsSensoryInput,
                categoryList=uniqueSensoryElements,
                forced=True)
            self.lengthSensoryInput = self.sensoryEncoder.getWidth()

        else:
            self.lengthSensoryInput = ((len(self.sensoryInputElementsPool) + 1) *
                                       self.numActiveBitsSensoryInput)

            self.sensoryEncoder = CategoryEncoder(
                w=self.numActiveBitsSensoryInput,
                categoryList=uniqueSensoryElements,
                forced=True)

        motorEncoder1D = ScalarEncoder(n=self.lengthMotorInput1D,
                                       w=self.numActiveBitsMotorInput,
                                       minval=-self.maxDisplacement,
                                       maxval=self.maxDisplacement,
                                       clipInput=True,
                                       forced=True)

        self.motorEncoder = VectorEncoder(length=self.dim,
                                          encoder=motorEncoder1D)
Example #14
def createEncoder():
    #volume_encoder = ScalarEncoder(7, 0.0, 70.0, n=200, name="volume", clipInput=False, forced=True)
    #floorheight_encoder = ScalarEncoder(1, 0.0, 70.0, n=25, name="floorheight", clipInput=False, forced=True)

    diagCoorA_encoder = ScalarEncoder(257,
                                      0.0,
                                      200.0,
                                      n=2048,
                                      name="diagCoorA",
                                      clipInput=False,
                                      forced=True)
    #diagCoorB_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorB", clipInput=False, forced=True)
    #diagCoorC_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorC", clipInput=False, forced=True)
    #diagCoorD_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorD", clipInput=False, forced=True)
    #diagCoorE_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorE", clipInput=False, forced=True)
    #diagCoorF_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorF", clipInput=False, forced=True)
    #diagCoorG_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorG", clipInput=False, forced=True)
    #diagCoorH_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorH", clipInput=False, forced=True)
    #diagCoorI_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorI", clipInput=False, forced=True)
    #diagCoorJ_encoder = ScalarEncoder(157, 0.0, 200.0, n=2000, name="diagCoorJ", clipInput=False, forced=True)

    global encoder
    encoder = MultiEncoder()

    #encoder.addEncoder("volume", volume_encoder)
    #encoder.addEncoder("floorheight", floorheight_encoder)
    encoder.addEncoder("diagCoorA", diagCoorA_encoder)
    #encoder.addEncoder("diagCoorB", diagCoorB_encoder)
    #encoder.addEncoder("diagCoorC", diagCoorC_encoder)
    #encoder.addEncoder("diagCoorD", diagCoorD_encoder)
    #encoder.addEncoder("diagCoorE", diagCoorE_encoder)
    #encoder.addEncoder("diagCoorF", diagCoorF_encoder)
    #encoder.addEncoder("diagCoorG", diagCoorG_encoder)
    #encoder.addEncoder("diagCoorH", diagCoorH_encoder)
    #encoder.addEncoder("diagCoorI", diagCoorI_encoder)
    #encoder.addEncoder("diagCoorJ", diagCoorJ_encoder)

    return encoder
Example #15
def test_cla_se():
    from nupic.encoders import ScalarEncoder
    from nupic.algorithms.CLAClassifier import CLAClassifier as npCLAClassifier

    se = ScalarEncoder(n=10, w=3, minval=0, maxval=20, forced=True)
    queue = cl.CommandQueue(cl.Context([cl.get_platforms()[0].get_devices()[0]]))
    classifier = CLAClassifier(queue, numbuckets=len(se.getBucketValues()), bits=se.n, verbosity=True)
    np_cla = npCLAClassifier(verbosity=1)
    print("Buckets", se.getBucketValues())
    val = 5
    for _ in range(0, 2):
        for i in range(0, 10):
            encoding = np.where(se.encode(val) == 1)[0]
            bucketIdx = se.getBucketIndices(val)[0]
            print("Actual Value: {} , Active Bits: {}, BucketIdx: {}".format(val, encoding, bucketIdx))
            cl_preds = classifier.compute(i, encoding, bucketIdx, val, True, True)
            nupic_preds = np_cla.compute(i, encoding, {'bucketIdx': bucketIdx, 'actValue': val}, True, True)
            print("cl", cl_preds)
            print("nup", np_cla._actualValues)
            print("nup", nupic_preds)
            # assert cl_preds == nupic_preds
            val += 0.5
            print("-" * 32)
Example #16
    def testMultiEncoder(self):
        """Testing MultiEncoder..."""

        e = MultiEncoder()

        # should be 7 bits wide
        # use of forced=True is not recommended, but used here for readability;
        # see scalar.py
        e.addEncoder(
            "dow",
            ScalarEncoder(w=3,
                          resolution=1,
                          minval=1,
                          maxval=8,
                          periodic=True,
                          name="day of week",
                          forced=True))
        # should be 14 bits wide
        e.addEncoder(
            "myval",
            ScalarEncoder(w=5,
                          resolution=1,
                          minval=1,
                          maxval=10,
                          periodic=False,
                          name="aux",
                          forced=True))
        self.assertEqual(e.getWidth(), 21)
        self.assertEqual(e.getDescription(), [("day of week", 0), ("aux", 7)])

        d = DictObj(dow=3, myval=10)
        expected = numpy.array([0, 1, 1, 1, 0, 0, 0] +
                               [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                               dtype="uint8")
        output = e.encode(d)
        self.assertTrue(numpy.array_equal(expected, output))

        # Check decoding
        decoded = e.decode(output)
        self.assertEqual(len(decoded), 2)
        (ranges, _) = decoded[0]["aux"]
        self.assertEqual(len(ranges), 1)
        self.assertTrue(numpy.array_equal(ranges[0], [10, 10]))
        (ranges, _) = decoded[0]["day of week"]
        self.assertTrue(
            len(ranges) == 1 and numpy.array_equal(ranges[0], [3, 3]))

        e.addEncoder(
            "myCat",
            SDRCategoryEncoder(n=7,
                               w=3,
                               categoryList=["run", "pass", "kick"],
                               forced=True))

        d = DictObj(dow=4, myval=6, myCat="pass")
        output = e.encode(d)
        topDownOut = e.topDownCompute(output)
        self.assertAlmostEqual(topDownOut[0].value, 4.5)
        self.assertEqual(topDownOut[1].value, 6.0)
        self.assertEqual(topDownOut[2].value, "pass")
        self.assertEqual(topDownOut[2].scalar, 2)
        self.assertEqual(topDownOut[2].encoding.sum(), 3)
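
        # Width arithmetic behind the assertions above, as a sketch of the
        # usual ScalarEncoder sizing rules (my reading of scalar.py):
        #   periodic:     n = (maxval - minval) / resolution     -> (8 - 1) / 1 = 7
        #   non-periodic: n = (maxval - minval) / resolution + w -> (10 - 1) / 1 + 5 = 14
        # which is why getWidth() is 21 and "aux" starts at offset 7.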
Example #17
File: description.py Project: zacg/nupic
def getDescription(datasets):

  # ========================================================================
  # Encoder for the sensor
  encoder = MultiEncoder()

  if config['encodingFieldStyleA'] == 'contiguous':
    encoder.addEncoder('fieldA', ScalarEncoder(w=config['encodingOnBitsA'],
                        n=config['encodingFieldWidthA'], minval=0,
                        maxval=config['numAValues'], periodic=True, name='fieldA'))
  elif config['encodingFieldStyleA'] == 'sdr':
    encoder.addEncoder('fieldA', SDRCategoryEncoder(w=config['encodingOnBitsA'],
                        n=config['encodingFieldWidthA'],
                        categoryList=range(config['numAValues']), name='fieldA'))
  else:
    assert False


  if config['encodingFieldStyleB'] == 'contiguous':
    encoder.addEncoder('fieldB', ScalarEncoder(w=config['encodingOnBitsB'], 
                      n=config['encodingFieldWidthB'], minval=0, 
                      maxval=config['numBValues'], periodic=True, name='fieldB'))
  elif config['encodingFieldStyleB'] == 'zero':
    encoder.addEncoder('fieldB', SDRRandomEncoder(w=0, n=config['encodingFieldWidthB'], 
                      name='fieldB'))
  elif config['encodingFieldStyleB'] == 'sdr':
    encoder.addEncoder('fieldB', SDRCategoryEncoder(w=config['encodingOnBitsB'], 
                      n=config['encodingFieldWidthB'], 
                      categoryList=range(config['numBValues']), name='fieldB'))
  else:
    assert False



  # ========================================================================
  # Network definition


  # ------------------------------------------------------------------
  # Node params
  # The inputs are long, horizontal vectors
  inputShape = (1, encoder.getWidth())

  # Layout the coincidences vertically stacked on top of each other, each
  # looking at the entire input field. 
  coincidencesShape = (config['spCoincCount'], 1)
  inputBorder = inputShape[1]/2
  if inputBorder*2 >= inputShape[1]:
    inputBorder -= 1

  sensorParams = dict(
    # encoder/datasource are not parameters so don't include here
    verbosity=config['sensorVerbosity']
  )

  CLAParams = dict(
    inputShape = inputShape,
    inputBorder = inputBorder,
    coincidencesShape = coincidencesShape,
    coincInputRadius = inputShape[1]/2,
    coincInputPoolPct = 1.0,
    gaussianDist = 0,
    commonDistributions = 0,    # should be False if possibly not training
    localAreaDensity = -1, #0.05, 
    numActivePerInhArea = config['spNumActivePerInhArea'],
    dutyCyclePeriod = 1000,
    stimulusThreshold = 1,
    synPermInactiveDec = config['spSynPermInactiveDec'],
    synPermActiveInc = 0.02,
    synPermActiveSharedDec=0.0,
    synPermOrphanDec = 0.0,
    minPctDutyCycleBeforeInh = 0.001,
    minPctDutyCycleAfterInh = config['spMinPctDutyCycleAfterInh'],
    minDistance = 0.05,
    computeTopDown = 1,
    spVerbosity = config['spVerbosity'],
    spSeed = 1,
    printPeriodicStats = int(config['spPeriodicStats']),

    # TP params
    disableTemporal = 1,

    # General params
    trainingStep = 'spatial',
    )

  trainingDataSource = FileRecordStream(datasets['trainingFilename'])


  description = dict(
    options = dict(
      logOutputsDuringInference = False,
    ),

    network = dict(
      sensorDataSource = trainingDataSource,
      sensorEncoder = encoder, 
      sensorParams = sensorParams,

      CLAType = 'py.CLARegion',
      CLAParams = CLAParams,

      classifierType = None,
      classifierParams = None),

  )

  if config['trainSP']:
    description['spTrain'] = dict(
      iterationCount=config['iterationCount'], 
      #iter=displaySPCoincidences(50),
      finish=printSPCoincidences()
      )
  else:
    description['spTrain'] = dict(
      # need to train with one iteration just to initialize data structures
      iterationCount=1)



  # ============================================================================
  # Inference tests
  inferSteps = []

  # ----------------------------------------
  # Training dataset
  if True:
    datasetName = 'bothTraining'
    inferSteps.append(
      dict(name = '%s_baseline' % datasetName, 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['trainingFilename'])],
           ppOptions = dict(printLearnedCoincidences=True),
          )
      )

    inferSteps.append(
      dict(name = '%s_acc' % datasetName, 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['trainingFilename'])],
           ppOptions = dict(onlyClassificationAcc=True,
                            tpActivationThresholds=config['tpActivationThresholds'],
                            computeDistances=True,
                            verbosity = 1),
          )
      )

  # ----------------------------------------
  # Testing dataset
  if 'testingFilename' in datasets:
    datasetName = 'bothTesting'
    inferSteps.append(
      dict(name = '%s_baseline' % datasetName, 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['testingFilename'])],
           ppOptions = dict(printLearnedCoincidences=False),
          )
      )

    inferSteps.append(
      dict(name = '%s_acc' % datasetName, 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['testingFilename'])],
           ppOptions = dict(onlyClassificationAcc=True,
                            tpActivationThresholds=config['tpActivationThresholds']),
          )
      )


  description['infer'] = inferSteps

  return description
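
# A hedged sketch of the config dict getDescription() expects; the keys are
# taken from the reads above, but every value here is hypothetical:
#   config = dict(
#       encodingFieldStyleA='contiguous', encodingOnBitsA=21,
#       encodingFieldWidthA=300, numAValues=10,
#       encodingFieldStyleB='sdr', encodingOnBitsB=21,
#       encodingFieldWidthB=300, numBValues=10,
#       spCoincCount=200, spNumActivePerInhArea=40,
#       spSynPermInactiveDec=0.01, spMinPctDutyCycleAfterInh=0.001,
#       spVerbosity=0, spPeriodicStats=0, sensorVerbosity=0,
#       trainSP=True, iterationCount=1000,
#       tpActivationThresholds=[8, 10, 12])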
Example #18
class LogEncoder(Encoder):
  """
  This class wraps the :class:`.ScalarEncoder`.

  A Log encoder represents a floating point value on a logarithmic scale.

  .. code-block:: python

     valueToEncode = log10(input)

  :param resolution: The minimum change in scaled value needed to produce a
                     change in encoding. This should be specified in log space.
                     For example, with a resolution of 0.1 the scaled values
                     0 and 0.1 will be distinguishable in the output. In terms
                     of the original input values, this means 10^0 (1) and
                     10^0.1 (~1.25) will be distinguishable.
  :param radius: inputs separated by more than this distance in log space will
                 have non-overlapping representations
  """

  def __init__(self,
               w=5,
               minval=1e-07,
               maxval=10000,
               periodic=False,
               n=0,
               radius=0,
               resolution=0,
               name="log",
               verbosity=0,
               clipInput=True,
               forced=False):

    # Lower bound for log encoding near machine precision limit
    lowLimit = 1e-07

    # Limit minval as log10(0) is undefined.
    if minval < lowLimit:
      minval = lowLimit

    # Check that minval is still lower than maxval
    if not minval < maxval:
      raise ValueError("Max val must be larger than min val or the lower limit "
                       "for this encoder %.7f" % lowLimit)

    self.encoders = None
    self.verbosity = verbosity

    # Scale values for calculations within the class
    self.minScaledValue = math.log10(minval)
    self.maxScaledValue = math.log10(maxval)

    if not self.maxScaledValue > self.minScaledValue:
      raise ValueError("Max val must be larger, in log space, than min val.")

    self.clipInput = clipInput
    self.minval = minval
    self.maxval = maxval

    self.encoder = ScalarEncoder(w=w,
                                 minval=self.minScaledValue,
                                 maxval=self.maxScaledValue,
                                 periodic=False,
                                 n=n,
                                 radius=radius,
                                 resolution=resolution,
                                 verbosity=self.verbosity,
                                 clipInput=self.clipInput,
                                 forced=forced)
    self.width = self.encoder.getWidth()
    self.description = [(name, 0)]
    self.name = name

    # This list is created by getBucketValues() the first time it is called,
    #  and re-created whenever our buckets would be re-arranged.
    self._bucketValues = None


  def getWidth(self):
    return self.width


  def getDescription(self):
    return self.description


  def getDecoderOutputFieldTypes(self):
    """
    Encoder class virtual method override
    """
    return (FieldMetaType.float, )


  def _getScaledValue(self, inpt):
    """
    Convert the input, which is in normal space, into log space
    """
    if inpt == SENTINEL_VALUE_FOR_MISSING_DATA:
      return None
    else:
      val = inpt
      if val < self.minval:
        val = self.minval
      elif val > self.maxval:
        val = self.maxval

      scaledVal = math.log10(val)
      return scaledVal


  def getBucketIndices(self, inpt):
    """
    See the function description in base.py
    """

    # Get the scaled value
    scaledVal = self._getScaledValue(inpt)

    if scaledVal is None:
      return [None]
    else:
      return self.encoder.getBucketIndices(scaledVal)


  def encodeIntoArray(self, inpt, output):
    """
    See the function description in base.py
    """

    # Get the scaled value
    scaledVal = self._getScaledValue(inpt)

    if scaledVal is None:
      output[0:] = 0
    else:
      self.encoder.encodeIntoArray(scaledVal, output)

      if self.verbosity >= 2:
        print "input:", inpt, "scaledVal:", scaledVal, "output:", output
        print "decoded:", self.decodedToStr(self.decode(output))


  def decode(self, encoded, parentFieldName=''):
    """
    See the function description in base.py
    """

    # Get the scalar values from the underlying scalar encoder
    (fieldsDict, fieldNames) = self.encoder.decode(encoded)
    if len(fieldsDict) == 0:
      return (fieldsDict, fieldNames)

    # Expect only 1 field
    assert(len(fieldsDict) == 1)

    # Convert each range into normal space
    (inRanges, inDesc) = fieldsDict.values()[0]
    outRanges = []
    for (minV, maxV) in inRanges:
      outRanges.append((math.pow(10, minV),
                        math.pow(10, maxV)))

    # Generate a text description of the ranges
    desc = ""
    numRanges = len(outRanges)
    for i in xrange(numRanges):
      if outRanges[i][0] != outRanges[i][1]:
        desc += "%.2f-%.2f" % (outRanges[i][0], outRanges[i][1])
      else:
        desc += "%.2f" % (outRanges[i][0])
      if i < numRanges-1:
        desc += ", "

    # Return result
    if parentFieldName != '':
      fieldName = "%s.%s" % (parentFieldName, self.name)
    else:
      fieldName = self.name
    return ({fieldName: (outRanges, desc)}, [fieldName])


  def getBucketValues(self):
    """
    See the function description in base.py
    """

    # Need to re-create?
    if self._bucketValues is None:
      scaledValues = self.encoder.getBucketValues()
      self._bucketValues = []
      for scaledValue in scaledValues:
        value = math.pow(10, scaledValue)
        self._bucketValues.append(value)

    return self._bucketValues


  def getBucketInfo(self, buckets):
    """
    See the function description in base.py
    """

    scaledResult = self.encoder.getBucketInfo(buckets)[0]
    scaledValue = scaledResult.value
    value = math.pow(10, scaledValue)

    return [EncoderResult(value=value, scalar=value,
                         encoding = scaledResult.encoding)]


  def topDownCompute(self, encoded):
    """
    See the function description in base.py
    """

    scaledResult = self.encoder.topDownCompute(encoded)[0]
    scaledValue = scaledResult.value
    value = math.pow(10, scaledValue)

    return EncoderResult(value=value, scalar=value,
                         encoding = scaledResult.encoding)


  def closenessScores(self, expValues, actValues, fractional=True):
    """
    See the function description in base.py
    """

    # Compute the percent error in log space
    if expValues[0] > 0:
      expValue = math.log10(expValues[0])
    else:
      expValue = self.minScaledValue

    if actValues[0] > 0:
      actValue = math.log10(actValues[0])
    else:
      actValue = self.minScaledValue

    if fractional:
      err = abs(expValue - actValue)
      pctErr = err / (self.maxScaledValue - self.minScaledValue)
      pctErr = min(1.0, pctErr)
      closeness = 1.0 - pctErr
    else:
      err = abs(expValue - actValue)
      closeness = err

    #print "log::", "expValue:", expValues[0], "actValue:", actValues[0], \
    #      "closeness", closeness
    #import pdb; pdb.set_trace()
    return numpy.array([closeness])


  @classmethod
  def getSchema(cls):
    return LogEncoderProto

  @classmethod
  def read(cls, proto):
    encoder = object.__new__(cls)
    encoder.verbosity = proto.verbosity
    encoder.minScaledValue = round(proto.minScaledValue, EPSILON_ROUND)
    encoder.maxScaledValue = round(proto.maxScaledValue, EPSILON_ROUND)
    encoder.clipInput = proto.clipInput
    encoder.minval = round(proto.minval, EPSILON_ROUND)
    encoder.maxval = round(proto.maxval, EPSILON_ROUND)
    encoder.encoder = ScalarEncoder.read(proto.encoder)
    encoder.name = proto.name
    encoder.width = encoder.encoder.getWidth()
    encoder.description = [(encoder.name, 0)]
    encoder._bucketValues = None
    encoder.encoders = None
    return encoder


  def write(self, proto):
    proto.verbosity = self.verbosity
    proto.minScaledValue = self.minScaledValue
    proto.maxScaledValue = self.maxScaledValue
    proto.clipInput = self.clipInput
    proto.minval = self.minval
    proto.maxval = self.maxval
    self.encoder.write(proto.encoder)
    proto.name = self.name
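
# A minimal usage sketch (hypothetical parameters): values that are powers of
# ten land at evenly spaced encoder positions, because the wrapped
# ScalarEncoder operates on log10(input).
if __name__ == "__main__":
  logEnc = LogEncoder(w=5, resolution=0.1, minval=1.0, maxval=10000,
                      forced=True)
  for v in (1.0, 10.0, 100.0, 1000.0):
    print v, logEnc.encode(v).nonzero()[0]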
Example #19

import pyaudio
import audioop
import math
from nupic.encoders import ScalarEncoder
from nupic.research.TP import TP
from termcolor import colored

# Create our NuPIC entities

enc = ScalarEncoder(n=50, w=3, minval=0, maxval=100,
                    clipInput=True, forced=True)

tp = TP(numberOfCols=50, cellsPerColumn=4, initialPerm=0.5,
        connectedPerm=0.5, minThreshold=5, newSynapseCount=5,
        permanenceInc=0.1, permanenceDec=0.1,
        activationThreshold=3, globalDecay=0.1, burnIn=1,
        checkSynapseConsistency=False, pamLength=3)

# Setup our PyAudio Stream

p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1,
                rate=int(p.get_device_info_by_index(0)['defaultSampleRate']),
                input=True, frames_per_buffer=1024*5)

print "%-48s %48s" % (colored("DECIBELS","green"),
Example #20
File: test_tp.py Project: kyuelin/hacks
        month = Month(date)
        self.dayOfWeek = date.weekday()
        self.dayOfMonth = date.day
        self.firstLastOfMonth = 0 if date.day == 1 else 2 if date.day == month.last_day else 1
        self.weekOfMonth = get_week_of_month(date)
        self.yearOfDecade = date.year % 10
        self.monthOfYear = date.month
        self.quarterOfYear = month.quarter
        self.halfOfYear = month.half


if __name__ == "__main__":
    day_of_week_enc = ScalarEncoder(w=3,
                                    minval=0,
                                    maxval=7,
                                    radius=1.5,
                                    periodic=True,
                                    name="dayOfWeek",
                                    forced=True)
    day_of_month_enc = ScalarEncoder(w=3,
                                     minval=1,
                                     maxval=31,
                                     radius=1.5,
                                     periodic=False,
                                     name="dayOfMonth",
                                     forced=True)
    first_last_of_month_enc = ScalarEncoder(w=1,
                                            minval=0,
                                            maxval=2,
                                            radius=1,
                                            periodic=False,
Example #21
File: Overview.py Project: MichoelSnow/HTM
"""
Created on Sun Apr 30 16:19:23 2017

@author: BJ
"""
from __future__ import absolute_import, division, print_function
import numpy
numpy.set_printoptions(threshold=numpy.nan)
from nupic.encoders import ScalarEncoder

# Scalar encoders
# n is number of bits
# w is the number of on bits
# minval and maxval is the range the bits represent
enc = ScalarEncoder(n=22,
                    w=3,
                    minval=2.5,
                    maxval=97.5,
                    clipInput=True,
                    forced=True)
enc = ScalarEncoder(n=22,
                    w=3,
                    minval=0,
                    maxval=100,
                    clipInput=True,
                    forced=True)
[print(enc.encode(i)) for i in xrange(1, 10)]
print("3 =", enc.encode(10200))

enc = ScalarEncoder(n=14,
                    w=3,
                    minval=1,
                    maxval=8,
Example #22
    #
    #output = nupic_output.NuPICFileOutput([dataSet])
    # skips = 0
    truths = []
    # predictions = []
    loop_length = len(df) if limit_to is None else limit_to

    pred_n = 109
    date_n = 100
    time_n = 600  # 600 # (29+48-1) * 3
    use_pred = True
    use_date = True
    use_time = True
    total_n = (pred_n if use_pred else 0) + (date_n if use_date else 0) + (time_n if use_time else 0)
    buckets = 22
    enc = ScalarEncoder(29, minval=0, maxval=40000, n=pred_n)
    encDate = ScalarEncoder(29, minval=0, maxval=7, n=date_n,)
    encTime = ScalarEncoder(29, minval=0, maxval=1411, n=time_n)
    encOut = ScalarEncoder(29, minval=0, maxval=40000, n=50)
    from_command = False
    if from_command:
        nTrain = int(argv[3])
        batch = int(argv[4])
        epochs = int(argv[5])
        epochs_retrain = int(argv[6])
        lr = float(argv[7])
        verbose = False
    else:
        nTrain = 4000
        batch = 1024
        lr = 0.005
Example #23

import pyaudio
import audioop
import math
from nupic.encoders import ScalarEncoder
from nupic.research.TP import TP
from termcolor import colored

# Create our NuPIC entities

enc = ScalarEncoder(n=50,
                    w=3,
                    minval=0,
                    maxval=100,
                    clipInput=True,
                    forced=True)

tp = TP(numberOfCols=50,
        cellsPerColumn=4,
        initialPerm=0.5,
        connectedPerm=0.5,
        minThreshold=5,
        newSynapseCount=5,
        permanenceInc=0.1,
        permanenceDec=0.1,
        activationThreshold=3,
        globalDecay=0.1,
        burnIn=1,
        checkSynapseConsistency=False,
Example #24
    def testSimpleMulticlassNetwork(self):

        # Setup data record stream of fake data (with three categories)
        filename = _getTempFileName()
        fields = [("timestamp", "datetime", "T"), ("value", "float", ""),
                  ("reset", "int", "R"), ("sid", "int", "S"),
                  ("categories", "list", "C")]
        records = (
            [datetime(day=1, month=3, year=2010), 0.0, 1, 0, ""],
            [datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1 2"],
            [datetime(day=3, month=3, year=2010), 1.0, 0, 0, "1 2"],
            [datetime(day=4, month=3, year=2010), 2.0, 0, 0, "0"],
            [datetime(day=5, month=3, year=2010), 3.0, 0, 0, "1 2"],
            [datetime(day=6, month=3, year=2010), 5.0, 0, 0, "1 2"],
            [datetime(day=7, month=3, year=2010), 8.0, 0, 0, "0"],
            [datetime(day=8, month=3, year=2010), 13.0, 0, 0, "1 2"])
        dataSource = FileRecordStream(streamID=filename,
                                      write=True,
                                      fields=fields)
        for r in records:
            dataSource.appendRecord(list(r))

        # Create the network and get region instances.
        net = Network()
        net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
        net.addRegion("classifier", "py.KNNClassifierRegion",
                      "{'k': 2,'distThreshold': 0,'maxCategoryCount': 3}")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="dataOut",
                 destInput="bottomUpIn")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="categoryOut",
                 destInput="categoryIn")
        sensor = net.regions["sensor"]
        classifier = net.regions["classifier"]

        # Setup sensor region encoder and data stream.
        dataSource.close()
        dataSource = FileRecordStream(filename)
        sensorRegion = sensor.getSelf()
        sensorRegion.encoder = MultiEncoder()
        sensorRegion.encoder.addEncoder(
            "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
        sensorRegion.dataSource = dataSource

        # Get ready to run.
        net.initialize()

        # Train the network (by default learning is ON in the classifier, but assert
        # anyway) and then turn off learning and turn on inference mode.
        self.assertEqual(classifier.getParameter("learningMode"), 1)
        net.run(8)
        classifier.setParameter("inferenceMode", 1)
        classifier.setParameter("learningMode", 0)

        # Assert learning is OFF and that the classifier learned the dataset.
        self.assertEqual(classifier.getParameter("learningMode"), 0,
                         "Learning mode is not turned off.")
        self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                         "Inference mode is not turned on.")
        self.assertEqual(
            classifier.getParameter("categoryCount"), 3,
            "The classifier should count three total categories.")
        # The classifier learns 12 patterns because there are 12 category
        # labels among the records:
        self.assertEqual(
            classifier.getParameter("patternCount"), 12,
            "The classifier should've learned 12 samples in total.")

        # Test the network on the same data as it trained on; should classify with
        # 100% accuracy.
        expectedCats = ([0.0, 0.5, 0.5], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5],
                        [0.5, 0.5, 0.0], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5],
                        [0.5, 0.5, 0.0], [0.0, 0.5, 0.5])
        dataSource.rewind()
        for i in xrange(8):
            net.run(1)
            inferredCats = classifier.getOutputData("categoriesOut")
            self.assertSequenceEqual(
                expectedCats[i], inferredCats.tolist(),
                "Classififer did not infer expected category probabilites for record "
                "number {}.".format(i))

        # Close data stream, delete file.
        dataSource.close()
        os.remove(filename)
Example #25
class LogEncoder(Encoder):
    """
  This class wraps the :class:`.ScalarEncoder`.

  A Log encoder represents a floating point value on a logarithmic scale.

  .. code-block:: python

     valueToEncode = log10(input)

  :param resolution: The minimum change in scaled value needed to produce a
                     change in encoding. This should be specified in log space.
                     For example, with a resolution of 0.1 the scaled values
                     0 and 0.1 will be distinguishable in the output. In terms
                     of the original input values, this means 10^0 (1) and
                     10^0.1 (~1.25) will be distinguishable.
  :param radius: inputs separated by more than this distance in log space will
                 have non-overlapping representations
  """
    def __init__(self,
                 w=5,
                 minval=1e-07,
                 maxval=10000,
                 periodic=False,
                 n=0,
                 radius=0,
                 resolution=0,
                 name="log",
                 verbosity=0,
                 clipInput=True,
                 forced=False):

        # Lower bound for log encoding near machine precision limit
        lowLimit = 1e-07

        # Limit minval as log10(0) is undefined.
        if minval < lowLimit:
            minval = lowLimit

        # Check that minval is still lower than maxval
        if not minval < maxval:
            raise ValueError(
                "Max val must be larger than min val or the lower limit "
                "for this encoder %.7f" % lowLimit)

        self.encoders = None
        self.verbosity = verbosity

        # Scale values for calculations within the class
        self.minScaledValue = math.log10(minval)
        self.maxScaledValue = math.log10(maxval)

        if not self.maxScaledValue > self.minScaledValue:
            raise ValueError(
                "Max val must be larger, in log space, than min val.")

        self.clipInput = clipInput
        self.minval = minval
        self.maxval = maxval

        self.encoder = ScalarEncoder(w=w,
                                     minval=self.minScaledValue,
                                     maxval=self.maxScaledValue,
                                     periodic=False,
                                     n=n,
                                     radius=radius,
                                     resolution=resolution,
                                     verbosity=self.verbosity,
                                     clipInput=self.clipInput,
                                     forced=forced)
        self.width = self.encoder.getWidth()
        self.description = [(name, 0)]
        self.name = name

        # This list is created by getBucketValues() the first time it is called,
        #  and re-created whenever our buckets would be re-arranged.
        self._bucketValues = None

    def getWidth(self):
        return self.width

    def getDescription(self):
        return self.description

    def getDecoderOutputFieldTypes(self):
        """
    Encoder class virtual method override
    """
        return (FieldMetaType.float, )

    def _getScaledValue(self, inpt):
        """
    Convert the input, which is in normal space, into log space
    """
        if inpt == SENTINEL_VALUE_FOR_MISSING_DATA:
            return None
        else:
            val = inpt
            if val < self.minval:
                val = self.minval
            elif val > self.maxval:
                val = self.maxval

            scaledVal = math.log10(val)
            return scaledVal

    def getBucketIndices(self, inpt):
        """
    See the function description in base.py
    """

        # Get the scaled value
        scaledVal = self._getScaledValue(inpt)

        if scaledVal is None:
            return [None]
        else:
            return self.encoder.getBucketIndices(scaledVal)

    def encodeIntoArray(self, inpt, output):
        """
    See the function description in base.py
    """

        # Get the scaled value
        scaledVal = self._getScaledValue(inpt)

        if scaledVal is None:
            output[0:] = 0
        else:
            self.encoder.encodeIntoArray(scaledVal, output)

            if self.verbosity >= 2:
                print("input:", inpt, "scaledVal:", scaledVal, "output:",
                      output)
                print("decoded:", self.decodedToStr(self.decode(output)))

    def decode(self, encoded, parentFieldName=''):
        """
    See the function description in base.py
    """

        # Get the scalar values from the underlying scalar encoder
        (fieldsDict, fieldNames) = self.encoder.decode(encoded)
        if len(fieldsDict) == 0:
            return (fieldsDict, fieldNames)

        # Expect only 1 field
        assert (len(fieldsDict) == 1)

        # Convert each range into normal space
        (inRanges, inDesc) = list(fieldsDict.values())[0]
        outRanges = []
        for (minV, maxV) in inRanges:
            outRanges.append((math.pow(10, minV), math.pow(10, maxV)))

        # Generate a text description of the ranges
        desc = ""
        numRanges = len(outRanges)
        for i in range(numRanges):
            if outRanges[i][0] != outRanges[i][1]:
                desc += "%.2f-%.2f" % (outRanges[i][0], outRanges[i][1])
            else:
                desc += "%.2f" % (outRanges[i][0])
            if i < numRanges - 1:
                desc += ", "

        # Return result
        if parentFieldName != '':
            fieldName = "%s.%s" % (parentFieldName, self.name)
        else:
            fieldName = self.name
        return ({fieldName: (outRanges, desc)}, [fieldName])

    def getBucketValues(self):
        """
    See the function description in base.py
    """

        # Need to re-create?
        if self._bucketValues is None:
            scaledValues = self.encoder.getBucketValues()
            self._bucketValues = []
            for scaledValue in scaledValues:
                value = math.pow(10, scaledValue)
                self._bucketValues.append(value)

        return self._bucketValues

    def getBucketInfo(self, buckets):
        """
    See the function description in base.py
    """

        scaledResult = self.encoder.getBucketInfo(buckets)[0]
        scaledValue = scaledResult.value
        value = math.pow(10, scaledValue)

        return [
            EncoderResult(value=value,
                          scalar=value,
                          encoding=scaledResult.encoding)
        ]

    def topDownCompute(self, encoded):
        """
    See the function description in base.py
    """

        scaledResult = self.encoder.topDownCompute(encoded)[0]
        scaledValue = scaledResult.value
        value = math.pow(10, scaledValue)

        return EncoderResult(value=value,
                             scalar=value,
                             encoding=scaledResult.encoding)

    def closenessScores(self, expValues, actValues, fractional=True):
        """
    See the function description in base.py
    """

        # Compute the percent error in log space
        if expValues[0] > 0:
            expValue = math.log10(expValues[0])
        else:
            expValue = self.minScaledValue

        if actValues[0] > 0:
            actValue = math.log10(actValues[0])
        else:
            actValue = self.minScaledValue

        if fractional:
            err = abs(expValue - actValue)
            pctErr = err / (self.maxScaledValue - self.minScaledValue)
            pctErr = min(1.0, pctErr)
            closeness = 1.0 - pctErr
        else:
            err = abs(expValue - actValue)
            closeness = err

        #print "log::", "expValue:", expValues[0], "actValue:", actValues[0], \
        #      "closeness", closeness
        #import pdb; pdb.set_trace()
        return numpy.array([closeness])

    @classmethod
    def read(cls, proto):
        encoder = object.__new__(cls)
        encoder.verbosity = proto.verbosity
        encoder.minScaledValue = proto.minScaledValue
        encoder.maxScaledValue = proto.maxScaledValue
        encoder.clipInput = proto.clipInput
        encoder.minval = proto.minval
        encoder.maxval = proto.maxval
        encoder.encoder = ScalarEncoder.read(proto.encoder)
        encoder.name = proto.name
        encoder.width = encoder.encoder.getWidth()
        encoder.description = [(encoder.name, 0)]
        encoder._bucketValues = None
        return encoder

    def write(self, proto):
        proto.verbosity = self.verbosity
        proto.minScaledValue = self.minScaledValue
        proto.maxScaledValue = self.maxScaledValue
        proto.clipInput = self.clipInput
        proto.minval = self.minval
        proto.maxval = self.maxval
        self.encoder.write(proto.encoder)
        proto.name = self.name
Example #26
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 11 22:11:49 2018

@author: Arunodhaya
"""
import numpy as np
from nupic.encoders import ScalarEncoder
# IPython help syntax (works only in an IPython/Jupyter session):
ScalarEncoder?

enc = ScalarEncoder(n=22, w=3, minval=2.5, maxval=97.5, clipInput=False, forced=True)
print "3 =", enc.encode(3)
print "4 =", enc.encode(4)
print "5 =", enc.encode(5)
print "1000 =", enc.encode(1000)


from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder

RandomDistributedScalarEncoder?

rdse = RandomDistributedScalarEncoder(n=21, w=3, resolution=5, offset=2.5)

print "3 =   ", rdse.encode(3)
print "4 =   ", rdse.encode(4)
print "5 =   ", rdse.encode(5)
print
print "100 = ", rdse.encode(100)
print "100000 =", rdse.encode(1000)

Example #27
#!/usr/bin/env python

import rospy
import numpy

from nupic.encoders import ScalarEncoder
from nupic.research.spatial_pooler import SpatialPooler

enc = ScalarEncoder(n=10000, w=21, minval=0, maxval=10000)

from std_msgs.msg import String, Float64
t =[]
for i in range(10000):
    t.append(enc.encode(i))

print("Encoding is done")

sp = SpatialPooler(inputDimensions=(10000,),
                   columnDimensions=(20,),
                   potentialRadius=15,
                   numActiveColumnsPerInhArea=1,
                   globalInhibition=True,
                   synPermActiveInc=0.03,
                   potentialPct=1.0)
output = numpy.zeros((20,),dtype="int")
for _ in range(10):
    for i in xrange(10000):
        sp.compute(t[i], learn=True, activeArray=output)

print("Spatial pooler strengthened")
Example #28
def createEncoder():

    # ScalarEncoder positional arguments are (w, minval, maxval); the small
    # w values here require forced=True to skip the encoder's sanity checks.
    volume_encoder = ScalarEncoder(7, 0.0, 70.0, n=200, name="volume",
                                   forced=True)
    floorheight_encoder = ScalarEncoder(1, 0.0, 70.0, n=25,
                                        name="floorheight", forced=True)

    global encoder
    encoder = MultiEncoder()

    encoder.addEncoder("volume", volume_encoder)
    encoder.addEncoder("floorheight", floorheight_encoder)

    # The ten diagonal-coordinate fields share identical parameters, so
    # build and register their encoders in a loop.
    for letter in "ABCDEFGHIJ":
        name = "diagCoor%s" % letter
        encoder.addEncoder(name, ScalarEncoder(105, 0.0, 200.0, n=1000,
                                               name=name))

    return encoder
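
A minimal usage sketch (the record values below are made up for illustration; NuPIC's MultiEncoder takes a dict keyed by field name and returns the concatenated SDR):

enc = createEncoder()
record = {"volume": 35.0, "floorheight": 12.0}
record.update(dict(("diagCoor%s" % c, 100.0) for c in "ABCDEFGHIJ"))
sdr = enc.encode(record)
print(sdr.nonzero()[0])  # indices of the active bits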
Example #29
def createTemporalAnomaly(recordParams,
                          spatialParams=_SP_PARAMS,
                          temporalParams=_TP_PARAMS,
                          verbosity=_VERBOSITY):
    """Generates a Network with connected RecordSensor, SP, TP.

  This function takes care of generating regions and the canonical links.
  The network has a sensor region reading data from a specified input and
  passing the encoded representation to an SPRegion.
  The SPRegion output is passed to a TPRegion.

  Note: this function returns a network that needs to be initialized. This
  allows the user to extend the network by adding further regions and
  connections.

  :param recordParams: a dict with parameters for creating RecordSensor region.
  :param spatialParams: a dict with parameters for creating SPRegion.
  :param temporalParams: a dict with parameters for creating TPRegion.
  :param verbosity: an integer representing how chatty the network will be.
  """
    inputFilePath = recordParams["inputFilePath"]
    scalarEncoderArgs = recordParams["scalarEncoderArgs"]
    dateEncoderArgs = recordParams["dateEncoderArgs"]

    scalarEncoder = ScalarEncoder(**scalarEncoderArgs)
    dateEncoder = DateEncoder(**dateEncoderArgs)

    encoder = MultiEncoder()
    encoder.addEncoder(scalarEncoderArgs["name"], scalarEncoder)
    encoder.addEncoder(dateEncoderArgs["name"], dateEncoder)

    network = Network()

    network.addRegion("sensor", "py.RecordSensor",
                      json.dumps({"verbosity": verbosity}))

    sensor = network.regions["sensor"].getSelf()
    sensor.encoder = encoder
    sensor.dataSource = FileRecordStream(streamID=inputFilePath)

    # Create the spatial pooler region
    spatialParams["inputWidth"] = sensor.encoder.getWidth()
    network.addRegion("spatialPoolerRegion", "py.SPRegion",
                      json.dumps(spatialParams))

    # Link the SP region to the sensor input
    network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
    network.link("sensor",
                 "spatialPoolerRegion",
                 "UniformLink",
                 "",
                 srcOutput="resetOut",
                 destInput="resetIn")
    network.link("spatialPoolerRegion",
                 "sensor",
                 "UniformLink",
                 "",
                 srcOutput="spatialTopDownOut",
                 destInput="spatialTopDownIn")
    network.link("spatialPoolerRegion",
                 "sensor",
                 "UniformLink",
                 "",
                 srcOutput="temporalTopDownOut",
                 destInput="temporalTopDownIn")

    # Add the TPRegion on top of the SPRegion
    network.addRegion("temporalPoolerRegion", "py.TPRegion",
                      json.dumps(temporalParams))

    network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink",
                 "")
    network.link("temporalPoolerRegion",
                 "spatialPoolerRegion",
                 "UniformLink",
                 "",
                 srcOutput="topDownOut",
                 destInput="topDownIn")

    spatialPoolerRegion = network.regions["spatialPoolerRegion"]

    # Make sure learning is enabled
    spatialPoolerRegion.setParameter("learningMode", True)
    # We want temporal anomalies so disable anomalyMode in the SP. This mode is
    # used for computing anomalies in a non-temporal model.
    spatialPoolerRegion.setParameter("anomalyMode", False)

    temporalPoolerRegion = network.regions["temporalPoolerRegion"]

    # Enable topDownMode to get the predicted columns output
    temporalPoolerRegion.setParameter("topDownMode", True)
    # Make sure learning is enabled (this is the default)
    temporalPoolerRegion.setParameter("learningMode", True)
    # Enable inference mode so we get predictions
    temporalPoolerRegion.setParameter("inferenceMode", True)
    # Enable anomalyMode to compute the anomaly score.
    temporalPoolerRegion.setParameter("anomalyMode", True)

    return network
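
A minimal driver for the returned network (a sketch: a recordParams dict with "inputFilePath", "scalarEncoderArgs", and "dateEncoderArgs" keys must be supplied by the caller, and the "anomalyScore" output name follows the nupic TPRegion spec):

network = createTemporalAnomaly(recordParams)
network.initialize()
for _ in xrange(100):
    network.run(1)
    score = network.regions["temporalPoolerRegion"].getOutputData(
        "anomalyScore")[0]
    print("anomaly score: %f" % score)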
Example #30
    def testSimpleMulticlassNetworkPY(self):
        # Setup data record stream of fake data (with three categories)
        filename = _getTempFileName()
        fields = [("timestamp", "datetime", "T"), ("value", "float", ""),
                  ("reset", "int", "R"), ("sid", "int", "S"),
                  ("categories", "list", "C")]
        records = (
            [datetime(day=1, month=3, year=2010), 0.0, 1, 0, "0"],
            [datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1"],
            [datetime(day=3, month=3, year=2010), 0.0, 0, 0, "0"],
            [datetime(day=4, month=3, year=2010), 1.0, 0, 0, "1"],
            [datetime(day=5, month=3, year=2010), 0.0, 0, 0, "0"],
            [datetime(day=6, month=3, year=2010), 1.0, 0, 0, "1"],
            [datetime(day=7, month=3, year=2010), 0.0, 0, 0, "0"],
            [datetime(day=8, month=3, year=2010), 1.0, 0, 0, "1"],
        )
        dataSource = FileRecordStream(streamID=filename,
                                      write=True,
                                      fields=fields)
        for r in records:
            dataSource.appendRecord(list(r))

        # Create the network and get region instances.
        net = Network()
        net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
        net.addRegion("classifier", "py.SDRClassifierRegion",
                      "{steps: '0', alpha: 0.001, implementation: 'py'}")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="dataOut",
                 destInput="bottomUpIn")
        net.link("sensor",
                 "classifier",
                 "UniformLink",
                 "",
                 srcOutput="categoryOut",
                 destInput="categoryIn")
        sensor = net.regions["sensor"]
        classifier = net.regions["classifier"]

        # Setup sensor region encoder and data stream.
        dataSource.close()
        dataSource = FileRecordStream(filename)
        sensorRegion = sensor.getSelf()
        sensorRegion.encoder = MultiEncoder()
        sensorRegion.encoder.addEncoder(
            "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
        sensorRegion.dataSource = dataSource

        # Get ready to run.
        net.initialize()

        # Train the network (by default learning is ON in the classifier, but assert
        # anyway) and then turn off learning and turn on inference mode.
        self.assertEqual(classifier.getParameter("learningMode"), 1)
        net.run(8)

        # Test the network on the same data as it trained on; should classify with
        # 100% accuracy.
        classifier.setParameter("inferenceMode", 1)
        classifier.setParameter("learningMode", 0)

        # Assert learning is OFF and that the classifier learned the dataset.
        self.assertEqual(classifier.getParameter("learningMode"), 0,
                         "Learning mode is not turned off.")
        self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                         "Inference mode is not turned on.")

        # make sure we can access all the parameters with getParameter
        self.assertEqual(classifier.getParameter("maxCategoryCount"), 2000)
        self.assertAlmostEqual(float(classifier.getParameter("alpha")), 0.001)
        self.assertEqual(int(classifier.getParameter("steps")), 0)
        self.assertTrue(classifier.getParameter("implementation") == "py")
        self.assertEqual(classifier.getParameter("verbosity"), 0)

        expectedCats = (
            [0.0],
            [1.0],
            [0.0],
            [1.0],
            [0.0],
            [1.0],
            [0.0],
            [1.0],
        )
        dataSource.rewind()
        for i in xrange(8):
            net.run(1)
            inferredCats = classifier.getOutputData("categoriesOut")
            self.assertSequenceEqual(
                expectedCats[i], inferredCats.tolist(),
                "Classififer did not infer expected category "
                "for record number {}.".format(i))
        # Close data stream, delete file.
        dataSource.close()
        os.remove(filename)
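
For comparison, a minimal sketch of using the SDRClassifier algorithm directly, outside the Network API (import path and compute signature per the nupic library; the pattern and bucket values are made up):

from nupic.algorithms.sdr_classifier import SDRClassifier

c = SDRClassifier(steps=[0], alpha=0.001)
result = c.compute(recordNum=0, patternNZ=[1, 5, 9],
                   classification={"bucketIdx": 0, "actValue": 0.0},
                   learn=True, infer=True)
print(result[0])  # per-bucket likelihoods for 0-step inference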
Example #31
def createTemporalAnomaly_chemical(recordParams, spatialParams, temporalParams,
                                   verbosity):

    inputFilePath = recordParams["inputFilePath"]

    # The seven scalar fields are configured the same way, so build the
    # composite encoder in a loop rather than unrolling each field.
    encoder = MultiEncoder()
    for i in xrange(1, 8):
        scalarEncoderArgs = recordParams["scalarEncoder%dArgs" % i]
        encoder.addEncoder(scalarEncoderArgs["name"],
                           ScalarEncoder(**scalarEncoderArgs))

    dateEncoderArgs = recordParams["dateEncoderArgs"]
    encoder.addEncoder(dateEncoderArgs["name"], DateEncoder(**dateEncoderArgs))

    network = Network()

    network.addRegion("sensor", "py.RecordSensor",
                      json.dumps({"verbosity": verbosity}))

    sensor = network.regions["sensor"].getSelf()
    sensor.encoder = encoder
    sensor.dataSource = FileRecordStream(streamID=inputFilePath)

    # Create the spatial pooler region
    spatialParams["inputWidth"] = sensor.encoder.getWidth()
    network.addRegion("spatialPoolerRegion", "py.SPRegion",
                      json.dumps(spatialParams))

    # Link the SP region to the sensor input
    network.link("sensor", "spatialPoolerRegion", "UniformLink", "")
    network.link("sensor",
                 "spatialPoolerRegion",
                 "UniformLink",
                 "",
                 srcOutput="resetOut",
                 destInput="resetIn")
    network.link("spatialPoolerRegion",
                 "sensor",
                 "UniformLink",
                 "",
                 srcOutput="spatialTopDownOut",
                 destInput="spatialTopDownIn")
    network.link("spatialPoolerRegion",
                 "sensor",
                 "UniformLink",
                 "",
                 srcOutput="temporalTopDownOut",
                 destInput="temporalTopDownIn")

    # Add the TMRegion on top of the SPRegion
    network.addRegion("temporalPoolerRegion", "py.TMRegion",
                      json.dumps(temporalParams))

    network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink",
                 "")
    network.link("temporalPoolerRegion",
                 "spatialPoolerRegion",
                 "UniformLink",
                 "",
                 srcOutput="topDownOut",
                 destInput="topDownIn")

    # Add the AnomalyLikelihoodRegion on top of the TMRegion
    network.addRegion("anomalyLikelihoodRegion", "py.AnomalyLikelihoodRegion",
                      json.dumps({}))
    network.link("temporalPoolerRegion",
                 "anomalyLikelihoodRegion",
                 "UniformLink",
                 "",
                 srcOutput="anomalyScore",
                 destInput="rawAnomalyScore")
    network.link("sensor",
                 "anomalyLikelihoodRegion",
                 "UniformLink",
                 "",
                 srcOutput="sourceOut",
                 destInput="metricValue")

    spatialPoolerRegion = network.regions["spatialPoolerRegion"]

    # Make sure learning is enabled
    spatialPoolerRegion.setParameter("learningMode", True)
    # We want temporal anomalies so disable anomalyMode in the SP. This mode is
    # used for computing anomalies in a non-temporal model.
    spatialPoolerRegion.setParameter("anomalyMode", False)

    temporalPoolerRegion = network.regions["temporalPoolerRegion"]

    # Enable topDownMode to get the predicted columns output
    temporalPoolerRegion.setParameter("topDownMode", True)
    # Make sure learning is enabled (this is the default)
    temporalPoolerRegion.setParameter("learningMode", True)
    # Enable inference mode so we get predictions
    temporalPoolerRegion.setParameter("inferenceMode", True)
    # Enable anomalyMode to compute the anomaly score.
    temporalPoolerRegion.setParameter("anomalyMode", True)

    return network
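
Once the network is running, the likelihood can be read back from the new region (a sketch; the parameter dicts must be supplied by the caller, and the "anomalyLikelihood" output name follows the nupic AnomalyLikelihoodRegion spec):

network = createTemporalAnomaly_chemical(recordParams, spatialParams,
                                         temporalParams, verbosity=0)
network.initialize()
network.run(1)
likelihood = network.regions["anomalyLikelihoodRegion"].getOutputData(
    "anomalyLikelihood")[0]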
Example #32
def createEncoder():
    # ScalarEncoder positional arguments are (w, minval, maxval).
    volume_encoder = ScalarEncoder(21, 0.0, 20.0, n=200, name="volume",
                                   clipInput=False)
    floorheight_encoder = ScalarEncoder(21, 0.0, 24.0, n=125,
                                        name="floorheight", clipInput=False)

    global encoder
    encoder = MultiEncoder()

    encoder.addEncoder("volume", volume_encoder)
    encoder.addEncoder("floorheight", floorheight_encoder)

    # The ten diagonal-coordinate fields are configured identically, so
    # build and register their encoders in a loop.
    for letter in "ABCDEFGHIJ":
        name = "diagCoor%s" % letter
        encoder.addEncoder(name, ScalarEncoder(21, 0.0, 200.0, n=200,
                                               name=name, clipInput=False))

    return encoder
Example #33
def getDescription(datasets):
    encoder = MultiEncoder()
    encoder.addEncoder("date", DateEncoder(timeOfDay=3))
    encoder.addEncoder("amount", LogEncoder(name="amount", maxval=1000))
    for i in xrange(0, nRandomFields):
        s = ScalarEncoder(name="scalar",
                          minval=0,
                          maxval=randomFieldWidth,
                          resolution=1,
                          w=3)
        encoder.addEncoder("random%d" % i, s)

    dataSource = FunctionSource(
        generateFunction,
        dict(nRandomFields=nRandomFields, randomFieldWidth=randomFieldWidth))

    inputShape = (1, encoder.getWidth())

    # Layout the coincidences vertically stacked on top of each other, each
    # looking at the entire input field.
    coincidencesShape = (nCoincidences, 1)
    # TODO: why do we need input border?
    inputBorder = inputShape[1] / 2
    if inputBorder * 2 >= inputShape[1]:
        inputBorder -= 1

    nodeParams = dict()

    spParams = dict(
        commonDistributions=0,
        inputShape=inputShape,
        inputBorder=inputBorder,
        coincidencesShape=coincidencesShape,
        coincInputRadius=inputShape[1] / 2,
        coincInputPoolPct=0.75,
        gaussianDist=0,
        localAreaDensity=0.10,
        # localAreaDensity = 0.04,
        numActivePerInhArea=-1,
        dutyCyclePeriod=1000,
        stimulusThreshold=5,
        synPermInactiveDec=0.08,
        # synPermInactiveDec=0.02,
        synPermActiveInc=0.02,
        synPermActiveSharedDec=0.0,
        synPermOrphanDec=0.0,
        minPctDutyCycleBeforeInh=0.05,
        # minPctDutyCycleAfterInh = 0.1,
        # minPctDutyCycleBeforeInh = 0.05,
        minPctDutyCycleAfterInh=0.05,
        # minPctDutyCycleAfterInh = 0.4,
        seed=1,
    )

    otherParams = dict(
        disableTemporal=1,
        trainingStep='spatial',
    )

    nodeParams.update(spParams)
    nodeParams.update(otherParams)

    def mySetupCallback(experiment):
        print("Setup function called")

    description = dict(
        options=dict(logOutputsDuringInference=False, ),
        network=dict(sensorDataSource=dataSource,
                     sensorEncoder=encoder,
                     CLAType="py.CLARegion",
                     CLAParams=nodeParams,
                     classifierType=None,
                     classifierParams=None),

        # step
        spTrain=dict(
            name="phase1",
            setup=mySetupCallback,
            iterationCount=5000,
            #iter=displaySPCoincidences(100),
            finish=printSPCoincidences()),
        tpTrain=None,  # same format as spTrain if non-empty
        infer=None,  # same format as spTrain if non-empty
    )

    return description
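
The random fields above size their encoders by resolution rather than n. A small sketch of the consequence (assuming nupic's ScalarEncoder is imported as in the examples above; adjacent buckets shift by one bit, so values one resolution step apart share w-1 active bits):

s = ScalarEncoder(name="scalar", minval=0, maxval=100, resolution=1, w=3)
a = set(s.encode(10).nonzero()[0])
b = set(s.encode(11).nonzero()[0])
print(len(a & b))  # adjacent values overlap in w-1 = 2 bits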