Example #1
# Imports assumed by this snippet: ScalarEncoder is the standard NuPIC encoder;
# SensorimotorExperimentRunner ships with the nupic.research sensorimotor code
# (the import path below is a best guess for that project layout).
from nupic.encoders import ScalarEncoder
from sensorimotor.sensorimotor_experiment_runner import SensorimotorExperimentRunner


class Model(object):

  def __init__(self):
    self.sensorEncoder = ScalarEncoder(n=512, w=21, minval=8.9, maxval=40,
                                       clipInput=True, forced=True)
    self.motorEncoder = ScalarEncoder(n=512, w=21, minval=-400, maxval=400,
                                      clipInput=True, forced=True)

    self.experimentRunner = SensorimotorExperimentRunner(
      tmOverrides={
        "columnDimensions": [512],
        "maxNewSynapseCount": 21*2,
        "minThreshold": 16*2,
        "activationThreshold": 16*2
      },
      tpOverrides={
        "columnDimensions": [512],
        "numActiveColumnsPerInhArea": 20,
        "poolingThreshUnpredicted": 0.5
      }
    )


  def feed(self, sensorValue, motorValue, sequenceLabel=None):
    sensorSDR = set(self.sensorEncoder.encode(sensorValue).nonzero()[0].tolist())
    motorSDR = set((self.motorEncoder.encode(motorValue).nonzero()[0] +
                    self.sensorEncoder.n).tolist())
    sensorimotorSDR = sensorSDR.union(motorSDR)

    self.experimentRunner.feedTransition(sensorSDR, motorSDR, sensorimotorSDR,
                                         tmLearn=True, tpLearn=True,
                                         sequenceLabel=sequenceLabel)
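
A minimal driver for the class above might look like this; the sensor/motor
values below are made up purely for illustration.

model = Model()
for sensorValue, motorValue in [(10.0, 50), (12.5, -120), (9.3, 300)]:
  model.feed(sensorValue, motorValue, sequenceLabel="demo")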
Example #2
def test_cla_se():
    import numpy as np
    import pyopencl as cl  # OpenCL bindings used to build the command queue below
    # SDRClassifier below is this project's OpenCL implementation (presumably
    # imported at module level in the original file).
    from nupic.encoders import ScalarEncoder
    from nupic.algorithms.sdr_classifier import SDRClassifier as npSDRClassifier
    se = ScalarEncoder(n=10, w=3, minval=0, maxval=20, forced=True)
    queue = cl.CommandQueue(
        cl.Context([cl.get_platforms()[0].get_devices()[0]]))
    classifier = SDRClassifier(queue, 30, len(se.getBucketValues()))
    np_cla = npSDRClassifier(verbosity=1)
    print("Buckets", se.getBucketValues())
    val = 5
    for _ in range(0, 2):
        for i in range(0, 10):
            encoding = np.where(se.encode(val) == 1)[0]
            bucketIdx = se.getBucketIndices(val)[0]
            print("Actual Value: {} , Active Bits: {}, BucketIdx: {}".format(
                val, encoding, bucketIdx))
            classification = {'bucketIdx': bucketIdx, 'actValue': val}
            cl_preds = classifier.compute(i, encoding, classification, True,
                                          True)
            nupic_preds = np_cla.compute(i, encoding, classification, True,
                                         True)
            print("cl", cl_preds)
            print("nup", np_cla._actualValues)
            print("nup", nupic_preds)
            # assert cl_preds == nupic_preds
            val += 0.5
            print("-" * 32)
Example #3
  def encodeLetters(self):
    letterEncoder = ScalarEncoder(n=self.numColumns, w=self.numActiveCells,
                                  minval=0, maxval=25)

    numLetters = np.shape(self.letters)[0]
    letterArray = np.zeros((numLetters, self.numColumns))
    letterIndices = []
    for k in range(numLetters):
      letterArray[k, :] = letterEncoder.encode(k)
      idxLetters = [i for i, j in izip(count(), letterArray[k]) if j == 1]
      letterIndices.append(idxLetters)

    return letterIndices
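
As an aside, the izip/count comprehension above (Python 2 itertools) that
collects the on-bit indices is equivalent to numpy's nonzero:

  idxLetters = np.nonzero(letterArray[k])[0].tolist()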
Example #4
def loadThingData(dataDir="data", n=150, w=11):
    """
  Load Thing sensation data. There is one file per object, each row contains one
  feature, location pairs. The format is as follows:
    [(-33.6705, 75.5003, 2.4207)/10] => [[list of active bits of location],
                                         [list of active bits of feature]]
  The content before "=>" is the true 3D location / sensation
  We ignore the encoded values after "=>" and use :class:`ScalarEncoder` to
  encode the sensation in a way that is compatible with the experiment network.

  :param dataDir: The location data files
  :type dataDir: str
  :param n: The number of bits in the feature SDR. Usually L4 column count
  :type n: int
  :param w: Number of 'on' bits in the feature SDR. Usually L4 sample size
  :type w: int
  :return: Dictionary mapping objects to sensations that can be used directly by
           class L246aNetwork 'infer' and 'learn' methods
  :rtype: dict[str,list]
  """
    objects = defaultdict(list)

    # Thing features are scalar values ranging from 1-25 inclusive
    encoder = ScalarEncoder(n=n, w=w, minval=1, maxval=25, forced=True)

    dataPath = os.path.dirname(os.path.realpath(__file__))
    dataPath = os.path.join(dataPath, dataDir)
    objFiles = glob.glob1(dataPath, "*.log")

    for filename in objFiles:
        obj, _ = os.path.splitext(filename)

        # Read raw sensations from log file. Ignore SDRs after "=>"
        sensations = []
        with open(os.path.join(dataPath, filename)) as f:
            for line in f.readlines():
                # Parse raw location/feature values
                line = line.split("=>")[0].translate(None, "[,]()")
                locationStr, featureStr = line.split("/")
                location = map(float, locationStr.split())
                feature = encoder.encode(int(featureStr)).nonzero()[0].tolist()

                sensations.append((location, feature))

        # Assume single column
        objects[obj] = [sensations]

    return objects
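
To make the parsing concrete, here is what one log line yields. The literal
line below is invented for illustration (the SDR part after "=>" is ignored
anyway), and str.translate is used in its Python 2 form, as above:

line = "[(-33.6705, 75.5003, 2.4207)/10] => [[1, 5], [7, 9]]"
raw = line.split("=>")[0].translate(None, "[,]()")
locationStr, featureStr = raw.split("/")
print map(float, locationStr.split())  # [-33.6705, 75.5003, 2.4207]
print int(featureStr)                  # 10, the scalar passed to encoder.encode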
Example #6
  def encodeTime(self):

    timeEncoder = ScalarEncoder(n=self.numTimeColumns,
                                w=self.numActiveTimeCells,
                                minval=0,
                                maxval=self.numTimeSteps,
                                forced=True)

    timeArray = np.zeros((self.numTimeSteps, self.numTimeColumns))
    timeIndices = []
    for k in range(self.numTimeSteps):
      timeArray[k, :] = timeEncoder.encode(k)
      idxTimes = [i for i, j in izip(count(), timeArray[k]) if j == 1]
      timeIndices.append(idxTimes)

    return timeIndices
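
ScalarEncoder gives nearby inputs overlapping bit patterns, which is what makes
these time (and letter) codes useful downstream; a standalone check, with
illustrative parameters:

from nupic.encoders import ScalarEncoder

enc = ScalarEncoder(n=100, w=9, minval=0, maxval=50, forced=True)
a = set(enc.encode(10).nonzero()[0])
b = set(enc.encode(11).nonzero()[0])
print(len(a & b))  # adjacent values share most of their 9 active bits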
Example #9
def test_tm():
    import numpy as np
    import pyopencl as cl  # OpenCL bindings used to build the command queue below
    # TemporalMemory below is this project's OpenCL implementation (presumably
    # imported at module level in the original file).
    from nupic.encoders import ScalarEncoder
    from nupic.regions import TPRegion
    columns = 128
    se = ScalarEncoder(n=21 + 50, w=3 + 9, minval=0, maxval=100, forced=True)
    queue = cl.CommandQueue(
        cl.Context([cl.get_platforms()[0].get_devices()[0]]))
    tm = TemporalMemory(queue,
                        columnCount=columns,
                        inputWidth=se.n,
                        verbosity=1,
                        inputActive=se.w)
    tm_nupic = TPRegion.TPRegion(columnCount=columns, inputWidth=se.n)

    val = 5

    def make_output_dict():
        return {
            'topDownOut': np.zeros(64),
            'bottomUpOut': np.zeros(columns, dtype=np.float),
            'lrnActiveStateT': np.zeros(columns),
            'anomalyScore': np.empty(1),
            'activeCells': np.zeros(64),
            'predictiveActiveCells': np.zeros(64)
        }

    cl_out = make_output_dict()
    nupic_out = make_output_dict()
    for _ in range(0, 2):
        for i in range(0, 10):
            encoding = se.encode(val)
            bucketIdx = se.getBucketIndices(val)[0]
            print("Actual Value: {} , Active Bits: {}, BucketIdx: {}".format(
                val, np.where(encoding == 1), bucketIdx))
            tm.compute(encoding, cl_out)
            tm_nupic.compute(encoding, nupic_out)
            val += 0.5
            print("-" * 10)
Example #10
def test_sp():
    import numpy as np
    import pyopencl as cl  # OpenCL bindings used to build the command queue below
    # SpatialPooler below is this project's OpenCL implementation (presumably
    # imported at module level in the original file).
    from nupic.encoders import ScalarEncoder
    from nupic.regions import SPRegion
    columns = 128
    se = ScalarEncoder(n=21 + 50, w=3 + 9, minval=0, maxval=100, forced=True)
    queue = cl.CommandQueue(
        cl.Context([cl.get_platforms()[0].get_devices()[0]]))
    sp = SpatialPooler(queue,
                       columnCount=columns,
                       inputWidth=se.n,
                       spVerbosity=1)
    sp_nupic = SPRegion.SPRegion(columnCount=columns, inputWidth=se.n)

    val = 1
    for _ in range(0, 2):
        for i in range(0, 10):
            encoding = se.encode(val)
            bucketIdx = se.getBucketIndices(val)[0]
            print("Actual Value: {} , Active Bits: {}, BucketIdx: {}".format(
                val, np.where(encoding == 1), bucketIdx))
            sp.compute(encoding, True, method=2)
            val += 0.5
            print("-" * 10)
Example #11
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 11 22:11:49 2018

@author: Arunodhaya
"""
import numpy as np
from nupic.encoders import ScalarEncoder
# ScalarEncoder?  (IPython help lookup; commented out so the file runs as plain Python)

enc = ScalarEncoder(n=22, w=3, minval=2.5, maxval=97.5, clipInput=False, forced=True)
print "3 =", enc.encode(3)
print "4 =", enc.encode(4)
print "5 =", enc.encode(5)
print "1000 =", enc.encode(1000)


from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder

# RandomDistributedScalarEncoder?  (IPython help lookup; commented out so the file runs as plain Python)

rdse = RandomDistributedScalarEncoder(n=21, w=3, resolution=5, offset=2.5)

print "3 =   ", rdse.encode(3)
print "4 =   ", rdse.encode(4)
print "5 =   ", rdse.encode(5)
print
print "100 = ", rdse.encode(100)
print "100000 =", rdse.encode(1000)

Example #12
File: Overview.py  Project: MichoelSnow/HTM
from nupic.encoders import ScalarEncoder

# Scalar encoders
# n is number of bits
# w is the number of on bits
# minval and maxval is the range the bits represent
enc = ScalarEncoder(n=22,
                    w=3,
                    minval=2.5,
                    maxval=97.5,
                    clipInput=True,
                    forced=True)
enc = ScalarEncoder(n=22,
                    w=3,
                    minval=0,
                    maxval=100,
                    clipInput=True,
                    forced=True)
for i in xrange(1, 10):
    print(enc.encode(i))
print("3 =", enc.encode(10200))

enc = ScalarEncoder(n=14,
                    w=3,
                    minval=1,
                    maxval=8,
                    clipInput=True,
                    forced=True,
                    periodic=True)

print(enc.encode(1.5))  # periodic=True treats minval..maxval as a circular range
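
For a non-periodic ScalarEncoder the resolution works out to
(maxval - minval) / (n - w), so the 22-bit, 3-on encoder above distinguishes
inputs roughly 5.26 apart; the encoder exposes this directly (a quick check,
not part of the original file):

check = ScalarEncoder(n=22, w=3, minval=0, maxval=100, clipInput=True, forced=True)
print(check.resolution)  # ~5.26; inputs closer than this can share an encoding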
Example #13
#!/usr/bin/env python

import rospy
import numpy

from nupic.encoders import ScalarEncoder
from nupic.research.spatial_pooler import SpatialPooler

enc = ScalarEncoder(n=10000, w=21, minval=0, maxval=10000)

from std_msgs.msg import String, Float64
t =[]
for i in range(10000):
    t.append(enc.encode(i))

print("Encoding is done")

sp = SpatialPooler(inputDimensions=(10000,),
                   columnDimensions=(20,),
                   potentialRadius=15,
                   numActiveColumnsPerInhArea=1,
                   globalInhibition=True,
                   synPermActiveInc=0.03,
                   potentialPct=1.0)
output = numpy.zeros((20,), dtype="int")
for _ in range(10):
    for i in xrange(10000):
        sp.compute(t[i], learn=True, activeArray=output)

print("Spatial pooler strengthened")
Example #14
    # Excerpt from a larger training function; assumes timeit, numpy (np),
    # Keras (Sequential, Dense, adam), the encoders (enc, encDate, encTime,
    # encOut) and helpers like getInputRecord are defined in the original file.
    start_time = timeit.default_timer()
    total_n = 5
    keras_model = Sequential()
    keras_model.add(Dense(41, input_dim=total_n, activation=discrete_sigmoid))
    #keras_model.add(Activation(discrete_sigmoid))
    #keras_model.add(Dropout(0.25))

    keras_model.add(Dense(buckets, input_shape=(total_n,), activation='softmax'))
    keras_model.compile(loss='categorical_crossentropy', optimizer=adam(lr=lr))
    data = []
    labels = []
    for i in range(0,nTrain):
        inputRecord = getInputRecord(df, predictedField, i)
        encoded = []
        if use_pred:
            encoded.extend(enc.encode(inputRecord[predictedField]).tolist())
        if use_date:
            encoded.extend(encDate.encode(inputRecord['dayofweek']).tolist())
        if use_time:
            encoded.extend(encTime.encode(inputRecord['timeofday']).tolist())
        #https://ianlondon.github.io/blog/encoding-cyclical-features-24hour-time/
        time_sin = np.sin(2 * np.pi * inputRecord['timeofday'] / 1410)
        time_cos = np.cos(2 * np.pi * inputRecord['timeofday'] / 1410)
        day_sin = np.sin(2 * np.pi * inputRecord['dayofweek'] / 7)
        day_cos = np.cos(2 * np.pi * inputRecord['dayofweek'] / 7)
        raw = [inputRecord[predictedField] / 40000.0, time_sin, time_cos, day_sin, day_cos]
        data.append(raw)
        outputRecord = getInputRecord(df, predictedField, i+5)
        encodedOut = encOut.encode(outputRecord[predictedField])
        encodedOutIndex = np.where(encodedOut == 1)[0][0]
        encodedOut = np.zeros((buckets,), dtype=np.int)
Example #15
    b += 1

    # Grab a sample from our audio input.

    stream.start_stream()
    data = stream.read(1024 * 5)
    stream.stop_stream()

    # Turn our sample into a decibel measurement.

    rms = audioop.rms(data, 2)
    decibel = int(20 * math.log10(rms))

    # Turn our decibel number into a sparse distributed representation.

    encoded = enc.encode(decibel)

    # Add our encoded representation to the temporal pooler.

    tp.compute(encoded, enableLearn=True, computeInfOutput=True)

    # For the curious:
    #tp.printCells()
    #tp.printStates(printPrevious=False, printLearnState=False)

    predictedCells = tp.getPredictedState()

    # Collapse the cells-per-column prediction matrix to columns and take the
    # highest predicted column index as the predicted decibel value.
    decval = 0
    if predictedCells.any():
        decval = predictedCells.max(axis=1).nonzero()[0][-1]