Example 1
def main():
    x = 10
    y = 10
    steps = 10000
    history = []
    world = np.arange(625).reshape((25, 25))
    sp = SpatialPooler(
        inputDimensions=(625, ),
        columnDimensions=(25, ),
        potentialRadius=625,
        numActiveColumnsPerInhArea=1,
    )
    output = np.zeros((25, ), dtype=np.uint32)
    for _ in xrange(steps):
        active = getActive(world, x, y)
        assert len(active) == 25, "{}, {}: {}".format(x, y, active)
        activeInput = np.zeros((625, ), dtype=np.uint32)
        for v in active:
            activeInput[v] = 1
        history.append(active)
        sp.compute(activeInput, True, output)
        x, y = getNewLocation(x, y, 25, 2, False)

    for i in xrange(25):
        permanence = np.zeros((625, ))
        sp.getPermanence(i, permanence)
        plt.imshow(permanence.reshape((25, 25)),
                   cmap="hot",
                   interpolation="nearest")
        plt.show()
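This example relies on two helpers defined elsewhere in the file. Below is a minimal sketch of what they might look like, matching the call signatures above (illustrative only; the radius-2 patch is what makes the assert expect exactly 25 active bits):

import random

def getActive(world, x, y, radius=2):
    # Flat indices of the (2*radius+1)^2 patch centered on (x, y).
    return [world[i][j]
            for i in range(x - radius, x + radius + 1)
            for j in range(y - radius, y + radius + 1)]

def getNewLocation(x, y, worldSize, radius, wrap):
    # Random walk clamped so the whole patch stays inside the world
    # (the wrap flag is ignored in this sketch).
    lo, hi = radius, worldSize - radius - 1
    x = min(max(x + random.choice([-1, 0, 1]), lo), hi)
    y = min(max(y + random.choice([-1, 0, 1]), lo), hi)
    return x, y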
Example 2
def create_sp(self):
    sp = SpatialPooler(
        # How large the input encoding will be.
        inputDimensions=(len(self.encoding[0]), ),
        # How many mini-columns will be in the Spatial Pooler.
        columnDimensions=(2048, ),
        # What percent of the column's receptive field is
        # available for potential synapses?
        potentialPct=0.85,
        # This means that the input space has no topology.
        globalInhibition=True,
        localAreaDensity=-1.0,
        # Roughly 2%, given that there is only one inhibition area because
        # we have turned on globalInhibition (40 / 2048 = 0.0195).
        numActiveColumnsPerInhArea=40.0,
        # How quickly synapses grow and degrade.
        synPermInactiveDec=0.005,
        synPermActiveInc=0.04,
        synPermConnected=0.1,
        # boostStrength controls the strength of boosting. Boosting
        # encourages efficient usage of SP columns.
        boostStrength=3.0,
        # Random number generator seed.
        seed=1956,
        # Determines if inputs at the beginning and end of an input dimension
        # should be considered neighbors when mapping columns to inputs.
        wrapAround=False)
    return sp
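A hypothetical usage sketch for the pooler returned above ("model" stands in for the owning object, and self.encoding is assumed to be a list of binary numpy vectors, as the inputDimensions line implies):

import numpy as np

sp = model.create_sp()
activeColumns = np.zeros(2048, dtype=np.uint32)
sp.compute(model.encoding[0], True, activeColumns)
sdr = np.nonzero(activeColumns)[0]  # indices of the ~40 winning columns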
Example 3
    def test_whether_is_the_same_as_spatial_pooler(self):
        """
        Naive reality check for the encoding function
        of the lateral pooler implementation.
        """
        n = 1024
        m = 784
        d = 100
        w = 20

        X = np.random.randint(0, 2, size=(m, d))
        Y_nup = np.zeros((n, d))
        Y_lat = np.zeros((n, d))

        params_nup = {
            "inputDimensions": [m, 1],
            "columnDimensions": [n, 1],
            "potentialRadius": n,
            "potentialPct": 1.0,
            "globalInhibition": True,
            "localAreaDensity": -1.0,
            "numActiveColumnsPerInhArea": w,
            "stimulusThreshold": 0,
            "synPermInactiveDec": 0.05,
            "synPermActiveInc": 0.1,
            "synPermConnected": 0.5,
            "minPctOverlapDutyCycle": 0.001,
            "dutyCyclePeriod": 1000,
            "boostStrength": 100.0,
            "seed": 1936
        }

        params_lat = params_nup.copy()
        params_lat["lateralLearningRate"] = 0.0
        params_lat["enforceDesiredWeight"] = False

        sp_nup = SpatialPooler(**params_nup)
        sp_lat = LateralPooler(**params_lat)

        for t in range(d):
            sp_nup.compute(X[:, t], False, Y_nup[:, t])
            sp_lat.compute(X[:, t], False, Y_lat[:, t])

        self.assertTrue(np.all(Y_nup == Y_lat),
                        "Produces wrong output even without learning.")

        for t in range(d):
            sp_nup.compute(X[:, t], True, Y_nup[:, t])
            sp_lat.compute(X[:, t], True, Y_lat[:, t])

        self.assertTrue(np.all(Y_nup == Y_lat),
                        "Wrong outputs, something diverges during learning.")

        W_nup = get_W(sp_nup)
        W_lat = get_W(sp_lat)
        self.assertTrue(
            np.all(W_nup == W_lat),
            "Wrong synaptic weights, something diverges during learning.")
Example 4
def init_imgs(write_output):
    #problems = parse_images.get_problems(folder_name)

    folder_name = 'Data/raw'
    problems = []
    for sub_folder in os.listdir(folder_name):
        f = os.path.join(folder_name, sub_folder)
        problems += parse_images.get_problems(f)
    print(len(problems))

    for problem in problems:
        problem['Input'] = problem['Input'].reshape((3, -1))
        problem['Output'] = problem['Output'].reshape((6, -1))

    # dimensions
    num_cols1 = len(problems[0]['Input'][0])
    tm_cols = num_cols1
    layers = []
    if not write_output:
        num_cols2 = num_cols1 // 2
        num_cols3 = num_cols2 // 2
        sp1 = SpatialPooler(inputDimensions=(num_cols1, ),
                            columnDimensions=(num_cols2, ),
                            numActiveColumnsPerInhArea=-1,
                            localAreaDensity=0.05)

        sp2 = SpatialPooler(inputDimensions=(num_cols2, ),
                            columnDimensions=(num_cols3, ),
                            numActiveColumnsPerInhArea=-1,
                            localAreaDensity=0.05)
        tm_cols = num_cols3
        layers = [(sp1, sp_compute), (sp2, sp_compute)]

    bckTM = BTM(numberOfCols=tm_cols,
                cellsPerColumn=10,
                initialPerm=0.5,
                connectedPerm=0.5,
                minThreshold=10,
                newSynapseCount=10,
                activationThreshold=10,
                pamLength=10)

    layers += [(bckTM, bk_tm_compute)]
    return (layers, problems)
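sp_compute and bk_tm_compute are assumed to be adapter callbacks that give every layer in the list the same call signature; a plausible sketch for the SP case:

import numpy as np

def sp_compute(sp, inputVector, learn=True):
    out = np.zeros(sp.getNumColumns(), dtype=np.uint32)
    sp.compute(inputVector, learn, out)
    return out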
Example 5
def definir_SP(SIZE_ENCODER_):

    """
    Returns a configured SpatialPooler instance.
    """

    N_COLUMNS = 2048

    sp = SpatialPooler(
        inputDimensions = (SIZE_ENCODER_,),
        columnDimensions = (N_COLUMNS,), # 2048 mini-columns arranged in a linear array

        potentialRadius = SIZE_ENCODER_, # the potential radius of each mini-column
        # covers the whole input space

        potentialPct = 0.8, # fraction of the input space that can form a permanence
        # with each mini-column
        ## Attention: having a permanence value doesn't mean the synapse is connected;
        ## to be connected, the permanence ("connection force") must exceed the
        ## synPermConnected threshold below.

        globalInhibition = True, # winning columns are normally selected within a local
        # neighborhood; with global inhibition every column is treated as a neighbor of
        # every other, so winners are picked across the whole region
        localAreaDensity = -1.0,
        numActiveColumnsPerInhArea = NUM_ACTIVE_COLUMNS,
        stimulusThreshold = 0,
        ## With a fixed number of active columns per input there is little need for a
        ## stimulusThreshold: the effective cutoff is the overlap of the
        ## NUM_ACTIVE_COLUMNS-th ranked column, and any mini-column with less overlap
        ## won't be active for this input.

        synPermInactiveDec = 0.0005, # when a column is active, its synapses on OFF
        # input bits (those that aren't 1 in the input) have their permanence decremented

        synPermActiveInc = 0.003, # when a column is active, its synapses on ON input
        # bits (those that are 1 in the input) have their permanence incremented

        synPermConnected = 0.2, # how strong the connection between an input bit and
        # the mini-column needs to be for the synapse to count as connected
        # Note: permanence updates apply to all potential synapses of an active column,
        # connected or not.

        minPctOverlapDutyCycle = 0.001, # number between 0 and 1.0, used to set a floor
        # on how often a column should have at least stimulusThreshold active inputs

        dutyCyclePeriod = 100, # over how many inputs the duty cycles are measured

        boostStrength = 0.01,

        seed = 47,
        spVerbosity = 0,
        wrapAround = False
    )
    return sp
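The comments above distinguish having a permanence from being connected. A quick way to see that distinction on a pooler built by this function (a sketch; NUM_ACTIVE_COLUMNS must already be defined, and the "> 0" test only approximates the potential pool):

import numpy as np

sp = definir_SP(1024)                       # example encoder width
perms = np.zeros(1024, dtype=np.float32)
sp.getPermanence(0, perms)                  # permanences of mini-column 0
connected = np.count_nonzero(perms >= 0.2)  # 0.2 = synPermConnected above
potential = np.count_nonzero(perms > 0.0)
print("column 0: %d potential, %d connected" % (potential, connected))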
Example 6
def main():
    # cluster similar inputs together in SDR space
    s = SpatialPooler()
    print(type(s))

    # powerful sequence memory in SDR space
    t = TemporalMemory()
    print(type(t))

    # models raw anomaly scores with a rolling Gaussian and computes their
    # likelihood
    a = AnomalyLikelihood()
    print(type(a))

    # temporally groups active cell sets from TM
    u = UnionTemporalPooler()
    print(type(u))

    # learns pairings of Union representations and labeled classes
    c = SDRClassifier()
    print(type(c))
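These classes form the canonical HTM anomaly pipeline: encoder output goes through the SpatialPooler, the active columns feed the TemporalMemory, and the TM's prediction errors drive AnomalyLikelihood. A minimal wiring sketch of the first two stages (the sizes and the random stand-in encoding are assumptions, not taken from the example):

import numpy as np

sp = SpatialPooler(inputDimensions=(1024, ), columnDimensions=(2048, ),
                   globalInhibition=True, numActiveColumnsPerInhArea=40)
tm = TemporalMemory(columnDimensions=(2048, ))

encoding = np.random.randint(0, 2, 1024).astype(np.uint32)  # stand-in encoder output
activeColumns = np.zeros(2048, dtype=np.uint32)
sp.compute(encoding, True, activeColumns)
tm.compute(np.nonzero(activeColumns)[0], learn=True)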
Example 7
def runTrial(ww, numColumns, potentialPct, inc, dec, mpo, dutyCycle, boost,
             steps, rr, spW, stimulusThreshold, connected, stepSize, jumpProb,
             directionStability):
  ws = ww ** 2
  x = 10
  y = 10
  locationHeatmap = np.zeros((ww, ww))
  history = []
  world = np.arange(ws).reshape((ww, ww))
  sp = SpatialPooler(
      inputDimensions=(ws,),
      columnDimensions=(numColumns,),
      potentialRadius=ws,
      potentialPct=potentialPct,
      numActiveColumnsPerInhArea=spW,
      stimulusThreshold=stimulusThreshold,
      synPermActiveInc=inc,
      synPermInactiveDec=dec,
      synPermConnected=connected,
      minPctOverlapDutyCycle=mpo,
      dutyCyclePeriod=dutyCycle,
      boostStrength=boost,
      seed=1936,
      globalInhibition=True,
  )
  output = np.zeros((numColumns,), dtype=np.uint32)
  direction = 0
  for i in xrange(steps):
    locationHeatmap[x][y] += 1
    active = getActive(world, ww, x, y, rr)
    history.append(active)
    activeInput = np.zeros((ws,), dtype=np.uint32)
    for v in active:
      activeInput[v] = 1
    sp.compute(activeInput, True, output)
    x, y, direction = getNewLocation(x, y, ww, rr, wrap=True,
                                     locationHeatmap=locationHeatmap,
                                     stepSize=stepSize, jumpProb=jumpProb,
                                     direction=direction,
                                     directionStability=directionStability)

    if (i + 1) % 100 == 0:
      saveImage(history, ws, ww, numColumns, locationHeatmap, potentialPct, inc, dec, mpo, dutyCycle, boost, rr, spW, i+1, sp)

  saveImage(history, ws, ww, numColumns, locationHeatmap, potentialPct, inc, dec, mpo, dutyCycle, boost, rr, spW, steps, sp)
Example 8
def main():
    x = 10
    y = 10
    steps = 10000
    world = np.arange(625).reshape((25, 25))
    spInputSize = 21 * 21
    sp = SpatialPooler(
        inputDimensions=(spInputSize, ),
        columnDimensions=(25, ),
        potentialRadius=spInputSize,
        numActiveColumnsPerInhArea=1,
        synPermActiveInc=0.1,
        synPermInactiveDec=0.5,
        boostStrength=1.0,
    )
    csFields = generateCenterSurroundFields()
    output = np.zeros((25, ), dtype=np.uint32)
    for _ in xrange(steps):
        active = getActive(world, x, y)
        assert len(active) == 25, "{}, {}: {}".format(x, y, active)
        activeInput = np.zeros((625, ), dtype=np.uint32)
        for v in active:
            activeInput[v] = 1
        centerSurround = processCenterSurround(csFields, activeInput)
        print centerSurround

        sp.compute(centerSurround, True, output)
        x, y = getNewLocation(x, y, 25, 2, False)

    for i in xrange(25):
        permanence = np.zeros((spInputSize, ))
        sp.getPermanence(i, permanence)
        plt.imshow(permanence.reshape((21, 21)),
                   cmap="hot",
                   interpolation="nearest")
        plt.show()
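generateCenterSurroundFields and processCenterSurround are not shown; a plausible sketch of the latter, assuming csFields is a (441, 625) weight matrix with positive centers and negative surrounds, thresholded into the 21x21 binary vector the SP expects:

import numpy as np

def processCenterSurround(csFields, activeInput):
    # One dot product per output bit; positive net response = active.
    responses = csFields.dot(activeInput)
    return (responses > 0).astype(np.uint32)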
Example 9
    def toString(self):
        print("sentenceNum: " + str(self.id))
        print("startIdx " + str(self.startIdx))
        print("endIdx " + str(self.endIdx))
        print("anomalyScore " + str(self.anomalyScore))


# Parameters for the spatial pooler and temporal memory networks; only
# tm_only is used.


sp_layer1 = SpatialPooler(inputDimensions=(128, 128),
                          columnDimensions=(64, 64),
                          potentialPct=0.1,
                          potentialRadius=5,
                          globalInhibition=False,
                          localAreaDensity=0.1,
                          numActiveColumnsPerInhArea=3,
                          synPermInactiveDec=0.5,
                          synPermActiveInc=0.02,
                          synPermConnected=0.90,
                          boostStrength=0.0,
                          wrapAround=False)

tm_only = TemporalMemory(
    inputDimensions=(4096, ),
    columnDimensions=(4096, ),
    cellsPerColumn=5,
    newSynapseCount=15,
    activationThreshold=15,
    initialPermanence=0.7,
    connectedPermanence=0.8,
    minThreshold=8)
Example 10
        # Pick a combination of parameter values
        parameters.nextCombination()
        #parameters.nextRandomCombination()
        synPermConn = parameters.getValue("synPermConn")
        synPermDec = synPermConn * parameters.getValue("synPermDecFrac")
        synPermInc = synPermConn * parameters.getValue("synPermIncFrac")

        # Instantiate our spatial pooler
        sp = SpatialPooler(
            inputDimensions=(32, 32),  # Size of image patch
            columnDimensions=(32, 32),
            potentialRadius=10000,  # Ensures 100% potential pool
            potentialPct=0.8,
            globalInhibition=True,
            localAreaDensity=-1,  # Using numActiveColumnsPerInhArea
            numActiveColumnsPerInhArea=64,
            # All input activity can contribute to feature output
            stimulusThreshold=0,
            synPermInactiveDec=synPermDec,
            synPermActiveInc=synPermInc,
            synPermConnected=synPermConn,
            boostStrength=1.0,
            seed=1956,  # The seed that Grok uses
            spVerbosity=1)

        # Instantiate the spatial pooler test bench.
        tb = VisionTestBench(sp)

        # Instantiate the classifier
        clf = KNNClassifier()

        # Train the spatial pooler on trainingVectors.
Example 11
        now = datetime.datetime.strptime(lines, "%Y-%m-%d %H:%M:%S")
        print "now =       ", de.encode(now)
    cpt += 1

categories = ('info', 'error', 'warning')
encoder = CategoryEncoder(w=3, categoryList=categories, forced=True)
info = encoder.encode("info")
error = encoder.encode("error")
warning = encoder.encode("warning")
#print "info =       ", info
#print "error =       ", error
#print "warning =       ", warning
sp = SpatialPooler(inputDimensions=(len(info), ),
                   columnDimensions=(3, ),
                   potentialRadius=15,
                   numActiveColumnsPerInhArea=1,
                   globalInhibition=True,
                   synPermActiveInc=0.03,
                   potentialPct=1.0)
import numpy
for column in xrange(3):
    connected = numpy.zeros((len(info), ), dtype="int")
    sp.getConnectedSynapses(column, connected)
    print connected

output = numpy.zeros((3, ), dtype="int")
sp.compute(info, learn=True, activeArray=output)
print output

output = numpy.zeros((3, ), dtype="int")
sp.compute(error, learn=True, activeArray=output)
Example 12
    def __init__(self, columnCount, InputEncoderParams, toL4ConnectorParamsI,
                 toL4ConnectorParamsII, toL5ConnectorParams,
                 toD1ConnectorParams, toD2ConnectorParams, L4Params, L5Params,
                 k, D1Params, D2Params):
        self.columnCount = columnCount
        self.toL4ConnectorParamsI = toL4ConnectorParamsI
        self.toL4ConnectorParamsII = toL4ConnectorParamsII
        self.toL5ConnectorParams = toL5ConnectorParams
        self.toD1ConnectorParams = toD1ConnectorParams
        self.toD2ConnectorParams = toD2ConnectorParams
        self.L4Params = L4Params
        self.L5Params = L5Params
        self.k = k
        self.D1Params = D1Params
        self.D2Params = D2Params
        self.learning = False

        #encoder
        from nupic.encoders import MultiEncoder
        self.InputEncoder = MultiEncoder()
        self.InputEncoder.addMultipleEncoders(InputEncoderParams)
        print "Encoder Online"

        #spatialPoolers
        from nupic.algorithms.spatial_pooler import SpatialPooler
        self.toL4ConnectorI = SpatialPooler(
            inputDimensions=(toL4ConnectorParamsI["inputDimensions"], ),
            columnDimensions=(columnCount, ),
            potentialPct=toL4ConnectorParamsI["potentialPct"],
            globalInhibition=toL4ConnectorParamsI["globalInhibition"],
            localAreaDensity=toL4ConnectorParamsI["localAreaDensity"],
            numActiveColumnsPerInhArea=toL4ConnectorParamsI[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL4ConnectorParamsI["synPermInactiveDec"],
            synPermActiveInc=toL4ConnectorParamsI["synPermActiveInc"],
            synPermConnected=toL4ConnectorParamsI["synPermConnected"],
            boostStrength=toL4ConnectorParamsI["boostStrength"],
            seed=toL4ConnectorParamsI["seed"],
            wrapAround=toL4ConnectorParamsI["wrapAround"])  #this part sucks
        self.toL4ConnectorII = SpatialPooler(
            inputDimensions=(columnCount * 3, ),
            columnDimensions=(columnCount, ),
            potentialPct=toL4ConnectorParamsII["potentialPct"],
            globalInhibition=toL4ConnectorParamsII["globalInhibition"],
            localAreaDensity=toL4ConnectorParamsII["localAreaDensity"],
            numActiveColumnsPerInhArea=toL4ConnectorParamsII[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL4ConnectorParamsII["synPermInactiveDec"],
            synPermActiveInc=toL4ConnectorParamsII["synPermActiveInc"],
            synPermConnected=toL4ConnectorParamsII["synPermConnected"],
            boostStrength=toL4ConnectorParamsII["boostStrength"],
            seed=toL4ConnectorParamsII["seed"],
            wrapAround=toL4ConnectorParamsII["wrapAround"])
        print "toL4Connector Online"
        self.toL5Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toL5ConnectorParams["potentialPct"],
            globalInhibition=toL5ConnectorParams["globalInhibition"],
            localAreaDensity=toL5ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toL5ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL5ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toL5ConnectorParams["synPermActiveInc"],
            synPermConnected=toL5ConnectorParams["synPermConnected"],
            boostStrength=toL5ConnectorParams["boostStrength"],
            seed=toL5ConnectorParams["seed"],
            wrapAround=toL5ConnectorParams["wrapAround"])
        print "toL5Connector Online"
        self.toD1Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toD1ConnectorParams["potentialPct"],
            globalInhibition=toD1ConnectorParams["globalInhibition"],
            localAreaDensity=toD1ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toD1ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toD1ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toD1ConnectorParams["synPermActiveInc"],
            synPermConnected=toD1ConnectorParams["synPermConnected"],
            boostStrength=toD1ConnectorParams["boostStrength"],
            seed=toD1ConnectorParams["seed"],
            wrapAround=toD1ConnectorParams["wrapAround"])
        print "toD1Connector Online"
        self.toD2Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toD2ConnectorParams["potentialPct"],
            globalInhibition=toD2ConnectorParams["globalInhibition"],
            localAreaDensity=toD2ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toD2ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toD2ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toD2ConnectorParams["synPermActiveInc"],
            synPermConnected=toD2ConnectorParams["synPermConnected"],
            boostStrength=toD2ConnectorParams["boostStrength"],
            seed=toD2ConnectorParams["seed"],
            wrapAround=toD2ConnectorParams["wrapAround"])
        print "toD2Connector Online"

        #HTM Layers
        from nupic.algorithms.temporal_memory import TemporalMemory
        self.L4ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.L4 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
        )
        print "L4 Online"
        self.L5ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.L5 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
        )
        print "L5 Online"
        self.D1ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.D1 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
            initialPermanence=0.21,
            connectedPermanence=0.5,
        )
        print "D1 Online"
        self.D2ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.D2 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
            initialPermanence=0.21,
            connectedPermanence=0.5,
        )
        print "D2 Online"
Example 13
if __name__ == "__main__":
    # Get training images and convert them to vectors.
    trainingImages, trainingTags = dataset_readers.getImagesAndTags(
        trainingDataset)
    trainingVectors = encoder.imagesToVectors(trainingImages)

    # Instantiate the python spatial pooler
    sp = SpatialPooler(
        inputDimensions=32**2,  # Size of image patch
        columnDimensions=16,  # Number of potential features
        potentialRadius=10000,  # Ensures 100% potential pool
        potentialPct=1,  # Neurons can connect to 100% of input
        globalInhibition=True,
        localAreaDensity=-1,  # Using numActiveColumnsPerInhArea
        #localAreaDensity=0.02, # one percent of columns active at a time
        #numActiveColumnsPerInhArea=-1, # Using percentage instead
        numActiveColumnsPerInhArea=1,  # Only one feature active at a time
        # All input activity can contribute to feature output
        stimulusThreshold=0,
        synPermInactiveDec=0.3,
        synPermActiveInc=0.3,
        synPermConnected=0.3,  # Connected threshold
        boostStrength=2,
        seed=1956,  # The seed that Grok uses
        spVerbosity=1)

    # Instantiate the spatial pooler test bench.
    tb = VisionTestBench(sp)

    # Instantiate the classifier
    clf = exactMatch()
Example 14
def runHotgym(numRecords):
    with open(_PARAMS_PATH, "r") as f:
        modelParams = yaml.safe_load(f)["modelParams"]
        enParams = modelParams["sensorParams"]["encoders"]
        spParams = modelParams["spParams"]
        tmParams = modelParams["tmParams"]

    timeOfDayEncoder = DateEncoder(
        timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"])
    weekendEncoder = DateEncoder(
        weekend=enParams["timestamp_weekend"]["weekend"])
    CtEncoder = RandomDistributedScalarEncoder(enParams["Ct"]["resolution"])
    ZIP_10467Encoder = RandomDistributedScalarEncoder(
        enParams["ZIP_10467"]["resolution"])
    #  ZIP_10462Encoder = RandomDistributedScalarEncoder(enParams["ZIP_10462"]["resolution"])
    #  ZIP_10475Encoder = RandomDistributedScalarEncoder(enParams["ZIP_10475"]["resolution"])
    #  ZIP_10466Encoder = RandomDistributedScalarEncoder(enParams["ZIP_10466"]["resolution"])
    #  ZIP_10469Encoder = RandomDistributedScalarEncoder(enParams["ZIP_10469"]["resolution"])
    #  DEPT_11Encoder = RandomDistributedScalarEncoder(enParams["DEPT_11"]["resolution"])
    #  DEPT_24Encoder = RandomDistributedScalarEncoder(enParams["DEPT_24"]["resolution"])
    #  DEPT_41Encoder = RandomDistributedScalarEncoder(enParams["DEPT_41"]["resolution"])
    #  DEPT_34Encoder = RandomDistributedScalarEncoder(enParams["DEPT_34"]["resolution"])
    #  DEPT_31Encoder = RandomDistributedScalarEncoder(enParams["DEPT_31"]["resolution"])
    #  DEPT_60Encoder = RandomDistributedScalarEncoder(enParams["DEPT_60"]["resolution"])
    #  AGE_0_9Encoder = RandomDistributedScalarEncoder(enParams["AGE_0_9"]["resolution"])
    #  AGE_10_19Encoder = RandomDistributedScalarEncoder(enParams["AGE_10_19"]["resolution"])
    #  AGE_20_29Encoder = RandomDistributedScalarEncoder(enParams["AGE_20_29"]["resolution"])
    #  AGE_30_39Encoder = RandomDistributedScalarEncoder(enParams["AGE_30_39"]["resolution"])
    #  AGE_40_49Encoder = RandomDistributedScalarEncoder(enParams["AGE_40_49"]["resolution"])
    #  AGE_50_59Encoder = RandomDistributedScalarEncoder(enParams["AGE_50_59"]["resolution"])
    #  AGE_60_69Encoder = RandomDistributedScalarEncoder(enParams["AGE_60_69"]["resolution"])
    #  AGE_70_79Encoder = RandomDistributedScalarEncoder(enParams["AGE_70_79"]["resolution"])
    #  AGE_80_89Encoder = RandomDistributedScalarEncoder(enParams["AGE_80_89"]["resolution"])
    #  AGE_90_99Encoder = RandomDistributedScalarEncoder(enParams["AGE_90_99"]["resolution"])
    #  DIST_1_7Encoder = RandomDistributedScalarEncoder(enParams["DIST_1_7"]["resolution"])
    #  DIST_8_14Encoder = RandomDistributedScalarEncoder(enParams["DIST_8_14"]["resolution"])
    #  DIST_15_21Encoder = RandomDistributedScalarEncoder(enParams["DIST_15_21"]["resolution"])
    #  DIST_22_28Encoder = RandomDistributedScalarEncoder(enParams["DIST_22_28"]["resolution"])
    #  DIST_29_35Encoder = RandomDistributedScalarEncoder(enParams["DIST_29_35"]["resolution"])
    #  DIST_36_42Encoder = RandomDistributedScalarEncoder(enParams["DIST_36_42"]["resolution"])
    #  DIST_43_49Encoder = RandomDistributedScalarEncoder(enParams["DIST_43_49"]["resolution"])
    #  DIST_50_56Encoder = RandomDistributedScalarEncoder(enParams["DIST_50_56"]["resolution"])
    #  DIST_57_63Encoder = RandomDistributedScalarEncoder(enParams["DIST_57_63"]["resolution"])
    #  DIST_64_70Encoder = RandomDistributedScalarEncoder(enParams["DIST_64_70"]["resolution"])

    encodingWidth = (timeOfDayEncoder.getWidth() + weekendEncoder.getWidth() +
                     CtEncoder.getWidth() * 2)

    sp = SpatialPooler(
        inputDimensions=(encodingWidth, ),
        columnDimensions=(spParams["columnCount"], ),
        potentialPct=spParams["potentialPct"],
        potentialRadius=encodingWidth,
        globalInhibition=spParams["globalInhibition"],
        localAreaDensity=spParams["localAreaDensity"],
        numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
        synPermInactiveDec=spParams["synPermInactiveDec"],
        synPermActiveInc=spParams["synPermActiveInc"],
        synPermConnected=spParams["synPermConnected"],
        boostStrength=spParams["boostStrength"],
        seed=spParams["seed"],
        wrapAround=True)

    tm = TemporalMemory(
        columnDimensions=(tmParams["columnCount"], ),
        cellsPerColumn=tmParams["cellsPerColumn"],
        activationThreshold=tmParams["activationThreshold"],
        initialPermanence=tmParams["initialPerm"],
        connectedPermanence=spParams["synPermConnected"],
        minThreshold=tmParams["minThreshold"],
        maxNewSynapseCount=tmParams["newSynapseCount"],
        permanenceIncrement=tmParams["permanenceInc"],
        permanenceDecrement=tmParams["permanenceDec"],
        predictedSegmentDecrement=0.0,
        maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
        maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
        seed=tmParams["seed"])

    classifier = SDRClassifierFactory.create()
    results = []
    with open(_INPUT_FILE_PATH, "r") as fin:
        reader = csv.reader(fin)
        headers = reader.next()
        reader.next()
        reader.next()

        output = output_anomaly_generic_v1.NuPICFileOutput(_FILE_NAME)

        for count, record in enumerate(reader):

            if count >= numRecords: break

            # Convert data string into Python date object.
            dateString = datetime.datetime.strptime(record[0],
                                                    "%Y-%m-%d %H:%M:%S")
            # Convert data value string into float.
            Ct = float(record[1])
            ZIP_10467 = float(record[2])
            #      ZIP_10462 = float(record[3])
            #      ZIP_10475 = float(record[4])
            #      ZIP_10466 = float(record[5])
            #      ZIP_10469 = float(record[6])
            #      DEPT_11 = float(record[7])
            #      DEPT_24 = float(record[8])
            #      DEPT_41 = float(record[9])
            #      DEPT_34 = float(record[10])
            #      DEPT_31 = float(record[11])
            #      DEPT_60 = float(record[12])
            #      AGE_0_9 = float(record[13])
            #      AGE_10_19 = float(record[14])
            #      AGE_20_29 = float(record[15])
            #      AGE_30_39 = float(record[16])
            #      AGE_40_49 = float(record[17])
            #      AGE_50_59 = float(record[18])
            #      AGE_60_69 = float(record[19])
            #      AGE_70_79 = float(record[20])
            #      AGE_80_89 = float(record[21])
            #      AGE_90_99 = float(record[22])
            #      DIST_1_7 = float(record[23])
            #      DIST_8_14 = float(record[24])
            #      DIST_15_21 = float(record[25])
            #      DIST_22_28 = float(record[26])
            #      DIST_29_35 = float(record[27])
            #      DIST_36_42 = float(record[28])
            #      DIST_43_49 = float(record[29])
            #      DIST_50_56 = float(record[30])
            #      DIST_57_63 = float(record[31])
            #      DIST_64_70 = float(record[31])

            # To encode, we need to provide zero-filled numpy arrays for the encoders
            # to populate.
            timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
            weekendBits = numpy.zeros(weekendEncoder.getWidth())
            CtBits = numpy.zeros(CtEncoder.getWidth())
            ZIP_10467Bits = numpy.zeros(ZIP_10467Encoder.getWidth())
            #      ZIP_10462Bits = numpy.zeros(ZIP_10462Encoder.getWidth())
            #      ZIP_10475Bits = numpy.zeros(ZIP_10475Encoder.getWidth())
            #      ZIP_10466Bits = numpy.zeros(ZIP_10466Encoder.getWidth())
            #      ZIP_10469Bits = numpy.zeros(ZIP_10469Encoder.getWidth())
            #      DEPT_11Bits = numpy.zeros(DEPT_11Encoder.getWidth())
            #      DEPT_24Bits = numpy.zeros(DEPT_24Encoder.getWidth())
            #      DEPT_41Bits = numpy.zeros(DEPT_41Encoder.getWidth())
            #      DEPT_34Bits = numpy.zeros(DEPT_34Encoder.getWidth())
            #      DEPT_31Bits = numpy.zeros(DEPT_31Encoder.getWidth())
            #      DEPT_60Bits = numpy.zeros(DEPT_60Encoder.getWidth())
            #      AGE_0_9Bits = numpy.zeros(AGE_0_9Encoder.getWidth())
            #      AGE_10_19Bits = numpy.zeros(AGE_10_19Encoder.getWidth())
            #      AGE_20_29Bits = numpy.zeros(AGE_20_29Encoder.getWidth())
            #      AGE_30_39Bits = numpy.zeros(AGE_30_39Encoder.getWidth())
            #      AGE_40_49Bits = numpy.zeros(AGE_40_49Encoder.getWidth())
            #      AGE_50_59Bits = numpy.zeros(AGE_50_59Encoder.getWidth())
            #      AGE_60_69Bits = numpy.zeros(AGE_60_69Encoder.getWidth())
            #      AGE_70_79Bits = numpy.zeros(AGE_70_79Encoder.getWidth())
            #      AGE_80_89Bits = numpy.zeros(AGE_80_89Encoder.getWidth())
            #      AGE_90_99Bits = numpy.zeros(AGE_90_99Encoder.getWidth())
            #      DIST_1_7Bits = numpy.zeros(DIST_1_7Encoder.getWidth())
            #      DIST_8_14Bits = numpy.zeros(DIST_8_14Encoder.getWidth())
            #      DIST_15_21Bits = numpy.zeros(DIST_15_21Encoder.getWidth())
            #      DIST_22_28Bits = numpy.zeros(DIST_22_28Encoder.getWidth())
            #      DIST_29_35Bits = numpy.zeros(DIST_29_35Encoder.getWidth())
            #      DIST_36_42Bits = numpy.zeros(DIST_36_42Encoder.getWidth())
            #      DIST_43_49Bits = numpy.zeros(DIST_43_49Encoder.getWidth())
            #      DIST_50_56Bits = numpy.zeros(DIST_50_56Encoder.getWidth())
            #      DIST_57_63Bits = numpy.zeros(DIST_57_63Encoder.getWidth())
            #      DIST_64_70Bits = numpy.zeros(DIST_64_70Encoder.getWidth())

            # Now we call the encoders to create bit representations for each value.
            timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
            weekendEncoder.encodeIntoArray(dateString, weekendBits)
            CtEncoder.encodeIntoArray(Ct, CtBits)
            ZIP_10467Encoder.encodeIntoArray(ZIP_10467, ZIP_10467Bits)
            #      ZIP_10462Encoder.encodeIntoArray(ZIP_10462, ZIP_10462Bits)
            #      ZIP_10475Encoder.encodeIntoArray(ZIP_10475, ZIP_10475Bits)
            #      ZIP_10466Encoder.encodeIntoArray(ZIP_10466, ZIP_10466Bits)
            #      ZIP_10469Encoder.encodeIntoArray(ZIP_10469, ZIP_10469Bits)
            #      DEPT_11Encoder.encodeIntoArray(DEPT_11, DEPT_11Bits)
            #      DEPT_24Encoder.encodeIntoArray(DEPT_24, DEPT_24Bits)
            #      DEPT_41Encoder.encodeIntoArray(DEPT_41, DEPT_41Bits)
            #      DEPT_34Encoder.encodeIntoArray(DEPT_34, DEPT_34Bits)
            #      DEPT_31Encoder.encodeIntoArray(DEPT_31, DEPT_31Bits)
            #      DEPT_60Encoder.encodeIntoArray(DEPT_60, DEPT_60Bits)
            #      AGE_0_9Encoder.encodeIntoArray(AGE_0_9, AGE_0_9Bits)
            #      AGE_10_19Encoder.encodeIntoArray(AGE_10_19, AGE_10_19Bits)
            #      AGE_20_29Encoder.encodeIntoArray(AGE_20_29, AGE_20_29Bits)
            #      AGE_30_39Encoder.encodeIntoArray(AGE_30_39, AGE_30_39Bits)
            #      AGE_40_49Encoder.encodeIntoArray(AGE_40_49, AGE_40_49Bits)
            #      AGE_50_59Encoder.encodeIntoArray(AGE_50_59, AGE_50_59Bits)
            #      AGE_60_69Encoder.encodeIntoArray(AGE_60_69, AGE_60_69Bits)
            #      AGE_70_79Encoder.encodeIntoArray(AGE_70_79, AGE_70_79Bits)
            #      AGE_80_89Encoder.encodeIntoArray(AGE_80_89, AGE_80_89Bits)
            #      AGE_90_99Encoder.encodeIntoArray(AGE_90_99, AGE_90_99Bits)
            #      DIST_1_7Encoder.encodeIntoArray(DIST_1_7, DIST_1_7Bits)
            #      DIST_8_14Encoder.encodeIntoArray(DIST_8_14, DIST_8_14Bits)
            #      DIST_15_21Encoder.encodeIntoArray(DIST_15_21, DIST_15_21Bits)
            #      DIST_22_28Encoder.encodeIntoArray(DIST_22_28, DIST_22_28Bits)
            #      DIST_29_35Encoder.encodeIntoArray(DIST_29_35, DIST_29_35Bits)
            #      DIST_36_42Encoder.encodeIntoArray(DIST_36_42, DIST_36_42Bits)
            #      DIST_43_49Encoder.encodeIntoArray(DIST_43_49, DIST_43_49Bits)
            #      DIST_50_56Encoder.encodeIntoArray(DIST_50_56, DIST_50_56Bits)
            #      DIST_57_63Encoder.encodeIntoArray(DIST_57_63, DIST_57_63Bits)
            #      DIST_64_70Encoder.encodeIntoArray(DIST_64_70, DIST_64_70Bits)
            # Concatenate all these encodings into one large encoding for Spatial
            # Pooling.
            encoding = numpy.concatenate(
                [timeOfDayBits, weekendBits, CtBits, ZIP_10467Bits])
            #      encoding = numpy.concatenate(
            #        [timeOfDayBits, weekendBits, CtBits,
            #         ZIP_10467Bits, ZIP_10462Bits, ZIP_10475Bits, ZIP_10466Bits, ZIP_10469Bits,
            #         DEPT_11Bits, DEPT_24Bits, DEPT_41Bits, DEPT_34Bits, DEPT_31Bits,
            #         DEPT_60Bits, AGE_0_9Bits, AGE_10_19Bits, AGE_20_29Bits, AGE_30_39Bits,
            #         AGE_40_49Bits, AGE_50_59Bits, AGE_60_69Bits, AGE_70_79Bits, AGE_80_89Bits,
            #         AGE_90_99Bits, DIST_1_7Bits, DIST_8_14Bits, DIST_15_21Bits, DIST_22_28Bits,
            #         DIST_29_35Bits, DIST_36_42Bits, DIST_43_49Bits, DIST_50_56Bits, DIST_57_63Bits,
            #         DIST_64_70Bits])

            # Create an array to represent active columns, all initially zero. This
            # will be populated by the compute method below. It must have the same
            # dimensions as the Spatial Pooler.
            activeColumns = numpy.zeros(spParams["columnCount"])

            # Execute Spatial Pooling algorithm over input space.
            sp.compute(encoding, True, activeColumns)
            activeColumnIndices = numpy.nonzero(activeColumns)[0]

            # Execute Temporal Memory algorithm over active mini-columns.
            tm.compute(activeColumnIndices, learn=True)

            activeCells = tm.getActiveCells()

            # Get the bucket info for this input value for classification.
            bucketIdx = CtEncoder.getBucketIndices(Ct)[0]

            # Run classifier to translate active cells back to scalar value.
            classifierResult = classifier.compute(recordNum=count,
                                                  patternNZ=activeCells,
                                                  classification={
                                                      "bucketIdx": bucketIdx,
                                                      "actValue": Ct
                                                  },
                                                  learn=True,
                                                  infer=True)

            # Print the best prediction for 1 step out.
            oneStepConfidence, oneStep = sorted(zip(
                classifierResult[1], classifierResult["actualValues"]),
                                                reverse=True)[0]
            # print("1-step: {:16} ({:4.4}%)".format(oneStep, oneStepConfidence * 100))
            #      results.append([oneStep, oneStepConfidence * 100, None, None])
            results.append([record[0], Ct, oneStep, oneStepConfidence * 100])
            output.write(record[0], Ct, oneStep, oneStepConfidence * 100)

        output.close()
        return results
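The sorted-zip idiom near the end pairs each bucket's one-step probability with its value and keeps the most likely pair; written out longhand (an equivalent sketch):

probabilities = classifierResult[1]            # P(bucket), one step ahead
values = classifierResult["actualValues"]
best = max(range(len(values)), key=lambda i: probabilities[i])
oneStep, oneStepConfidence = values[best], probabilities[best]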
Example 15
def HTM_AD(
        Data='Test',
        vars={'value': ['num']},
        prec_param=5,
        pooler_out=2024,  # Number of columns of the pooler output
        cell_col=5,  # HTM cells per column
        W=72,  # Window parameter
        W_prim=5,  # Local window for anomaly detection likelihood
        eps=1e-6,  # to avoid division by zero
        athreshold=0.95):
    """
    This function performs HTM based anomaly detection on a time series provided
    :param Data:
    :param vars: Possible values: num, tod, weekend
    :param prec_param: A parameter that defines how much precision the number encoder has
        The encoder precision depends on the variability of the data,
        The real precision is computed taking into account both the precision parameter and data std
        A high precision might mean a high error at predicting the variable value in noisy variables
    :param pooler_out: Number of columns of the pooler output
    :param cell_col: HTM cells per column
    :param W: Window parameter
    :param W_prim: Local window for anomaly detection likelihood
    :param eps: to Avoid by zero divisions
    :param athreshold: To classify based on anomaly likelihood whether there is an anomaly or not
    :return: The Data + 3 columns
        Anomaly: indicates the error of within the value predicted by the HTM network
        Anomaly_likelihood: indicates the likelihood of the data into being anomalous
        Anomaly_flag: classifies the data in anomalous vs non anomalous
    """

    if Data == 'Test':  # If there is not data available, simply loads the temperature benchmark dataset
        # Import data
        Data = pd.read_csv('anomaly_API/Data/sample.csv',
                           parse_dates=True,
                           index_col='timestamp')
        Data = Data.resample('H').bfill().interpolate()

    TODE = DateEncoder(timeOfDay=(21, 1))
    WENDE = DateEncoder(weekend=21)

    var_encoders = set()
    # Spatial Pooler Parameters
    for x in vars:
        for y in vars[x]:
            if y == 'num':
                exec(
                    "RDSE_" + x +
                    " = RandomDistributedScalarEncoder(resolution=Data['" + x +
                    "'].std()/prec_param)", locals(), globals())
                var_encoders.add(Encoder(x, ["RDSE_" + x]))
            elif y == 'weekend':
                var_encoders.add(Encoder(x, ["WENDE"]))
            elif y == 'tod':
                var_encoders.add(Encoder(x, ["TODE"]))
            else:
                return {"error": "Variable encoder type is not recognized "}

    encoder_width = 0  # Computes encoder width
    for x in var_encoders:
        for y in x.encoders:
            exec("s = " + y + ".getWidth()", locals(), globals())
            encoder_width += s

    SP = SpatialPooler(
        inputDimensions=encoder_width,
        columnDimensions=pooler_out,
        potentialPct=0.8,
        globalInhibition=True,
        numActiveColumnsPerInhArea=pooler_out //
        50,  # Gets 2% of the total area
        boostStrength=1.0,
        wrapAround=False)
    TM = TemporalMemory(columnDimensions=(pooler_out, ),
                        cellsPerColumn=cell_col)

    Data['Anomaly'] = 0.0
    Data['Anomaly_Likelihood'] = 0.0

    # Train Spatial Pooler
    print("Spatial pooler learning")

    start = time.time()

    active_columns = np.zeros(pooler_out)

    for x in range(len(Data)):
        encoder = multiencode(var_encoders, Data, x)
        SP.compute(encoder, True, active_columns)

    end = time.time()
    print(end - start)

    # Temporal pooler
    print("Temporal pooler learning")

    start = time.time()

    A_score = np.zeros(len(Data))
    for x in range(len(Data)):
        encoder = multiencode(var_encoders, Data, x)
        SP.compute(encoder, False, active_columns)
        col_index = active_columns.nonzero()[0]
        TM.compute(col_index, learn=True)
        if x > 0:
            inter = set(col_index).intersection(Prev_pred_col)
            inter_l = len(inter)
            active_l = len(col_index)
            A_score[x] = 1 - inter_l / float(active_l)
            Data.iat[x, -2] = A_score[x]
        Prev_pred_col = list(
            set(x // cell_col for x in TM.getPredictiveCells()))

    end = time.time()
    print(end - start)

    AL_score = np.zeros(len(Data))
    # Computes the likelihood of the anomaly
    for x in range(len(Data)):
        if x > 0:
            W_vec = A_score[max(0, x - W):x]
            W_prim_vec = A_score[max(0, x - W_prim):x]
            AL_score[x] = 1 - 2 * norm.sf(
                abs(np.mean(W_vec) - np.mean(W_prim_vec)) /
                max(np.std(W_vec), eps))
            Data.iat[x, -1] = AL_score[x]

    Data['Anomaly_flag'] = Data['Anomaly_Likelihood'] > athreshold

    return Data
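A quick numeric check of the likelihood line above: if the anomaly scores in the long window W average 0.1 with std 0.05, and the recent short window W_prim averages 0.3, the z-score is |0.1 - 0.3| / 0.05 = 4, so

from scipy.stats import norm
print(1 - 2 * norm.sf(4))  # ~0.99994: the recent window is very unlikely under the long window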
Example 16
def testSP():
    """Run a SP test."""

    elemSize = 400
    numSet = 42

    addNear = True
    numRecords = 2

    wantPlot = True

    poolPct = 0.5
    itr = 1
    doLearn = True

    while numRecords < 3:

        # Setup a SP
        sp = SpatialPooler(columnDimensions=(2048, 1),
                           inputDimensions=(1, elemSize),
                           potentialRadius=elemSize // 2,
                           numActiveColumnsPerInhArea=40,
                           spVerbosity=0,
                           stimulusThreshold=0,
                           seed=1,
                           potentialPct=poolPct,
                           globalInhibition=True)

        # Generate inputs using rand()
        inputs = generateRandomInput(numRecords, elemSize, numSet)
        if addNear:
            # Append similar entries (distance of 1)
            appendInputWithNSimilarValues(inputs, 42)

        inputSize = len(inputs)
        print('Num random records = %d, inputs to process %d' %
              (numRecords, inputSize))

        # Run a number of iterations, with learning on or off,
        # retrieve results from the last iteration only
        outputs = np.zeros((inputSize, 2048))

        numIter = 1
        if doLearn:
            numIter = itr

        for iter in range(numIter):
            for i in range(inputSize):
                time.sleep(0.001)
                if iter == numIter - 1:
                    # TODO: See https://github.com/numenta/nupic/issues/2072
                    sp.compute(inputs[i],
                               learn=doLearn,
                               activeArray=outputs[i])
                    #print outputs[i].sum(), outputs[i]
                else:
                    # TODO: See https://github.com/numenta/nupic/issues/2072
                    output = np.zeros(2048)
                    sp.compute(inputs[i], learn=doLearn, activeArray=output)

        # Build a plot from the generated input and output and display it
        distribMatrix = generatePlot(outputs, inputs)

        # If we don't want a plot, just continue
        if wantPlot:
            plt.imshow(distribMatrix, origin='lower', interpolation="nearest")
            plt.ylabel('SP (2048/40) distance in %')
            plt.xlabel('Input (400/42) distance in %')

            title = 'SP distribution'
            if doLearn:
                title += ', learning ON'
            else:
                title += ', learning OFF'

            title += ', inputs = %d' % len(inputs)
            title += ', iterations = %d' % numIter
            title += ', poolPct =%f' % poolPct

            plt.suptitle(title, fontsize=12)
            plt.show()
            #plt.savefig(os.path.join('~/Desktop/ExperimentResults/videos5', '%s' % numRecords))
            #plt.clf()

        numRecords += 1

    return
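generateRandomInput and appendInputWithNSimilarValues come from the surrounding test module; plausible sketches consistent with how they are used here (the "distance of 1" comment means each appended vector moves a single active bit):

import numpy as np

def generateRandomInput(numRecords, elemSize, numSet):
    inputs = []
    for _ in range(numRecords):
        v = np.zeros(elemSize, dtype=np.float32)
        v[np.random.choice(elemSize, numSet, replace=False)] = 1
        inputs.append(v)
    return inputs

def appendInputWithNSimilarValues(inputs, n):
    base = inputs[-1]
    on, off = np.nonzero(base)[0], np.nonzero(base == 0)[0]
    for k in range(n):
        v = base.copy()
        v[on[k % len(on)]] = 0    # turn one active bit off...
        v[off[k % len(off)]] = 1  # ...and a new one on (distance 1)
        inputs.append(v)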
Example 17
def testSPFile():
    """ Run test on the data file - the file has records previously encoded.
  """

    spSize = 2048
    spSet = 40

    poolPct = 0.5

    pattern = [50, 1000]
    doLearn = True

    PLOT_PRECISION = 100
    distribMatrix = np.zeros((PLOT_PRECISION + 1, PLOT_PRECISION + 1))

    inputs = []

    #file = open('~/Desktop/ExperimentResults/sampleArtificial.csv', 'rb')
    #elemSize = 400
    #numSet = 42

    #file = open('~/Desktop/ExperimentResults/sampleDataBasilOneField.csv', 'rb')
    #elemSize = 499
    #numSet = 7

    outdir = '~/Desktop/ExperimentResults/Basil100x21'
    inputFile = outdir + '.csv'
    file = open(inputFile, 'rb')

    elemSize = 100
    numSet = 21

    reader = csv.reader(file)

    for row in reader:
        input = np.array(list(map(float, row)), dtype=realDType)
        if len(input.nonzero()[0]) != numSet:
            continue

        inputs.append(input.copy())

    file.close()

    # Setup a SP
    sp = SpatialPooler(columnDimensions=(spSize, 1),
                       inputDimensions=(1, elemSize),
                       potentialRadius=elemSize // 2,
                       numActiveColumnsPerInhArea=spSet,
                       spVerbosity=0,
                       stimulusThreshold=0,
                       synPermConnected=0.10,
                       seed=1,
                       potentialPct=poolPct,
                       globalInhibition=True)

    cleanPlot = False

    doLearn = False

    print('Finished reading file, inputs/outputs to process =', len(inputs))

    size = len(inputs)

    # Output buffers written by sp.compute() below.
    outputs = np.zeros((size, spSize))
    output1 = np.zeros(spSize)
    output2 = np.zeros(spSize)

    for iter in range(100):

        print('Iteration', iter)

        # Learn
        if iter != 0:
            for learnRecs in range(pattern[0]):

                # TODO: See https://github.com/numenta/nupic/issues/2072
                ind = np.random.random_integers(0, size - 1, 1)[0]
                sp.compute(inputs[ind], learn=True, activeArray=outputs[ind])

        # Test
        for _ in range(pattern[1]):
            rand1 = np.random.random_integers(0, size - 1, 1)[0]
            rand2 = np.random.random_integers(0, size - 1, 1)[0]

            sp.compute(inputs[rand1], learn=False, activeArray=output1)
            sp.compute(inputs[rand2], learn=False, activeArray=output2)

            outDist = (abs(output1 - output2) > 0.1)
            intOutDist = int(outDist.sum() / 2 + 0.1)

            inDist = (abs(inputs[rand1] - inputs[rand2]) > 0.1)
            intInDist = int(inDist.sum() / 2 + 0.1)

            if intInDist != numSet or intOutDist != spSet:
                print(rand1, rand2, '-', intInDist, intOutDist)

            x = int(PLOT_PRECISION * intOutDist / spSet)
            y = int(PLOT_PRECISION * intInDist / numSet)
            if distribMatrix[x, y] < 0.1:
                distribMatrix[x, y] = 3
            else:
                if distribMatrix[x, y] < 10:
                    distribMatrix[x, y] += 1

        if True:
            plt.imshow(distribMatrix, origin='lower', interpolation="nearest")
            plt.ylabel('SP (%d/%d) distance in pct' % (spSize, spSet))
            plt.xlabel('Input (%d/%d) distance in pct' % (elemSize, numSet))

            title = 'SP distribution'
            title += ', iter = %d' % iter
            title += ', Pct =%f' % poolPct

            plt.suptitle(title, fontsize=12)

            #plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosArtData', '%s' % iter))
            plt.savefig(os.path.join(outdir, '%s' % iter))

            plt.clf()
            distribMatrix = np.zeros((PLOT_PRECISION + 1, PLOT_PRECISION + 1))
Example 18
def testSPNew():
    """ New version of the test"""

    elemSize = 400
    numSet = 42

    addNear = True
    numRecords = 1000

    wantPlot = False

    poolPct = 0.5
    itr = 5

    pattern = [60, 1000]
    doLearn = True
    start = 1
    learnIter = 0
    noLearnIter = 0

    numLearns = 0
    numTests = 0

    numIter = 1

    numGroups = 1000

    PLOT_PRECISION = 100
    distribMatrix = np.zeros((PLOT_PRECISION + 1, PLOT_PRECISION + 1))

    inputs = generateRandomInput(numGroups, elemSize, numSet)

    # Output buffers written by sp.compute() below.
    output1 = np.zeros(2048)
    output2 = np.zeros(2048)

    # Setup a SP
    sp = SpatialPooler(columnDimensions=(2048, 1),
                       inputDimensions=(1, elemSize),
                       potentialRadius=elemSize // 2,
                       numActiveColumnsPerInhArea=40,
                       spVerbosity=0,
                       stimulusThreshold=0,
                       synPermConnected=0.12,
                       seed=1,
                       potentialPct=poolPct,
                       globalInhibition=True)

    cleanPlot = False

    for i in range(numRecords):
        input1 = getRandomWithMods(inputs, 4)
        if i % 2 == 0:
            input2 = getRandomWithMods(inputs, 4)
        else:
            input2 = input1.copy()
            input2 = modifyBits(input2, 21)

        inDist = (abs(input1 - input2) > 0.1)
        intInDist = int(inDist.sum() / 2 + 0.1)
        #print intInDist

        if start == 0:
            doLearn = True
            learnIter += 1
            if learnIter == pattern[start]:
                numLearns += 1
                start = 1
                noLearnIter = 0
        elif start == 1:
            doLearn = False
            noLearnIter += 1
            if noLearnIter == pattern[start]:
                numTests += 1
                start = 0
                learnIter = 0
                cleanPlot = True

        # TODO: See https://github.com/numenta/nupic/issues/2072
        sp.compute(input1, learn=doLearn, activeArray=output1)
        sp.compute(input2, learn=doLearn, activeArray=output2)
        time.sleep(0.001)

        outDist = (abs(output1 - output2) > 0.1)
        intOutDist = int(outDist.sum() / 2 + 0.1)

        if not doLearn and intOutDist < 2 and intInDist > 10:
            # Disabled debug dump: rerun the pair with verbose SP output and
            # inspect the boost factors when two distant inputs map to nearly
            # identical SDRs.
            # sp.spVerbosity = 10
            # # TODO: See https://github.com/numenta/nupic/issues/2072
            # sp.compute(input1, learn=doLearn, activeArray=output1)
            # sp.compute(input2, learn=doLearn, activeArray=output2)
            # sp.spVerbosity = 0
            # print 'Elements have very small SP distance: %d' % intOutDist
            # print output1.nonzero()
            # print output2.nonzero()
            # print sp._firingBoostFactors[output1.nonzero()[0]]
            # print sp._synPermBoostFactors[output1.nonzero()[0]]
            # print 'Input elements distance is %d' % intInDist
            # print input1.nonzero()
            # print input2.nonzero()
            # sys.stdin.readline()
            pass

        if not doLearn:
            x = int(PLOT_PRECISION * intOutDist / 40.0)
            y = int(PLOT_PRECISION * intInDist / 42.0)
            if distribMatrix[x, y] < 0.1:
                distribMatrix[x, y] = 3
            else:
                if distribMatrix[x, y] < 10:
                    distribMatrix[x, y] += 1

        #print i

        # If we don't want a plot, just continue
        if wantPlot and cleanPlot:
            plt.imshow(distribMatrix, origin='lower', interpolation="nearest")
            plt.ylabel('SP (2048/40) distance in %')
            plt.xlabel('Input (400/42) distance in %')

            title = 'SP distribution'

            #if doLearn:
            #  title += ', leaning ON'
            #else:
            #  title +=  ', learning OFF'

            title += ', learn sets = %d' % numLearns
            title += ', test sets = %d' % numTests
            title += ', iter = %d' % numIter
            title += ', groups = %d' % numGroups
            title += ', Pct =%f' % poolPct

            plt.suptitle(title, fontsize=12)
            #plt.show()

            plt.savefig(
                os.path.join('~/Desktop/ExperimentResults/videosNew',
                             '%s' % i))

            plt.clf()
            distribMatrix = np.zeros((PLOT_PRECISION + 1, PLOT_PRECISION + 1))
            cleanPlot = False
Example 19
def runHotgym(numRecords):
    with open(_PARAMS_PATH, "r") as f:
        modelParams = yaml.safe_load(f)["modelParams"]
        enParams = modelParams["sensorParams"]["encoders"]
        spParams = modelParams["spParams"]
        tmParams = modelParams["tmParams"]

    timeOfDayEncoder = DateEncoder(
        timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"])
    weekendEncoder = DateEncoder(
        weekend=enParams["timestamp_weekend"]["weekend"])
    scalarEncoder = RandomDistributedScalarEncoder(
        enParams["consumption"]["resolution"])

    encodingWidth = (timeOfDayEncoder.getWidth() + weekendEncoder.getWidth() +
                     scalarEncoder.getWidth())

    sp = SpatialPooler(
        # How large the input encoding will be.
        inputDimensions=(encodingWidth, ),
        # How many mini-columns will be in the Spatial Pooler.
        columnDimensions=(spParams["columnCount"], ),
        # What percent of the column's receptive field is available for potential
        # synapses?
        potentialPct=spParams["potentialPct"],
        # Potential radius should be set to the input size if there is global
        # inhibition.
        potentialRadius=encodingWidth,
        # This means that the input space has no topology.
        globalInhibition=spParams["globalInhibition"],
        localAreaDensity=spParams["localAreaDensity"],
        # Roughly 2%, giving that there is only one inhibition area because we have
        # turned on globalInhibition (40 / 2048 = 0.0195)
        numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
        # How quickly synapses grow and degrade.
        synPermInactiveDec=spParams["synPermInactiveDec"],
        synPermActiveInc=spParams["synPermActiveInc"],
        synPermConnected=spParams["synPermConnected"],
        # boostStrength controls the strength of boosting. Boosting encourages
        # efficient usage of SP columns.
        boostStrength=spParams["boostStrength"],
        # Random number generator seed.
        seed=spParams["seed"],
        # TODO: is this useful?
        # Determines if inputs at the beginning and end of an input dimension should
        # be considered neighbors when mapping columns to inputs.
        wrapAround=True)

    tm = TemporalMemory(
        # Must be the same dimensions as the SP
        columnDimensions=(tmParams["columnCount"], ),
        # How many cells in each mini-column.
        cellsPerColumn=tmParams["cellsPerColumn"],
        # A segment is active if it has >= activationThreshold connected synapses
        # that are active due to infActiveState
        activationThreshold=tmParams["activationThreshold"],
        initialPermanence=tmParams["initialPerm"],
        # TODO: This comes from the SP params; is this normal?
        connectedPermanence=spParams["synPermConnected"],
        # Minimum number of active synapses for a segment to be considered during
        # search for the best-matching segments.
        minThreshold=tmParams["minThreshold"],
        # The max number of synapses added to a segment during learning
        maxNewSynapseCount=tmParams["newSynapseCount"],
        permanenceIncrement=tmParams["permanenceInc"],
        permanenceDecrement=tmParams["permanenceDec"],
        predictedSegmentDecrement=0.0,
        maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
        maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
        seed=tmParams["seed"])

    classifier = SDRClassifierFactory.create()
    results = []
    with open(_INPUT_FILE_PATH, "r") as fin:
        reader = csv.reader(fin)
        headers = reader.next()
        # Skip the two NuPIC CSV meta rows (field types and special flags).
        reader.next()
        reader.next()

        for count, record in enumerate(reader):

            if count >= numRecords: break

            # Parse the date string into a Python datetime object.
            dateString = datetime.datetime.strptime(record[0],
                                                    "%m/%d/%y %H:%M")
            # Convert data value string into float.
            consumption = float(record[1])

            # To encode, we need to provide zero-filled numpy arrays for the encoders
            # to populate.
            timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
            weekendBits = numpy.zeros(weekendEncoder.getWidth())
            consumptionBits = numpy.zeros(scalarEncoder.getWidth())

            # Now we call the encoders to create bit representations for each value.
            timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
            weekendEncoder.encodeIntoArray(dateString, weekendBits)
            scalarEncoder.encodeIntoArray(consumption, consumptionBits)

            # Concatenate all these encodings into one large encoding for Spatial
            # Pooling.
            encoding = numpy.concatenate(
                [timeOfDayBits, weekendBits, consumptionBits])

            # Create an array to represent active columns, all initially zero. This
            # will be populated by the compute method below. It must have the same
            # dimensions as the Spatial Pooler.
            activeColumns = numpy.zeros(spParams["columnCount"])

            # Execute Spatial Pooling algorithm over input space.
            sp.compute(encoding, True, activeColumns)
            activeColumnIndices = numpy.nonzero(activeColumns)[0]

            # Execute Temporal Memory algorithm over active mini-columns.
            tm.compute(activeColumnIndices, learn=True)

            activeCells = tm.getActiveCells()

            # Get the bucket info for this input value for classification.
            bucketIdx = scalarEncoder.getBucketIndices(consumption)[0]

            # Run classifier to translate active cells back to scalar value.
            classifierResult = classifier.compute(recordNum=count,
                                                  patternNZ=activeCells,
                                                  classification={
                                                      "bucketIdx": bucketIdx,
                                                      "actValue": consumption
                                                  },
                                                  learn=True,
                                                  infer=True)

            # Print the best prediction for 1 step out.
            oneStepConfidence, oneStep = sorted(zip(
                classifierResult[1], classifierResult["actualValues"]),
                                                reverse=True)[0]
            print("1-step: {:16} ({:4.4}%)".format(oneStep,
                                                   oneStepConfidence * 100))
            results.append([oneStep, oneStepConfidence * 100, None, None])

        return results
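
A minimal usage sketch (an assumption, not part of the original example: it
presumes the module-level _PARAMS_PATH and _INPUT_FILE_PATH constants that
runHotgym reads are already defined):

if __name__ == "__main__":
    results = runHotgym(100)
    # Each entry is [oneStepPrediction, confidencePct, None, None].
    for prediction, confidence, _, _ in results[-5:]:
        print("predicted %s at %.1f%% confidence" % (prediction, confidence))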
Example n. 20
#  minval=0,
#  maxval=100)
#consumeEncoder = AdaptiveScalarEncoder(
#  n=400,
#  w=21)
consumeEncoder = SimHashDistributedScalarEncoder(n=400, w=21, resolution=0.25)
encodingWidth = (timeOfDayEncoder.getWidth() + weekendEncoder.getWidth() +
                 consumeEncoder.getWidth())
classifier = SDRClassifierFactory.create()
sp = SpatialPooler(inputDimensions=(encodingWidth, ),
                   columnDimensions=(COL_WIDTH, ),
                   potentialPct=0.85,
                   potentialRadius=encodingWidth,
                   globalInhibition=True,
                   localAreaDensity=-1.0,
                   numActiveColumnsPerInhArea=40,
                   synPermInactiveDec=0.005,
                   synPermActiveInc=0.04,
                   synPermConnected=0.1,
                   boostStrength=3.0,
                   seed=1956,
                   wrapAround=False)
tm = TemporalMemory(columnDimensions=(COL_WIDTH, ),
                    cellsPerColumn=32,
                    activationThreshold=16,
                    initialPermanence=0.21,
                    connectedPermanence=0.5,
                    minThreshold=12,
                    maxNewSynapseCount=20,
                    permanenceIncrement=0.1,
                    # The source snippet is cut off here; closing the call so
                    # the example parses.
                    permanenceDecrement=0.1)
Example n. 21
    def initialize(self):
        # Keep track of value range for spatial anomaly detection.
        self.minVal = None
        self.maxVal = None

        # Time of day encoder
        self.timeOfDayEncoder = DateEncoder(timeOfDay=(21, 9.49),
                                            name='time_enc')
        # RDSE encoder for the time series value.
        minResolution = 0.001
        rangePadding = abs(self.inputMax - self.inputMin) * 0.2
        minVal = self.inputMin - rangePadding
        maxVal = self.inputMax + rangePadding
        numBuckets = 130
        resolution = max(minResolution, (maxVal - minVal) / numBuckets)
        self.value_enc = RandomDistributedScalarEncoder(resolution=resolution,
                                                        name='value_rdse')

        # Spatial Pooler.
        encodingWidth = self.timeOfDayEncoder.getWidth(
        ) + self.value_enc.getWidth()
        self.sp = SpatialPooler(
            inputDimensions=(encodingWidth, ),
            columnDimensions=(2048, ),
            potentialPct=0.8,
            potentialRadius=encodingWidth,
            globalInhibition=1,
            numActiveColumnsPerInhArea=40,
            synPermInactiveDec=0.0005,
            synPermActiveInc=0.003,
            synPermConnected=0.2,
            boostStrength=0.0,
            seed=1956,
            wrapAround=True,
        )

        self.tm = TemporalMemory(
            columnDimensions=(2048, ),
            cellsPerColumn=32,
            activationThreshold=20,
            initialPermanence=.5,  # Increased to connectedPermanence.
            connectedPermanence=.5,
            minThreshold=13,
            maxNewSynapseCount=31,
            permanenceIncrement=0.04,
            permanenceDecrement=0.008,
            predictedSegmentDecrement=0.001,
            maxSegmentsPerCell=128,
            maxSynapsesPerSegment=128,  # Changed meaning. Also see connections.topology[2]
            seed=1993,
        )

        # Initialize the anomaly likelihood object
        numentaLearningPeriod = int(math.floor(self.probationaryPeriod / 2.0))
        self.anomalyLikelihood = anomaly_likelihood.AnomalyLikelihood(
            learningPeriod=numentaLearningPeriod,
            estimationSamples=self.probationaryPeriod - numentaLearningPeriod,
            reestimationPeriod=100,
        )

        self.age = 0
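
The initialize method above only builds the model; below is a sketch of the
per-record step it implies. The method name handleRecord and the overall
wiring are assumptions, while computeRawAnomalyScore (from
nupic.algorithms.anomaly) and anomalyProbability are standard NuPIC calls:

    def handleRecord(self, timestamp, value):
        # Sketch only: assumes numpy and computeRawAnomalyScore are imported.
        # Encode the timestamp and value, then concatenate into one input SDR.
        encoding = numpy.concatenate([
            self.timeOfDayEncoder.encode(timestamp),
            self.value_enc.encode(value)])

        # Remember which columns were predicted before this step.
        prevPredictedColumns = [
            self.tm.columnForCell(c) for c in self.tm.getPredictiveCells()]

        # Spatial pooling, then temporal memory.
        activeColumns = numpy.zeros(2048, dtype=numpy.uint32)
        self.sp.compute(encoding, True, activeColumns)
        activeColumnIndices = numpy.nonzero(activeColumns)[0]
        self.tm.compute(activeColumnIndices, learn=True)

        # Raw anomaly score: the fraction of active columns that were not
        # predicted, converted into an anomaly likelihood.
        rawScore = computeRawAnomalyScore(activeColumnIndices,
                                          prevPredictedColumns)
        anomalyProbability = self.anomalyLikelihood.anomalyProbability(
            value, rawScore, timestamp)
        self.age += 1
        return anomalyProbability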
Example n. 22
    def _runLearnInference(self,
                           n=30,
                           w=15,
                           columnDimensions=2048,
                           numActiveColumnsPerInhArea=40,
                           spSeed=1951,
                           spVerbosity=0,
                           numTrainingRecords=100,
                           seed=42):
        # Instantiate two identical spatial poolers. One will be used only for
        # learning. The other will be trained with identical records, but with
        # random inference calls thrown in.
        spLearnOnly = SpatialPooler(
            columnDimensions=(columnDimensions, 1),
            inputDimensions=(1, n),
            potentialRadius=n / 2,
            numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
            spVerbosity=spVerbosity,
            seed=spSeed,
            synPermInactiveDec=0.01,
            synPermActiveInc=0.2,
            synPermConnected=0.11,
        )

        spLearnInfer = SpatialPooler(
            columnDimensions=(columnDimensions, 1),
            inputDimensions=(1, n),
            potentialRadius=n / 2,
            numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
            spVerbosity=spVerbosity,
            seed=spSeed,
            synPermInactiveDec=0.01,
            synPermActiveInc=0.2,
            synPermConnected=0.11,
        )

        random.seed(seed)
        np.random.seed(seed)

        # Build up training set with numTrainingRecords patterns
        inputs = []  # holds post-encoded input patterns
        for i in xrange(numTrainingRecords):
            inputVector = np.zeros(n, dtype=realDType)
            inputVector[random.sample(xrange(n), w)] = 1
            inputs.append(inputVector)

        # Train each SP with identical inputs
        startTime = time.time()

        random.seed(seed)
        np.random.seed(seed)
        for i in xrange(numTrainingRecords):
            if spVerbosity > 0:
                print "Input #%d" % i
            # TODO: See https://github.com/numenta/nupic/issues/2072
            encodedInput = inputs[i]
            decodedOutput = np.zeros(columnDimensions)
            spLearnOnly.compute(encodedInput,
                                learn=True,
                                activeArray=decodedOutput)

        random.seed(seed)
        np.random.seed(seed)
        for i in xrange(numTrainingRecords):
            if spVerbosity > 0:
                print "Input #%d" % i
            # TODO: See https://github.com/numenta/nupic/issues/2072
            encodedInput = inputs[i]
            decodedOutput = np.zeros(columnDimensions)
            spLearnInfer.compute(encodedInput,
                                 learn=True,
                                 activeArray=decodedOutput)

        print "\nElapsed time: %.2f seconds\n" % (time.time() - startTime)

        # Test that both SP"s are identical by checking learning stats
        # A more in depth test would check all the coincidences, duty cycles, etc.
        # ala tpDiff
        # Edit: spDiff has been written as an in depth tester of the spatial pooler
        learnOnlyStats = spLearnOnly.getLearningStats()
        learnInferStats = spLearnInfer.getLearningStats()

        success = True
        # Check that the two spatial poolers are equivalent after the same training.
        success = success and spDiff(spLearnInfer, spLearnOnly)
        self.assertTrue(success)
        # Make sure that the pickled and loaded SPs are equivalent.
        spPickle = pickle.dumps(spLearnOnly, protocol=0)
        spLearnOnlyLoaded = pickle.loads(spPickle)
        success = success and spDiff(spLearnOnly, spLearnOnlyLoaded)
        self.assertTrue(success)
        for k in learnOnlyStats.keys():
            if learnOnlyStats[k] != learnInferStats[k]:
                success = False
                print "Stat", k, "is different:", learnOnlyStats[
                    k], learnInferStats[k]

        self.assertTrue(success)
        if success:
            print "Test succeeded"
Example n. 23
import numpy as np
from tqdm import tqdm

from nupic.encoders.scalar import ScalarEncoder
from nupic.algorithms.spatial_pooler import SpatialPooler
from nupic.algorithms.temporal_memory import TemporalMemory
from nupic.algorithms.sdr_classifier import SDRClassifier

N = 900
x = np.sin(np.arange(N) * 2 * np.pi / 30.0)
inputDimensions = (256, )
columnDimensions = (512, )

encoder = ScalarEncoder(21, -1.0, 1.0, n=inputDimensions[0])
sp = SpatialPooler(inputDimensions=inputDimensions,
                   columnDimensions=columnDimensions,
                   globalInhibition=True,
                   numActiveColumnsPerInhArea=21)
tm = TemporalMemory(columnDimensions=columnDimensions)
c = SDRClassifier(steps=[1], alpha=0.1, actValueAlpha=0.1, verbosity=0)

x_true = x[1:]
x_predict = np.zeros(len(x) - 1)

for i, xi in tqdm(enumerate(x[:-1])):
    # Encode the scalar and take its first on-bit as the bucket index.
    encoded = encoder.encode(xi)
    bucketIdx = np.where(encoded > 0)[0][0]
    # Run spatial pooling with learning on, then feed the active columns
    # to the temporal memory.
    spd = np.zeros(columnDimensions[0])
    sp.compute(encoded, True, spd)
    active_indices = np.where(spd > 0)[0]
    tm.compute(active_indices)
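    # Hedged completion (an assumption: the source snippet is truncated here
    # and never uses the classifier c or fills x_predict). The classification
    # fields follow the SDRClassifier.compute signature.
    result = c.compute(recordNum=i,
                       patternNZ=tm.getActiveCells(),
                       classification={"bucketIdx": bucketIdx,
                                       "actValue": xi},
                       learn=True,
                       infer=True)
    x_predict[i] = result["actualValues"][np.argmax(result[1])]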
Example n. 24
# Spatial Pooler Parameters

var_encoders = {Encoder('value', ['RDSE'])}
# Encoder('_index', ['TODE'])}

# Sum the widths of all encoders; each encoder is referenced by the name of
# a module-level variable (e.g. "RDSE"), which exec resolves to the object.
encoder_width = 0
for x in var_encoders:
    for y in x.encoders:
        exec("s = " + y + ".getWidth()")
        encoder_width += s

SP = SpatialPooler(
    inputDimensions=encoder_width,
    columnDimensions=pooler_out,
    potentialPct=0.8,
    globalInhibition=True,
    numActiveColumnsPerInhArea=pooler_out // 50,  # Gets 2% of the total area
    boostStrength=1.0,
    wrapAround=False)
TM = TemporalMemory(columnDimensions=(pooler_out, ), cellsPerColumn=cell_col)

# Train Spatial Pooler
start = time.time()

active_columns = np.zeros(pooler_out)

print("Spatial pooler learning")

for x in range(len(Data)):
    encoder = multiencode(var_encoders, Data, x)
    # e_val = RDSE.encode(Data['value'][x])
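    # Hedged completion (an assumption: the source snippet is cut off inside
    # this loop): pool each encoded record with learning enabled.
    SP.compute(encoder, True, active_columns)

print("Spatial pooler learning took %.1f seconds" % (time.time() - start))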
Example n. 25
def runHotgym(numRecords):
    with open(_PARAMS_PATH, "r") as f:
        modelParams = yaml.safe_load(f)["modelParams"]
        enParams = modelParams["sensorParams"]["encoders"]
        spParams = modelParams["spParams"]
        tmParams = modelParams["tmParams"]

    scalarEncoder = RandomDistributedScalarEncoder(
        enParams["consumption"]["resolution"])
    scalarEncoder2 = RandomDistributedScalarEncoder(
        enParams["consumption2"]["resolution"])

    encodingWidth = (scalarEncoder.getWidth() + scalarEncoder2.getWidth())

    sp = SpatialPooler(
        inputDimensions=(encodingWidth, ),
        columnDimensions=(spParams["columnCount"], ),
        potentialPct=spParams["potentialPct"],
        potentialRadius=encodingWidth,
        globalInhibition=spParams["globalInhibition"],
        localAreaDensity=spParams["localAreaDensity"],
        numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
        synPermInactiveDec=spParams["synPermInactiveDec"],
        synPermActiveInc=spParams["synPermActiveInc"],
        synPermConnected=spParams["synPermConnected"],
        boostStrength=spParams["boostStrength"],
        seed=spParams["seed"],
        wrapAround=True)

    tm = TemporalMemory(
        columnDimensions=(tmParams["columnCount"], ),
        cellsPerColumn=tmParams["cellsPerColumn"],
        activationThreshold=tmParams["activationThreshold"],
        initialPermanence=tmParams["initialPerm"],
        connectedPermanence=spParams["synPermConnected"],
        minThreshold=tmParams["minThreshold"],
        maxNewSynapseCount=tmParams["newSynapseCount"],
        permanenceIncrement=tmParams["permanenceInc"],
        permanenceDecrement=tmParams["permanenceDec"],
        predictedSegmentDecrement=0.0,
        maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
        maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
        seed=tmParams["seed"])

    classifier = SDRClassifierFactory.create()
    results = []
    with open(_INPUT_FILE_PATH, "r") as fin:
        reader = csv.reader(fin)
        headers = reader.next()
        # Skip the two NuPIC CSV meta rows (field types and special flags).
        reader.next()
        reader.next()

        output = output_anomaly_generic_v1.NuPICFileOutput(_FILE_NAME)

        for count, record in enumerate(reader):

            if count >= numRecords: break

            # Date parsing is commented out in this variant; record[0] is kept
            # as-is and only written out with the results below.
            #      dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
            # Convert the data value strings into floats.
            prediction = float(record[1])
            prediction2 = float(record[2])

            # To encode, we need to provide zero-filled numpy arrays for the encoders
            # to populate.
            consumptionBits = numpy.zeros(scalarEncoder.getWidth())
            consumptionBits2 = numpy.zeros(scalarEncoder2.getWidth())

            # Now we call the encoders to create bit representations for each value.
            scalarEncoder.encodeIntoArray(prediction, consumptionBits)
            scalarEncoder2.encodeIntoArray(prediction2, consumptionBits2)

            # Concatenate all these encodings into one large encoding for Spatial
            # Pooling.
            encoding = numpy.concatenate([consumptionBits, consumptionBits2])

            # Create an array to represent active columns, all initially zero. This
            # will be populated by the compute method below. It must have the same
            # dimensions as the Spatial Pooler.
            activeColumns = numpy.zeros(spParams["columnCount"])

            # Execute Spatial Pooling algorithm over input space.
            sp.compute(encoding, True, activeColumns)
            activeColumnIndices = numpy.nonzero(activeColumns)[0]

            # Execute Temporal Memory algorithm over active mini-columns.
            tm.compute(activeColumnIndices, learn=True)

            activeCells = tm.getActiveCells()

            # Get the bucket info for this input value for classification.
            bucketIdx = scalarEncoder.getBucketIndices(prediction)[0]

            # Run classifier to translate active cells back to scalar value.
            classifierResult = classifier.compute(recordNum=count,
                                                  patternNZ=activeCells,
                                                  classification={
                                                      "bucketIdx": bucketIdx,
                                                      "actValue": prediction
                                                  },
                                                  learn=True,
                                                  infer=True)

            # Print the best prediction for 1 step out.
            oneStepConfidence, oneStep = sorted(zip(
                classifierResult[1], classifierResult["actualValues"]),
                                                reverse=True)[0]
            # print("1-step: {:16} ({:4.4}%)".format(oneStep, oneStepConfidence * 100))
            #      results.append([oneStep, oneStepConfidence * 100, None, None])
            results.append(
                [record[0], prediction, oneStep, oneStepConfidence * 100])
            output.write(record[0], prediction, oneStep,
                         oneStepConfidence * 100)

        output.close()
        return results
Example n. 26
    def testInhibition(self):
        """
    Test if the firing number of coincidences after inhibition
    equals spatial pooler numActiveColumnsPerInhArea.
    """
        # Miscellaneous variables:
        # n, w:                 n, w of encoders
        # inputLen:             Length of binary input
        # synPermConnected:     Spatial pooler synPermConnected
        # synPermActiveInc:     Spatial pooler synPermActiveInc
        # connectPct:           Initial connect percentage of permanences
        # columnDimensions:     Number of spatial pooler coincidences
        # numActiveColumnsPerInhArea:  Spatial pooler numActiveColumnsPerInhArea
        # stimulusThreshold:    Spatial pooler stimulusThreshold
        # spSeed:               Spatial pooler for initial permanences
        # stimulusThresholdInh: Parameter for inhibition, default value 0.00001
        # kDutyCycleFactor:     kDutyCycleFactor for dutyCycleTieBreaker in
        #                       Inhibition
        # spVerbosity:          Verbosity to print other sp initial parameters
        # testIter:             Testing iterations
        n = 100
        w = 15
        inputLen = 300
        columnDimensions = 2048
        numActiveColumnsPerInhArea = 40
        stimulusThreshold = 0
        spSeed = 1956
        stimulusThresholdInh = 0.00001
        kDutyCycleFactor = 0.01
        spVerbosity = 0
        testIter = 100

        spTest = SpatialPooler(
            columnDimensions=(columnDimensions, 1),
            inputDimensions=(1, inputLen),
            potentialRadius=inputLen / 2,
            numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
            spVerbosity=spVerbosity,
            stimulusThreshold=stimulusThreshold,
            seed=spSeed)
        initialPermanence = spTest._initialPermanence()
        spTest._masterPotentialM, spTest._masterPermanenceM = (
            spTest._makeMasterCoincidences(spTest.numCloneMasters,
                                           spTest._coincRFShape,
                                           spTest.potentialPct,
                                           initialPermanence, spTest.random))

        spTest._updateInhibitionObj()
        boostFactors = numpy.ones(columnDimensions)

        for i in range(testIter):
            spTest._iterNum = i
            # random binary input
            input_ = numpy.zeros((1, inputLen))
            nonzero = numpy.random.random(inputLen)
            input_[0][numpy.where(nonzero < float(w) / float(n))] = 1

            # overlap step
            spTest._computeOverlapsFP(
                input_, stimulusThreshold=spTest.stimulusThreshold)
            spTest._overlaps *= boostFactors
            onCellIndices = numpy.where(spTest._overlaps > 0)
            spTest._onCells.fill(0)
            spTest._onCells[onCellIndices] = 1
            denseOn = spTest._onCells

            # update _dutyCycleBeforeInh
            spTest.dutyCyclePeriod = min(i + 1, 1000)
            spTest._dutyCycleBeforeInh = (
                (spTest.dutyCyclePeriod - 1) * spTest._dutyCycleBeforeInh +
                denseOn) / spTest.dutyCyclePeriod
            dutyCycleTieBreaker = spTest._dutyCycleAfterInh.copy()
            dutyCycleTieBreaker *= kDutyCycleFactor

            # inhibition step
            numOn = spTest._inhibitionObj.compute(
                spTest._overlaps + dutyCycleTieBreaker,
                spTest._onCellIndices,
                stimulusThresholdInh,  # stimulusThresholdInh
                max(spTest._overlaps) / 1000,  # addToWinners
            )
            # update _dutyCycleAfterInh
            spTest._onCells.fill(0)
            onCellIndices = spTest._onCellIndices[0:numOn]
            spTest._onCells[onCellIndices] = 1
            denseOn = spTest._onCells
            spTest._dutyCycleAfterInh = (
                ((spTest.dutyCyclePeriod - 1) * spTest._dutyCycleAfterInh +
                 denseOn) / spTest.dutyCyclePeriod)

            # learning step
            spTest._adaptSynapses(onCellIndices, [], input_)

            # update boostFactor
            spTest._updateBoostFactors()
            boostFactors = spTest._firingBoostFactors

            # update dutyCycle and boost
            if ((spTest._iterNum + 1) % 50) == 0:
                spTest._updateInhibitionObj()
                spTest._updateMinDutyCycles(spTest._dutyCycleBeforeInh,
                                            spTest.minPctDutyCycleBeforeInh,
                                            spTest._minDutyCycleBeforeInh)
                spTest._updateMinDutyCycles(spTest._dutyCycleAfterInh,
                                            spTest.minPctDutyCycleAfterInh,
                                            spTest._minDutyCycleAfterInh)

            # test numOn and spTest.numActiveColumnsPerInhArea
            self.assertEqual(
                numOn, spTest.numActiveColumnsPerInhArea,
                "Error at input %s, actual numOn are: %i, "
                "numActivePerInhAre is: %s" %
                (i, numOn, numActiveColumnsPerInhArea))