Example #1
def main():
  x = 10
  y = 10
  steps = 10000
  world = np.array([i for i in xrange(625)])
  world.resize((25, 25))
  spInputSize = 21*21
  sp = SpatialPooler(
      inputDimensions=(spInputSize,),
      columnDimensions=(25,),
      potentialRadius=spInputSize,
      numActiveColumnsPerInhArea=1,
      synPermActiveInc=0.1,
      synPermInactiveDec=0.5,
      boostStrength=1.0,
  )
  csFields = generateCenterSurroundFields()
  output = np.zeros((25,), dtype=np.uint32)
  for _ in xrange(steps):
    active = getActive(world, x, y)
    assert len(active) == 25, "{}, {}: {}".format(x, y, active)
    activeInput = np.zeros((625,), dtype=np.uint32)
    for v in active:
      activeInput[v] = 1
    centerSurround = processCenterSurround(csFields, activeInput)
    print centerSurround

    sp.compute(centerSurround, True, output)
    x, y = getNewLocation(x, y, 25, 2, False)

  for i in xrange(25):
    permanence = np.zeros((spInputSize,))
    sp.getPermanence(i, permanence)
    plt.imshow(permanence.reshape((21, 21)), cmap="hot", interpolation="nearest")
    plt.show()
Example #2
  def test_whether_is_the_same_as_spatial_pooler(self):
    """
    Naive reality check for the encoding function 
    of the lateral pooler implementation.
    """
    n = 1024
    m = 784
    d = 100
    w = 20

    X = np.random.randint(0,2,size=(m,d))
    Y_nup = np.zeros((n,d))
    Y_lat = np.zeros((n,d))

    params_nup = {
        "inputDimensions": [m,1],
        "columnDimensions": [n,1],
        "potentialRadius": n,
        "potentialPct": 1.0,
        "globalInhibition": True,
        "localAreaDensity": -1.0,
        "numActiveColumnsPerInhArea": w,
        "stimulusThreshold": 0,
        "synPermInactiveDec": 0.05,
        "synPermActiveInc"  : 0.1,
        "synPermConnected"  : 0.5,
        "minPctOverlapDutyCycle": 0.001,
        "dutyCyclePeriod": 1000,
        "boostStrength"  : 100.0,
        "seed": 1936 }

    params_lat = params_nup.copy()
    params_lat["lateralLearningRate"]  = 0.0
    params_lat["enforceDesiredWeight"] = False

    sp_nup = SpatialPooler(**params_nup)
    sp_lat = LateralPooler(**params_lat)


    for t in range(d):
      sp_nup.compute(X[:,t], False, Y_nup[:,t])
      sp_lat.compute(X[:,t], False, Y_lat[:,t])
    
    self.assertTrue(np.all(Y_nup == Y_lat), 
      "Produces wrong output even without learning.")


    for t in range(d):
      sp_nup.compute(X[:,t], True, Y_nup[:,t])
      sp_lat.compute(X[:,t], True, Y_lat[:,t])

    self.assertTrue(np.all(Y_nup == Y_lat), 
      "Wrong outputs, something diverges during learning.")

    W_nup = get_W(sp_nup)
    W_lat = get_W(sp_lat)
    self.assertTrue(np.all(W_nup == W_lat), 
      "Wrong synaptic weights, something diverges during learning.")
Example #3
def create_sp(self):
    sp = SpatialPooler(
        # How large the input encoding will be.
        inputDimensions=(len(self.encoding[0]),),
        # How many mini-columns will be in the Spatial Pooler.
        columnDimensions=(2048,),
        # What percent of the column's receptive field is
        # available for potential synapses?
        potentialPct=0.85,
        # This means that the input space has no topology.
        globalInhibition=True,
        localAreaDensity=-1.0,
        # Roughly 2%, given that there is only one inhibition area because
        # we have turned on globalInhibition (40 / 2048 = 0.0195).
        numActiveColumnsPerInhArea=40.0,
        # How quickly synapses grow and degrade.
        synPermInactiveDec=0.005,
        synPermActiveInc=0.04,
        synPermConnected=0.1,
        # boostStrength controls the strength of boosting. Boosting
        # encourages efficient usage of SP columns.
        boostStrength=3.0,
        # Random number generator seed.
        seed=1956,
        # Determines if inputs at the beginning and end of an input dimension
        # should be considered neighbors when mapping columns to inputs.
        wrapAround=False)
    return sp
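
A minimal, hypothetical usage sketch (not part of the original example): here model is assumed to be an instance of the class defining create_sp() and owning a list of binary encodings in model.encoding; compute() fills the output array in place.

import numpy as np

# `model` and `model.encoding` are assumptions for illustration only.
sp = model.create_sp()
active_columns = np.zeros(2048, dtype=np.uint32)     # one slot per mini-column
sp.compute(model.encoding[0], True, active_columns)  # True enables learning
print(active_columns.nonzero()[0])                   # ~40 winning column indices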
Example #4
def init_imgs(write_output):
    #problems = parse_images.get_problems(folder_name)

    folder_name = 'Data/raw'
    problems = []
    for sub_folder in os.listdir(folder_name):
        f = os.path.join(folder_name, sub_folder)
        problems += parse_images.get_problems(f)
    print(len(problems))

    for problem in problems:
        problem['Input'] = problem['Input'].reshape((3, -1))
        problem['Output'] = problem['Output'].reshape((6, -1))

    # dimensions
    num_cols1 = len(problems[0]['Input'][0])
    tm_cols = num_cols1
    layers = []
    if not write_output:
        num_cols2 = num_cols1 // 2
        num_cols3 = num_cols2 // 2
        sp1 = SpatialPooler(inputDimensions=(num_cols1, ),
                            columnDimensions=(num_cols2, ),
                            numActiveColumnsPerInhArea=-1,
                            localAreaDensity=0.05)

        sp2 = SpatialPooler(inputDimensions=(num_cols2, ),
                            columnDimensions=(num_cols3, ),
                            numActiveColumnsPerInhArea=-1,
                            localAreaDensity=0.05)
        tm_cols = num_cols3
        layers = [(sp1, sp_compute), (sp2, sp_compute)]

    bckTM = BTM(numberOfCols=tm_cols,
                cellsPerColumn=10,
                initialPerm=0.5,
                connectedPerm=0.5,
                minThreshold=10,
                newSynapseCount=10,
                activationThreshold=10,
                pamLength=10)

    layers += [(bckTM, bk_tm_compute)]
    return (layers, problems)
Example #5
    def testCompatibilityCppPyDirectCall1D(self):
        """Check SP implementations have same behavior with 1D input."""

        pySp = PySpatialPooler(inputDimensions=[121], columnDimensions=[300])
        cppSp = CPPSpatialPooler(inputDimensions=[121], columnDimensions=[300])

        data = numpy.zeros([121], dtype=uintType)
        for i in range(21):
            data[i] = 1

        nCols = 300
        d1 = numpy.zeros(nCols, dtype=uintType)
        d2 = numpy.zeros(nCols, dtype=uintType)

        pySp.compute(data, True, d1)  # learn
        cppSp.compute(data, True, d2)

        d1 = d1.nonzero()[0].tolist()
        d2 = d2.nonzero()[0].tolist()
        self.assertListEqual(
            d1, d2, "SP outputs are not equal: \n%s \n%s" % (str(d1), str(d2)))
Example #6
def main():
    x = 10
    y = 10
    steps = 10000
    history = []
    world = np.array([i for i in xrange(625)])
    world.resize((25, 25))
    sp = SpatialPooler(
        inputDimensions=(625, ),
        columnDimensions=(25, ),
        potentialRadius=625,
        numActiveColumnsPerInhArea=1,
    )
    output = np.zeros((25, ), dtype=np.uint32)
    for _ in xrange(steps):
        active = getActive(world, x, y)
        assert len(active) == 25, "{}, {}: {}".format(x, y, active)
        activeInput = np.zeros((625, ), dtype=np.uint32)
        for v in active:
            activeInput[v] = 1
        history.append(active)
        sp.compute(activeInput, True, output)
        x, y = getNewLocation(x, y, 25, 2, False)

    for i in xrange(25):
        permanence = np.zeros((625, ))
        sp.getPermanence(i, permanence)
        plt.imshow(permanence.reshape((25, 25)),
                   cmap="hot",
                   interpolation="nearest")
        plt.show()
Example #7
def runTrial(ww, numColumns, potentialPct, inc, dec, mpo, dutyCycle, boost,
             steps, rr, spW, stimulusThreshold, connected, stepSize, jumpProb,
             directionStability):
  ws = ww ** 2
  x = 10
  y = 10
  locationHeatmap = np.zeros((ww, ww))
  history = []
  world = np.array([i for i in xrange(ws)])
  world.resize((ww, ww))
  sp = SpatialPooler(
      inputDimensions=(ws,),
      columnDimensions=(numColumns,),
      potentialRadius=ws,
      potentialPct=potentialPct,
      numActiveColumnsPerInhArea=spW,
      stimulusThreshold=stimulusThreshold,
      synPermActiveInc=inc,
      synPermInactiveDec=dec,
      synPermConnected=connected,
      minPctOverlapDutyCycle=mpo,
      dutyCyclePeriod=dutyCycle,
      boostStrength=boost,
      seed=1936,
      globalInhibition=True,
  )
  output = np.zeros((numColumns,), dtype=np.uint32)
  direction = 0
  for i in xrange(steps):
    locationHeatmap[x][y] += 1
    active = getActive(world, ww, x, y, rr)
    history.append(active)
    activeInput = np.zeros((ws,), dtype=np.uint32)
    for v in active:
      activeInput[v] = 1
    sp.compute(activeInput, True, output)
    x, y, direction = getNewLocation(x, y, ww, rr, wrap=True,
                                     locationHeatmap=locationHeatmap,
                                     stepSize=stepSize, jumpProb=jumpProb,
                                     direction=direction,
                                     directionStability=directionStability)

    if (i + 1) % 100 == 0:
      saveImage(history, ws, ww, numColumns, locationHeatmap, potentialPct,
                inc, dec, mpo, dutyCycle, boost, rr, spW, i + 1, sp)

  saveImage(history, ws, ww, numColumns, locationHeatmap, potentialPct, inc,
            dec, mpo, dutyCycle, boost, rr, spW, steps, sp)
Example #8
def definir_SP(SIZE_ENCODER_):

    """
    Returns the configured SpatialPooler instance.
    """

    N_COLUMNS = 2048

    sp = SpatialPooler(
        inputDimensions = (SIZE_ENCODER_,),
        # In this case we will use 2048 mini-columns arranged in a
        # "linear array".
        columnDimensions = (N_COLUMNS,),

        # Set the potential radius of each mini-column to the whole
        # input space.
        potentialRadius = SIZE_ENCODER_,

        # How many bits of the input space may hold a permanence with each
        # mini-column. Attention: having a permanence value does not mean the
        # synapse is connected; to be connected, the permanence ("connection
        # strength") must be higher than the threshold.
        potentialPct = 0.8,

        # The winning columns are selected within a neighborhood, though in
        # this code we treat all the columns as neighbors of one another.
        globalInhibition = True,
        localAreaDensity = -1.0,
        numActiveColumnsPerInhArea = NUM_ACTIVE_COLUMNS,

        # If we fix the number of active columns per input, there is no real
        # need to set a stimulusThreshold: the effective threshold becomes the
        # overlap of the 40th-ranked column (NEEDS CHECKING), and any other
        # mini-column with less overlap will not be active for this input.
        stimulusThreshold = 0,

        # If a column is active, the connected off bits (those that are not 1
        # in the input) have their permanence ("synapse strength") decremented.
        synPermInactiveDec = 0.0005,

        # If a column is active, the connected on bits (those that are 1 in
        # the input) have their permanence incremented.
        synPermActiveInc = 0.003,

        # How strong the connection between an on bit and a mini-column must
        # be for them to count as connected.
        synPermConnected = 0.2,

        # Still to check: whether bits with permanence below synPermConnected
        # decrement or increment when a column is active - I think they
        # increment.

        # Number between 0 and 1.0, used to set a floor on how often a column
        # should have at least stimulusThreshold active inputs.
        minPctOverlapDutyCycle = 0.001,

        # Over how many "inputs seen" the duty cycles are measured.
        dutyCyclePeriod = 100,

        boostStrength = 0.01,

        seed = 47,
        spVerbosity = 0,
        wrapAround = False
    )
    return sp
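
A hypothetical usage sketch for definir_SP (not part of the original source): the 1024-bit encoder width is an assumption, and NUM_ACTIVE_COLUMNS must already be defined at module level, as the function requires.

import numpy as np

SIZE_ENCODER = 1024                          # assumed encoder width
sp = definir_SP(SIZE_ENCODER)
encoded = np.random.randint(0, 2, size=SIZE_ENCODER).astype(np.uint32)
active = np.zeros(2048, dtype=np.uint32)     # matches N_COLUMNS
sp.compute(encoded, True, active)            # learning enabled
print(active.nonzero()[0])                   # the NUM_ACTIVE_COLUMNS winners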
Example #9
    def test_whether_is_the_same_as_spatial_pooler(self):
        """
        Naive reality check for the encoding function
        of the lateral pooler implementation.
        """
        n = 1024
        m = 784
        d = 100
        w = 20

        X = np.random.randint(0, 2, size=(m, d))
        Y_nup = np.zeros((n, d))
        Y_lat = np.zeros((n, d))

        params_nup = {
            "inputDimensions": [m, 1],
            "columnDimensions": [n, 1],
            "potentialRadius": n,
            "potentialPct": 1.0,
            "globalInhibition": True,
            "localAreaDensity": -1.0,
            "numActiveColumnsPerInhArea": w,
            "stimulusThreshold": 0,
            "synPermInactiveDec": 0.05,
            "synPermActiveInc": 0.1,
            "synPermConnected": 0.5,
            "minPctOverlapDutyCycle": 0.001,
            "dutyCyclePeriod": 1000,
            "boostStrength": 100.0,
            "seed": 1936
        }

        params_lat = params_nup.copy()
        params_lat["lateralLearningRate"] = 0.0
        params_lat["enforceDesiredWeight"] = False

        sp_nup = SpatialPooler(**params_nup)
        sp_lat = LateralPooler(**params_lat)

        for t in range(d):
            sp_nup.compute(X[:, t], False, Y_nup[:, t])
            sp_lat.compute(X[:, t], False, Y_lat[:, t])

        self.assertTrue(np.all(Y_nup == Y_lat),
                        "Produces wrong output even without learning.")

        for t in range(d):
            sp_nup.compute(X[:, t], True, Y_nup[:, t])
            sp_lat.compute(X[:, t], True, Y_lat[:, t])

        self.assertTrue(np.all(Y_nup == Y_lat),
                        "Wrong outputs, something diverges during learning.")

        W_nup = get_W(sp_nup)
        W_lat = get_W(sp_lat)
        self.assertTrue(
            np.all(W_nup == W_lat),
            "Wrong synaptic weights, something diverges during learning.")
Example #10
def main():
  x = 10
  y = 10
  steps = 10000
  ww = 50
  wn = ww ** 2
  rr = 3
  rw = rr * 2 + 1
  nCols = 25
  history = []
  world = np.array([i for i in xrange(wn)])
  world.resize((ww, ww))
  binaryWorld = np.zeros((wn,), dtype=np.uint32)
  binaryWorld[:wn // 2] = 1
  np.random.shuffle(binaryWorld)
  sp = SpatialPooler(
      inputDimensions=(rw ** 2,),
      columnDimensions=(nCols,),
      potentialRadius=25,
      potentialPct=1.0,
      numActiveColumnsPerInhArea=1,
      synPermActiveInc=0.01,
      synPermInactiveDec=0.003,
      boostStrength=15.0,
      dutyCyclePeriod=5,
  )
  output = np.zeros((nCols,), dtype=np.uint32)
  for _ in xrange(steps):
    active = getActive(world, ww, x, y, rr)
    assert len(active) == (rw) ** 2, "{}, {}: {}".format(x, y, active)
    active = np.array(list(active))
    activeInput = binaryWorld[active]
    sp.compute(activeInput, True, output)
    x, y = getNewLocation(x, y, ww, rr, False)

  # Check firing fields
  sp.setBoostStrength(0.0)

  firingFields = {}
  for i in xrange(nCols):
    firingFields[i] = np.zeros((ww-rw, ww-rw), dtype=np.uint32)
  for i in xrange(0, ww-rw+1):
    for j in xrange(0, ww-rw+1):
      active = getActive(world, ww, i, j, rr)
      active = np.array(list(active))
      activeInput = binaryWorld[active]
      sp.compute(activeInput, True, output)
      for column in list(output.nonzero()):
        firingFields[column[0]][i:i+rr, j:j+rr] += 1

  for col, ff in firingFields.iteritems():
    plt.imshow(ff, cmap="hot", interpolation="nearest")
    plt.show()
Example #11
  def testCompatibilityCppPyDirectCall2D(self):
    """Check SP implementations have same behavior with 2D input."""

    pySp = PySpatialPooler(
        inputDimensions=[121, 1], columnDimensions=[30, 30])
    cppSp = CPPSpatialPooler(
        inputDimensions=[121, 1], columnDimensions=[30, 30])

    data = numpy.zeros([121, 1], dtype=uintType)
    for i in xrange(21):
      data[i][0] = 1

    nCols = 900
    d1 = numpy.zeros(nCols, dtype=uintType)
    d2 = numpy.zeros(nCols, dtype=uintType)

    pySp.compute(data, True, d1) # learn
    cppSp.compute(data, True, d2)

    d1 = d1.nonzero()[0].tolist()
    d2 = d2.nonzero()[0].tolist()
    self.assertListEqual(
        d1, d2, "SP outputs are not equal: \n%s \n%s" % (str(d1), str(d2)))
Example #12
def loadNetwork(path):
    """ Deserialize the network from the given path
    """
    # Create the network structure
    (net, layers) = createNetwork()

    # Replace algorithm components with loaded (connections etc.)
    for key, layer in layers.iteritems():
        filename = "%s/%s.tmp" % (path, key)
        print 'load', key, filename
        with open(filename, "rb") as f:
            # AgentStateRegion with ETM instance
            if 'L5_TM' in key:
                proto = AgentStateRegionProto.read_packed(f)
                layers[key] = AgentStateRegion.readFromProto(proto)
            # ReinforcementRegion with ETM instance
            elif any(name in key for name in ['D1_TM', 'D2_TM']):
                proto = ReinforcementRegionProto.read_packed(f)
                layers[key] = ReinforcementRegion.readFromProto(proto)
            # TemporalPoolerRegion using UnionTemporalPooler (extended Spatial Pooler)
            elif 'TP' in key:
                proto = MyTemporalPoolerRegionProto.read_packed(f)
                layers[key] = MyTemporalPoolerRegion.readFromProto(proto)
            elif 'Motor' in key:
                proto = MotorRegionProto.read_packed(f)
                layers[key] = MRegion.readFromProto(proto)
            else:
                # MySPRegion - sp cpp instance (L4,L5,D1,D2)
                if 'SP' in key:
                    proto = SpatialPoolerProto.read_packed(f)
                    instance = SpatialPooler.read(proto)
                # MyTMRegion - cpp backtrackingTM instance (L2,L3)
                elif any(name in key for name in ['L2_TM', 'L3_TM']):
                    instance = BacktrackingTMCPP.readFromFile(f)
                # ExtendedTemporalMemory - cpp instance (L4,L5,D1,D2)
                elif 'TM' in key:
                    # Read from file - schema.read(file)
                    proto = ExtendedTemporalMemoryProto.read_packed(f)
                    # Generate class instance from the schema
                    instance = ExtendedTemporalMemory.read(proto)
                elif 'sensor' in key:
                    continue
                layer.setAlgorithmInstance(instance)

    return (net, layers)
Example #13
def main():
    # cluster similar inputs together in SDR space
    s = SpatialPooler()
    print(type(s))

    # powerful sequence memory in SDR space
    t = TemporalMemory()
    print(type(t))

    # computes rolling Gaussian based on raw anomaly scores and then their
    # likelihood
    a = AnomalyLikelihood()
    print(type(a))

    # temporally groups active cell sets from TM
    u = UnionTemporalPooler()
    print(type(u))

    # learning pairings of Union representations and labeled classes
    c = SDRClassifier()
    print(type(c))
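
The classes above are typically chained encoder output -> SpatialPooler -> TemporalMemory. A minimal wiring sketch, not from the original example; the 1000-bit encoding width and 2048-column size are illustrative assumptions.

import numpy as np

encoding_width = 1000                        # assumed encoder output width
sp = SpatialPooler(inputDimensions=(encoding_width,),
                   columnDimensions=(2048,),
                   globalInhibition=True,
                   numActiveColumnsPerInhArea=40)
tm = TemporalMemory(columnDimensions=(2048,))

encoding = np.random.randint(0, 2, size=encoding_width).astype(np.uint32)
active_columns = np.zeros(2048, dtype=np.uint32)
sp.compute(encoding, True, active_columns)            # spatial pooling step
tm.compute(active_columns.nonzero()[0], learn=True)   # sequence memory step
print(len(tm.getActiveCells()))                       # cells active this step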
Example #14
def main():
    x = 10
    y = 10
    steps = 10000
    world = np.array([i for i in xrange(625)])
    world.resize((25, 25))
    spInputSize = 21 * 21
    sp = SpatialPooler(
        inputDimensions=(spInputSize, ),
        columnDimensions=(25, ),
        potentialRadius=spInputSize,
        numActiveColumnsPerInhArea=1,
        synPermActiveInc=0.1,
        synPermInactiveDec=0.5,
        boostStrength=1.0,
    )
    csFields = generateCenterSurroundFields()
    output = np.zeros((25, ), dtype=np.uint32)
    for _ in xrange(steps):
        active = getActive(world, x, y)
        assert len(active) == 25, "{}, {}: {}".format(x, y, active)
        activeInput = np.zeros((625, ), dtype=np.uint32)
        for v in active:
            activeInput[v] = 1
        centerSurround = processCenterSurround(csFields, activeInput)
        print centerSurround

        sp.compute(centerSurround, True, output)
        x, y = getNewLocation(x, y, 25, 2, False)

    for i in xrange(25):
        permanence = np.zeros((spInputSize, ))
        sp.getPermanence(i, permanence)
        plt.imshow(permanence.reshape((21, 21)),
                   cmap="hot",
                   interpolation="nearest")
        plt.show()
Example #15
def HTM_AD(
        Data='Test',
        vars={'value': ['num']},
        prec_param=5,
        pooler_out=2024,  # Number of columns of the pooler output
        cell_col=5,  # HTM cells per column
        W=72,  # Window parameter
        W_prim=5,  # Local window for anomaly detection likelihood
        eps=1e-6,  # to avoid divisions by zero
        athreshold=0.95):
    """
    This function performs HTM based anomaly detection on a time series provided
    :param Data:
    :param vars: Possible values: num, tod, weekend
    :param prec_param: A parameter that defines how much precision the number encoder has
        The encoder precision depends on the variability of the data,
        The real precision is computed taking into account both the precision parameter and data std
        A high precision might mean a high error at predicting the variable value in noisy variables
    :param pooler_out: Number of columns of the pooler output
    :param cell_col: HTM cells per column
    :param W: Window parameter
    :param W_prim: Local window for anomaly detection likelihood
    :param eps: to Avoid by zero divisions
    :param athreshold: To classify based on anomaly likelihood whether there is an anomaly or not
    :return: The Data + 3 columns
        Anomaly: indicates the error of within the value predicted by the HTM network
        Anomaly_likelihood: indicates the likelihood of the data into being anomalous
        Anomaly_flag: classifies the data in anomalous vs non anomalous
    """

    if Data == 'Test':  # If no data is provided, simply load the temperature benchmark dataset
        # Import data
        Data = pd.read_csv('anomaly_API/Data/sample.csv',
                           parse_dates=True,
                           index_col='timestamp')
        Data = Data.resample('H').bfill().interpolate()

    TODE = DateEncoder(timeOfDay=(21, 1))
    WENDE = DateEncoder(weekend=21)

    var_encoders = set()
    # Spatial Pooler Parameters
    for x in vars:
        for y in vars[x]:
            if y == 'num':
                exec(
                    "RDSE_" + x +
                    " = RandomDistributedScalarEncoder(resolution=Data['" + x +
                    "'].std()/prec_param)", locals(), globals())
                var_encoders.add(Encoder(x, ["RDSE_" + x]))
            elif y == 'weekend':
                var_encoders.add(Encoder(x, ["WENDE"]))
            elif y == 'tod':
                var_encoders.add(Encoder(x, ["TODE"]))
            else:
                return {"error": "Variable encoder type is not recognized "}

    encoder_width = 0  # Computes encoder width
    for x in var_encoders:
        for y in x.encoders:
            exec("s = " + y + ".getWidth()", locals(), globals())
            encoder_width += s

    SP = SpatialPooler(
        inputDimensions=encoder_width,
        columnDimensions=pooler_out,
        potentialPct=0.8,
        globalInhibition=True,
        numActiveColumnsPerInhArea=pooler_out // 50,  # Gets 2% of the total area
        boostStrength=1.0,
        wrapAround=False)
    TM = TemporalMemory(columnDimensions=(pooler_out, ),
                        cellsPerColumn=cell_col)

    Data['Anomaly'] = 0.0
    Data['Anomaly_Likelihood'] = 0.0

    # Train Spatial Pooler
    print("Spatial pooler learning")

    start = time.time()

    active_columns = np.zeros(pooler_out)

    for x in range(len(Data)):
        encoder = multiencode(var_encoders, Data, x)
        SP.compute(encoder, True, active_columns)

    end = time.time()
    print(end - start)

    # Temporal pooler
    print("Temporal pooler learning")

    start = time.time()

    A_score = np.zeros(len(Data))
    for x in range(len(Data)):
        encoder = multiencode(var_encoders, Data, x)
        SP.compute(encoder, False, active_columns)
        col_index = active_columns.nonzero()[0]
        TM.compute(col_index, learn=True)
        if x > 0:
            inter = set(col_index).intersection(Prev_pred_col)
            inter_l = len(inter)
            active_l = len(col_index)
            A_score[x] = 1 - (inter_l / active_l)
            Data.iat[x, -2] = A_score[x]
        Prev_pred_col = list(
            set(x // cell_col for x in TM.getPredictiveCells()))

    end = time.time()
    print(end - start)

    AL_score = np.zeros(len(Data))
    # Computes the likelihood of the anomaly
    for x in range(len(Data)):
        if x > 0:
            W_vec = A_score[max(0, x - W):x]
            W_prim_vec = A_score[max(0, x - W_prim):x]
            AL_score[x] = 1 - 2 * norm.sf(
                abs(np.mean(W_vec) - np.mean(W_prim_vec)) /
                max(np.std(W_vec), eps))
            Data.iat[x, -1] = AL_score[x]

    Data['Anomaly_flag'] = athreshold < Data['Anomaly_Likelihood']

    return Data
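
A hypothetical invocation: with Data='Test' the function falls back to the bundled sample CSV (path assumed present, as in the code above), and the returned frame carries the three added columns.

# Illustrative call; argument values are assumptions.
result = HTM_AD(Data='Test', vars={'value': ['num']}, athreshold=0.95)
print(result[['Anomaly', 'Anomaly_Likelihood', 'Anomaly_flag']].tail())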
Example #16
uintType = "uint32"
inputDimensions = (1000,1)
columnDimensions = (2048,1)
inputSize = np.array(inputDimensions).prod()
columnNumber = np.array(columnDimensions).prod()
inputArray = np.zeros(inputSize, dtype=uintType)

for i in range(inputSize):
  inputArray[i] = random.randrange(2)

activeCols = np.zeros(columnNumber, dtype=uintType)
sp = SP(inputDimensions,
  columnDimensions,
  potentialRadius = int(0.5*inputSize),
  numActiveColumnsPerInhArea = int(0.02*columnNumber),
  globalInhibition = True,
  seed = 1,
  synPermActiveInc = 0.01,
  synPermInactiveDec = 0.008
   )

# Part 1:
# -------
# A column connects to a subset of the input vector (specified
# by both the potentialRadius and potentialPct). The overlap score
# for a column is the number of connections to the input that become
# active when presented with a vector. When learning is 'on' in the SP,
# the active connections are reinforced, whereas those inactive are
# depressed (according to parameters synPermActiveInc and synPermInactiveDec).
# In order for the SP to create a sparse representation of the input, it
# will select a small percentage (usually 2%) of its most active columns,
# i.e. those columns with the highest overlap with the input.
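
Part 1 continued: a short sketch, consistent with the setup above, that presents inputArray to the SP and inspects which columns won the inhibition (the variable names come from the snippet above).

sp.compute(inputArray, True, activeCols)  # learn=True reinforces active synapses
print(np.nonzero(activeCols)[0])          # the winning (active) column indices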
Example #17
  def testInhibition(self):
    """
    Test if the firing number of coincidences after inhibition
    equals spatial pooler numActiveColumnsPerInhArea.
    """
    # Miscellaneous variables:
    # n, w:                 n, w of encoders
    # inputLen:             Length of binary input
    # synPermConnected:     Spatial pooler synPermConnected
    # synPermActiveInc:     Spatial pooler synPermActiveInc
    # connectPct:           Initial connect percentage of permanences
    # columnDimensions:     Number of spatial pooler coincidences
    # numActiveColumnsPerInhArea:  Spatial pooler numActiveColumnsPerInhArea
    # stimulusThreshold:    Spatial pooler stimulusThreshold
    # spSeed:               Spatial pooler for initial permanences
    # stimulusThresholdInh: Parameter for inhibition, default value 0.00001
    # kDutyCycleFactor:     kDutyCycleFactor for dutyCycleTieBreaker in
    #                       Inhibition
    # spVerbosity:          Verbosity to print other sp initial parameters
    # testIter:             Testing iterations
    n = 100
    w = 15
    inputLen = 300
    columnDimensions = 2048
    numActiveColumnsPerInhArea = 40
    stimulusThreshold = 0
    spSeed = 1956
    stimulusThresholdInh = 0.00001
    kDutyCycleFactor = 0.01
    spVerbosity = 0
    testIter = 100

    spTest = SpatialPooler(
                           columnDimensions=(columnDimensions, 1),
                           inputDimensions=(1, inputLen),
                           potentialRadius=inputLen / 2,
                           numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
                           spVerbosity=spVerbosity,
                           stimulusThreshold=stimulusThreshold,
                           seed=spSeed
                           )
    initialPermanence = spTest._initialPermanence()
    spTest._masterPotentialM, spTest._masterPermanenceM = (
        spTest._makeMasterCoincidences(spTest.numCloneMasters,
                                       spTest._coincRFShape,
                                       spTest.potentialPct,
                                       initialPermanence,
                                       spTest.random))

    spTest._updateInhibitionObj()
    boostFactors = numpy.ones(columnDimensions)

    for i in range(testIter):
      spTest._iterNum = i
      # random binary input
      input_ = numpy.zeros((1, inputLen))
      nonzero = numpy.random.random(inputLen)
      input_[0][numpy.where(nonzero < float(w)/float(n))] = 1

      # overlap step
      spTest._computeOverlapsFP(input_,
                                stimulusThreshold=spTest.stimulusThreshold)
      spTest._overlaps *= boostFactors
      onCellIndices = numpy.where(spTest._overlaps > 0)
      spTest._onCells.fill(0)
      spTest._onCells[onCellIndices] = 1
      denseOn = spTest._onCells

      # update _dutyCycleBeforeInh
      spTest.dutyCyclePeriod = min(i + 1, 1000)
      spTest._dutyCycleBeforeInh = (
          (spTest.dutyCyclePeriod - 1) *
          spTest._dutyCycleBeforeInh + denseOn) / spTest.dutyCyclePeriod
      dutyCycleTieBreaker = spTest._dutyCycleAfterInh.copy()
      dutyCycleTieBreaker *= kDutyCycleFactor

      # inhibition step
      numOn = spTest._inhibitionObj.compute(
          spTest._overlaps + dutyCycleTieBreaker, spTest._onCellIndices,
          stimulusThresholdInh,  # stimulusThresholdInh
          max(spTest._overlaps)/1000,  # addToWinners
      )
      # update _dutyCycleAfterInh
      spTest._onCells.fill(0)
      onCellIndices = spTest._onCellIndices[0:numOn]
      spTest._onCells[onCellIndices] = 1
      denseOn = spTest._onCells
      spTest._dutyCycleAfterInh = (((spTest.dutyCyclePeriod-1) *
                                    spTest._dutyCycleAfterInh + denseOn) /
                                   spTest.dutyCyclePeriod)

      # learning step
      spTest._adaptSynapses(onCellIndices, [], input_)

      # update boostFactor
      spTest._updateBoostFactors()
      boostFactors = spTest._firingBoostFactors

      # update dutyCycle and boost
      if ((spTest._iterNum+1) % 50) == 0:
        spTest._updateInhibitionObj()
        spTest._updateMinDutyCycles(
            spTest._dutyCycleBeforeInh,
            spTest.minPctDutyCycleBeforeInh,
            spTest._minDutyCycleBeforeInh)
        spTest._updateMinDutyCycles(
            spTest._dutyCycleAfterInh,
            spTest.minPctDutyCycleAfterInh,
            spTest._minDutyCycleAfterInh)

      # test numOn and spTest.numActiveColumnsPerInhArea
      self.assertEqual(numOn, spTest.numActiveColumnsPerInhArea,
                       "Error at input %s, actual numOn is: %i, "
                       "numActiveColumnsPerInhArea is: %s" % (
                           i, numOn, numActiveColumnsPerInhArea))
Example #18
    def _runLearnInference(self,
                           n=30,
                           w=15,
                           columnDimensions=2048,
                           numActiveColumnsPerInhArea=40,
                           spSeed=1951,
                           spVerbosity=0,
                           numTrainingRecords=100,
                           seed=42):
        # Instantiate two identical spatial poolers. One will be used only for
        # learning. The other will be trained with identical records, but with
        # random inference calls thrown in
        spLearnOnly = SpatialPooler(
            columnDimensions=(columnDimensions, 1),
            inputDimensions=(1, n),
            potentialRadius=n / 2,
            numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
            spVerbosity=spVerbosity,
            seed=spSeed,
            synPermInactiveDec=0.01,
            synPermActiveInc=0.2,
            synPermConnected=0.11,
        )

        spLearnInfer = SpatialPooler(
            columnDimensions=(columnDimensions, 1),
            inputDimensions=(1, n),
            potentialRadius=n / 2,
            numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
            spVerbosity=spVerbosity,
            seed=spSeed,
            synPermInactiveDec=0.01,
            synPermActiveInc=0.2,
            synPermConnected=0.11,
        )

        random.seed(seed)
        np.random.seed(seed)

        # Build up training set with numTrainingRecords patterns
        inputs = []  # holds post-encoded input patterns
        for i in xrange(numTrainingRecords):
            inputVector = np.zeros(n, dtype=realDType)
            inputVector[random.sample(xrange(n), w)] = 1
            inputs.append(inputVector)

        # Train each SP with identical inputs
        startTime = time.time()

        random.seed(seed)
        np.random.seed(seed)
        for i in xrange(numTrainingRecords):
            if spVerbosity > 0:
                print "Input #%d" % i
            # TODO: See https://github.com/numenta/nupic/issues/2072
            encodedInput = inputs[i]
            decodedOutput = np.zeros(columnDimensions)
            spLearnOnly.compute(encodedInput,
                                learn=True,
                                activeArray=decodedOutput)

        random.seed(seed)
        np.random.seed(seed)
        for i in xrange(numTrainingRecords):
            if spVerbosity > 0:
                print "Input #%d" % i
            # TODO: See https://github.com/numenta/nupic/issues/2072
            encodedInput = inputs[i]
            decodedOutput = np.zeros(columnDimensions)
            spLearnInfer.compute(encodedInput,
                                 learn=True,
                                 activeArray=decodedOutput)

        print "\nElapsed time: %.2f seconds\n" % (time.time() - startTime)

        # Test that both SPs are identical by checking learning stats
        # A more in depth test would check all the coincidences, duty cycles, etc.
        # ala tpDiff
        # Edit: spDiff has been written as an in depth tester of the spatial pooler
        learnOnlyStats = spLearnOnly.getLearningStats()
        learnInferStats = spLearnInfer.getLearningStats()

        success = True
        # Check that the two spatial poolers are equivalent after the same training.
        success = success and spDiff(spLearnInfer, spLearnOnly)
        self.assertTrue(success)
        # Make sure that the pickled and loaded SPs are equivalent.
        spPickle = pickle.dumps(spLearnOnly, protocol=0)
        spLearnOnlyLoaded = pickle.loads(spPickle)
        success = success and spDiff(spLearnOnly, spLearnOnlyLoaded)
        self.assertTrue(success)
        for k in learnOnlyStats.keys():
            if learnOnlyStats[k] != learnInferStats[k]:
                success = False
                print "Stat", k, "is different:", learnOnlyStats[
                    k], learnInferStats[k]

        self.assertTrue(success)
        if success:
            print "Test succeeded"
Example #19
def runHotgym(numRecords):
    with open(_PARAMS_PATH, "r") as f:
        modelParams = yaml.safe_load(f)["modelParams"]
        enParams = modelParams["sensorParams"]["encoders"]
        spParams = modelParams["spParams"]
        tmParams = modelParams["tmParams"]

    timeOfDayEncoder = DateEncoder(
        timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"])
    weekendEncoder = DateEncoder(
        weekend=enParams["timestamp_weekend"]["weekend"])
    scalarEncoder = RandomDistributedScalarEncoder(
        enParams["consumption"]["resolution"])

    encodingWidth = (timeOfDayEncoder.getWidth() + weekendEncoder.getWidth() +
                     scalarEncoder.getWidth())

    sp = SpatialPooler(
        # How large the input encoding will be.
        inputDimensions=(encodingWidth, ),
        # How many mini-columns will be in the Spatial Pooler.
        columnDimensions=(spParams["columnCount"], ),
        # What percent of the column's receptive field is available for potential
        # synapses?
        potentialPct=spParams["potentialPct"],
        # Potential radius should be set to the input size if there is global
        # inhibition.
        potentialRadius=encodingWidth,
        # This means that the input space has no topology.
        globalInhibition=spParams["globalInhibition"],
        localAreaDensity=spParams["localAreaDensity"],
        # Roughly 2%, given that there is only one inhibition area because we have
        # turned on globalInhibition (40 / 2048 = 0.0195)
        numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
        # How quickly synapses grow and degrade.
        synPermInactiveDec=spParams["synPermInactiveDec"],
        synPermActiveInc=spParams["synPermActiveInc"],
        synPermConnected=spParams["synPermConnected"],
        # boostStrength controls the strength of boosting. Boosting encourages
        # efficient usage of SP columns.
        boostStrength=spParams["boostStrength"],
        # Random number generator seed.
        seed=spParams["seed"],
        # TODO: is this useful?
        # Determines if inputs at the beginning and end of an input dimension should
        # be considered neighbors when mapping columns to inputs.
        wrapAround=True)

    tm = TemporalMemory(
        # Must be the same dimensions as the SP
        columnDimensions=(tmParams["columnCount"], ),
        # How many cells in each mini-column.
        cellsPerColumn=tmParams["cellsPerColumn"],
        # A segment is active if it has >= activationThreshold connected synapses
        # that are active due to infActiveState
        activationThreshold=tmParams["activationThreshold"],
        initialPermanence=tmParams["initialPerm"],
        # TODO: This comes from the SP params, is this normal
        connectedPermanence=spParams["synPermConnected"],
        # Minimum number of active synapses for a segment to be considered during
        # search for the best-matching segments.
        minThreshold=tmParams["minThreshold"],
        # The max number of synapses added to a segment during learning
        maxNewSynapseCount=tmParams["newSynapseCount"],
        permanenceIncrement=tmParams["permanenceInc"],
        permanenceDecrement=tmParams["permanenceDec"],
        predictedSegmentDecrement=0.0,
        maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
        maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
        seed=tmParams["seed"])

    classifier = SDRClassifierFactory.create()
    results = []
    with open(_INPUT_FILE_PATH, "r") as fin:
        reader = csv.reader(fin)
        headers = reader.next()
        reader.next()
        reader.next()

        for count, record in enumerate(reader):

            if count >= numRecords: break

            # Convert data string into Python date object.
            dateString = datetime.datetime.strptime(record[0],
                                                    "%m/%d/%y %H:%M")
            # Convert data value string into float.
            consumption = float(record[1])

            # To encode, we need to provide zero-filled numpy arrays for the encoders
            # to populate.
            timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
            weekendBits = numpy.zeros(weekendEncoder.getWidth())
            consumptionBits = numpy.zeros(scalarEncoder.getWidth())

            # Now we call the encoders to create bit representations for each value.
            timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
            weekendEncoder.encodeIntoArray(dateString, weekendBits)
            scalarEncoder.encodeIntoArray(consumption, consumptionBits)

            # Concatenate all these encodings into one large encoding for Spatial
            # Pooling.
            encoding = numpy.concatenate(
                [timeOfDayBits, weekendBits, consumptionBits])

            # Create an array to represent active columns, all initially zero. This
            # will be populated by the compute method below. It must have the same
            # dimensions as the Spatial Pooler.
            activeColumns = numpy.zeros(spParams["columnCount"])

            # Execute Spatial Pooling algorithm over input space.
            sp.compute(encoding, True, activeColumns)
            activeColumnIndices = numpy.nonzero(activeColumns)[0]

            # Execute Temporal Memory algorithm over active mini-columns.
            tm.compute(activeColumnIndices, learn=True)

            activeCells = tm.getActiveCells()

            # Get the bucket info for this input value for classification.
            bucketIdx = scalarEncoder.getBucketIndices(consumption)[0]

            # Run classifier to translate active cells back to scalar value.
            classifierResult = classifier.compute(recordNum=count,
                                                  patternNZ=activeCells,
                                                  classification={
                                                      "bucketIdx": bucketIdx,
                                                      "actValue": consumption
                                                  },
                                                  learn=True,
                                                  infer=True)

            # Print the best prediction for 1 step out.
            oneStepConfidence, oneStep = sorted(zip(
                classifierResult[1], classifierResult["actualValues"]),
                                                reverse=True)[0]
            print("1-step: {:16} ({:4.4}%)".format(oneStep,
                                                   oneStepConfidence * 100))
            results.append([oneStep, oneStepConfidence * 100, None, None])

        return results
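
A hypothetical driver for runHotgym; _PARAMS_PATH and _INPUT_FILE_PATH are module-level constants assumed to point at the model-params YAML and the input CSV used above.

if __name__ == "__main__":
    results = runHotgym(100)  # process the first 100 records
    print("%d predictions made" % len(results))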
Example #20
    def initialize(self):
        # Keep track of value range for spatial anomaly detection.
        self.minVal = None
        self.maxVal = None

        # Time of day encoder
        self.timeOfDayEncoder = DateEncoder(timeOfDay=(21, 9.49),
                                            name='time_enc')
        # RDSE encoder for the time series value.
        minResolution = 0.001
        rangePadding = abs(self.inputMax - self.inputMin) * 0.2
        minVal = self.inputMin - rangePadding
        maxVal = self.inputMax + rangePadding
        numBuckets = 130
        resolution = max(minResolution, (maxVal - minVal) / numBuckets)
        self.value_enc = RandomDistributedScalarEncoder(resolution=resolution,
                                                        name='value_rdse')

        # Spatial Pooler.
        encodingWidth = (self.timeOfDayEncoder.getWidth() +
                         self.value_enc.getWidth())
        self.sp = SpatialPooler(
            inputDimensions=(encodingWidth, ),
            columnDimensions=(2048, ),
            potentialPct=0.8,
            potentialRadius=encodingWidth,
            globalInhibition=1,
            numActiveColumnsPerInhArea=40,
            synPermInactiveDec=0.0005,
            synPermActiveInc=0.003,
            synPermConnected=0.2,
            boostStrength=0.0,
            seed=1956,
            wrapAround=True,
        )

        self.tm = TemporalMemory(
            columnDimensions=(2048, ),
            cellsPerColumn=32,
            activationThreshold=20,
            initialPermanence=.5,  # Increased to connectedPermanence.
            connectedPermanence=.5,
            minThreshold=13,
            maxNewSynapseCount=31,
            permanenceIncrement=0.04,
            permanenceDecrement=0.008,
            predictedSegmentDecrement=0.001,
            maxSegmentsPerCell=128,
            maxSynapsesPerSegment=128,  # Changed meaning. Also see connections.topology[2]
            seed=1993,
        )

        # Initialize the anomaly likelihood object
        numentaLearningPeriod = int(math.floor(self.probationaryPeriod / 2.0))
        self.anomalyLikelihood = anomaly_likelihood.AnomalyLikelihood(
            learningPeriod=numentaLearningPeriod,
            estimationSamples=self.probationaryPeriod - numentaLearningPeriod,
            reestimationPeriod=100,
        )

        self.age = 0
Example #21
def main(argv):
  args, _ = parse_argv()
  data_set        = args.data_set
  num_data_points = args.num_data_points
  sp_type         = args.pooler_type
  num_epochs      = args.num_epochs
  batch_size      = args.batch_size
  experiment_id   = args.experiment_id
  seed            = args.seed


  the_scripts_path = os.path.dirname(os.path.realpath(__file__)) # script directory

  sp_params_dict  = json.load(open(the_scripts_path + "/params.json"))
  if args.sp_params is not None:
    sp_params       = sp_params_dict[sp_type][args.sp_params]
  else:
    sp_params       = sp_params_dict[sp_type][data_set]
  sp_params["seed"] = seed

  if experiment_id is None:
    experiment_id = random_id(5)

  path = the_scripts_path + "/../results/{}_pooler_{}_{}/".format(sp_type, data_set,experiment_id)
  os.makedirs(os.path.dirname(path))

  print(
    "Experiment directory:\n\"{}\"\n"
    .format(path))

  X, _, X_test, _ = load_data(data_set, num_inputs = num_data_points) 

  n, m = get_shape(sp_params)
  X    = X[:,:num_data_points]
  d    = X.shape[1]
  

  results = {
    "inputs"      : [],
    "outputs"     : [],
    "feedforward" : []}


  ####################################################
  # 
  #               Old Spatial Pooler
  # 
  ####################################################
  if sp_type == "ordinary":
    
    pooler = OldSpatialPooler(**sp_params)

    print(
      "Training ordinary pooler:\n")

    # "Fit" the model to the training data
    for epoch in range(num_epochs):
      Y = np.zeros((n,d))

      perm = np.random.permutation(d)

      for t in range(d):

        sys.stdout.flush()
        sys.stdout.write(
          "\r{}/{}  {}/{}"
            .format(num_epochs, epoch + 1, d, t + 1))

        x = X[:,perm[t]]
        y = Y[:, t]
        pooler.compute(x, True, y)

      results["inputs"].append(X)
      results["outputs"].append(Y)
      results["feedforward"].append(get_permanence_vals(pooler)) 
      
  
  ####################################################
  # 
  #             New Spatial Pooler with 
  #     learned lateral inhibitory connections
  # 
  #####################################################
  elif sp_type == "lateral":

    pooler = LateralPooler(**sp_params)

    sys.stdout.write(
      "Training dynamic lateral pooler:\n")

    collect_feedforward = ModelInspector(lambda pooler: pooler.feedforward.copy(), on_batch = False )
    # collect_lateral     = ModelInspector(lambda pooler: pooler.inhibitory.copy(),  on_batch = False )
    training_log        = OutputCollector()
    print_training_status = Logger()

    # "Fit" the model to the training datasets
    pooler.fit(X, batch_size=batch_size, num_epochs=num_epochs, initial_epoch=0, callbacks=[collect_feedforward, training_log, print_training_status])

    results["inputs"]      = training_log.get_inputs()
    results["outputs"]     = training_log.get_outputs()
    results["feedforward"] = collect_feedforward.get_results()
    # results["lateral"]     = collect_lateral.get_results() 


  dump_dict(path, sp_params)
  dump_results(path, results)

  print(
    "\nDone.\n")
Example #22
class Entity():
    def __init__(self, columnCount, InputEncoderParams, toL4ConnectorParamsI,
                 toL4ConnectorParamsII, toL5ConnectorParams,
                 toD1ConnectorParams, toD2ConnectorParams, L4Params, L5Params,
                 k, D1Params, D2Params):
        self.columnCount = columnCount
        self.toL4ConnectorParamsI = toL4ConnectorParamsI
        self.toL4ConnectorParamsII = toL4ConnectorParamsII
        self.toL5ConnectorParams = toL5ConnectorParams
        self.toD1ConnectorParams = toD1ConnectorParams
        self.toD2ConnectorParams = toD2ConnectorParams
        self.L4Params = L4Params
        self.L5Params = L5Params
        self.k = k
        self.D1Params = D1Params
        self.D2Params = D2Params
        self.learning = False

        #encoder
        from nupic.encoders import MultiEncoder
        self.InputEncoder = MultiEncoder()
        self.InputEncoder.addMultipleEncoders(InputEncoderParams)
        print "Encoder Online"

        #spatialPoolers
        from nupic.algorithms.spatial_pooler import SpatialPooler
        self.toL4ConnectorI = SpatialPooler(
            inputDimensions=(toL4ConnectorParamsI["inputDimensions"], ),
            columnDimensions=(columnCount, ),
            potentialPct=toL4ConnectorParamsI["potentialPct"],
            globalInhibition=toL4ConnectorParamsI["globalInhibition"],
            localAreaDensity=toL4ConnectorParamsI["localAreaDensity"],
            numActiveColumnsPerInhArea=toL4ConnectorParamsI[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL4ConnectorParamsI["synPermInactiveDec"],
            synPermActiveInc=toL4ConnectorParamsI["synPermActiveInc"],
            synPermConnected=toL4ConnectorParamsI["synPermConnected"],
            boostStrength=toL4ConnectorParamsI["boostStrength"],
            seed=toL4ConnectorParamsI["seed"],
            wrapAround=toL4ConnectorParamsI["wrapAround"])  #this part sucks
        self.toL4ConnectorII = SpatialPooler(
            inputDimensions=(columnCount * 3, ),
            columnDimensions=(columnCount, ),
            potentialPct=toL4ConnectorParamsII["potentialPct"],
            globalInhibition=toL4ConnectorParamsII["globalInhibition"],
            localAreaDensity=toL4ConnectorParamsII["localAreaDensity"],
            numActiveColumnsPerInhArea=toL4ConnectorParamsII[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL4ConnectorParamsII["synPermInactiveDec"],
            synPermActiveInc=toL4ConnectorParamsII["synPermActiveInc"],
            synPermConnected=toL4ConnectorParamsII["synPermConnected"],
            boostStrength=toL4ConnectorParamsII["boostStrength"],
            seed=toL4ConnectorParamsII["seed"],
            wrapAround=toL4ConnectorParamsII["wrapAround"])
        print "toL4Connector Online"
        self.toL5Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toL5ConnectorParams["potentialPct"],
            globalInhibition=toL5ConnectorParams["globalInhibition"],
            localAreaDensity=toL5ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toL5ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL5ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toL5ConnectorParams["synPermActiveInc"],
            synPermConnected=toL5ConnectorParams["synPermConnected"],
            boostStrength=toL5ConnectorParams["boostStrength"],
            seed=toL5ConnectorParams["seed"],
            wrapAround=toL5ConnectorParams["wrapAround"])
        print "toL5Connector Online"
        self.toD1Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toD1ConnectorParams["potentialPct"],
            globalInhibition=toD1ConnectorParams["globalInhibition"],
            localAreaDensity=toD1ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toD1ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toD1ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toD1ConnectorParams["synPermActiveInc"],
            synPermConnected=toD1ConnectorParams["synPermConnected"],
            boostStrength=toD1ConnectorParams["boostStrength"],
            seed=toD1ConnectorParams["seed"],
            wrapAround=toD1ConnectorParams["wrapAround"])
        print "toD1Connector Online"
        self.toD2Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toD2ConnectorParams["potentialPct"],
            globalInhibition=toD2ConnectorParams["globalInhibition"],
            localAreaDensity=toD2ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toD2ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toD2ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toD2ConnectorParams["synPermActiveInc"],
            synPermConnected=toD2ConnectorParams["synPermConnected"],
            boostStrength=toD2ConnectorParams["boostStrength"],
            seed=toD2ConnectorParams["seed"],
            wrapAround=toD2ConnectorParams["wrapAround"])
        print "toD2Connector Online"

        #HTM Layers
        from nupic.algorithms.temporal_memory import TemporalMemory
        self.L4ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.L4 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
        )
        print "L4 Online"
        self.L5ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.L5 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
        )
        print "L5 Online"
        self.D1ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.D1 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
            initialPermanence=0.21,
            connectedPermanence=0.5,
        )
        print "D1 Online"
        self.D2ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.D2 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
            initialPermanence=0.21,
            connectedPermanence=0.5,
        )
        print "D2 Online"

    def encode_input(self, sine1, sine2, angularSpeed1, angularSpeed2,
                     efferenceCopy):
        return self.InputEncoder.encode({
            "sine1": sine1,
            "sine2": sine2,
            "angularSpeed1": angularSpeed1,
            "angularSpeed2": angularSpeed2,
            "efferenceCopy": efferenceCopy
        })

    def reset(self):
        self.action = 0
        self.L4.reset()
        self.L5.reset()
        self.D1.reset()
        self.D2.reset()

    def mimic(self, observation, action):
        # Mimicking only requires remembering the given obs-act pattern, thus
        # the striatum is neglected in this function.
        self.learning = True
        self.action = action
        encodedInput = self.encode_input(observation[0], observation[2],
                                         observation[4], observation[5],
                                         str(action))

        self.toL4ConnectorI.compute(encodedInput, self.learning,
                                    self.L4ActiveColumns)
        self.L4.compute(self.L4ActiveColumns, learn=self.learning)
        L4activeColumnIndices = numpy.nonzero(self.L4ActiveColumns)[0]

        L5Temp = numpy.zeros(self.columnCount, dtype=int)
        for column in L4activeColumnIndices:
            L5Temp[column] = 1
        self.toL5Connector.compute(L5Temp, self.learning, self.L5ActiveColumns)
        self.L5.compute(self.L5ActiveColumns, learn=self.learning)
        L5activeColumnIndices = numpy.nonzero(self.L5ActiveColumns)[0]

        #no action generation is needed in this func

    def learn(self, env, observation, expectedReaction):
        # We humans learn by trial and error, and so does an AI agent. Neural
        # networks have backpropagation, but HTM has no clear way to do
        # reinforcement learning (where do we feed in rewards?). Here I try
        # something new.
        self.learning = False  #...trial
        encodedInput = self.encode_input(observation[0], observation[2],
                                         observation[4], observation[5],
                                         str(self.action))

        self.toL4ConnectorI.compute(encodedInput, self.learning,
                                    self.L4ActiveColumns)
        # Ready to receive D1's disinhibition and D2's inhibition.
        L4Temp = numpy.zeros(self.columnCount * 3, dtype=int)
        L4activeColumnIndices = numpy.nonzero(self.L4ActiveColumns)[0]
        for column in L4activeColumnIndices:
            L4Temp[int(column) * 3] = 1
        D1ActiveColumnsIndices = numpy.nonzero(self.D1ActiveColumns)[0]
        for column in D1ActiveColumnsIndices:
            L4Temp[int(column) * 3 + 1] = 1
        D2ActiveColumnsIndices = numpy.nonzero(self.D2ActiveColumns)[0]
        for i in range(self.columnCount - 1):
            L4Temp[i * 3 + 2] = 1
        for column in D2ActiveColumnsIndices:  # achieve inhibition this way
            L4Temp[int(column) * 3 + 2] = 0
        self.toL4ConnectorII.compute(L4Temp, self.learning,
                                     self.L4ActiveColumns)
        self.L4.compute(self.L4ActiveColumns, learn=self.learning)
        L4activeColumnIndices = numpy.nonzero(self.L4ActiveColumns)[0]

        L5Temp = numpy.zeros(self.columnCount, dtype=int)
        for column in L4activeColumnIndices:
            L5Temp[column] = 1
        self.toL5Connector.compute(L5Temp, self.learning, self.L5ActiveColumns)
        self.L5.compute(self.L5ActiveColumns, learn=self.learning)
        L5activeColumnIndices = numpy.nonzero(self.L5ActiveColumns)[0]

        #Action Generation
        p = 84  #the action fed to the agent is represented by 84 bits at the end of the SDR (the "efference copy")
        count0 = 0
        count1 = 0
        count2 = 0
        for activeIndice in L5activeColumnIndices:
            convertedIndice = (activeIndice + 1) * 1126 / self.columnCount
            if convertedIndice <= 1126 - p / 4 and convertedIndice > 1126 - p / 2:
                count2 = count2 + 1
            if convertedIndice <= 1126 - p / 2 and convertedIndice > 1126 - 3 * p / 4:
                count1 = count1 + 1
            if convertedIndice <= 1126 - 3 * p / 4 and convertedIndice > 1126 - p:
                count0 = count0 + 1

        if count2 == max(count0, count1, count2):
            self.action = 2
        if count1 == max(count0, count1, count2):
            self.action = 1
        if count0 == max(count0, count1, count2):
            self.action = 0

        #...and error
        if self.action == expectedReaction:
            reward = 0.1
        else:
            reward = -0.1

        self.D1.setConnectedPermanence(self.D1.getConnectedPermanence() *
                                       (self.k**(-reward)))  #reward
        self.D2.setConnectedPermanence(self.D2.getConnectedPermanence() *
                                       (self.k**reward))  #punishment

        #Learn to correct mistakes (remember what's right and what's wrong)
        self.learning = True
        DTemp = numpy.zeros(self.columnCount, dtype=int)
        for column in L5activeColumnIndices:
            DTemp[column] = 1
        self.toD1Connector.compute(DTemp, self.learning, self.D1ActiveColumns)
        self.toD2Connector.compute(DTemp, self.learning, self.D2ActiveColumns)
        self.D1.compute(self.D1ActiveColumns, learn=self.learning)
        self.D2.compute(self.D2ActiveColumns, learn=self.learning)

        return reward

    def react(self, observation):
        self.learning = False
        encodedInput = self.encode_input(observation[0], observation[2],
                                         observation[4], observation[5],
                                         str(self.action))

        self.toL4ConnectorI.compute(encodedInput, self.learning,
                                    self.L4ActiveColumns)
        L4Temp = numpy.zeros(self.columnCount * 3, dtype=int)
        L4activeColumnIndices = numpy.nonzero(self.L4ActiveColumns)[0]
        for column in L4activeColumnIndices:
            L4Temp[int(column) * 3] = 1
        D1ActiveColumnsIndices = numpy.nonzero(self.D1ActiveColumns)[0]
        for column in D1ActiveColumnsIndices:
            L4Temp[int(column) * 3 + 1] = 1
        D2ActiveColumnsIndices = numpy.nonzero(self.D2ActiveColumns)[0]
        for i in range(self.columnCount):
            L4Temp[i * 3 + 2] = 1
        for column in D2ActiveColumnsIndices:
            L4Temp[int(column) * 3 + 2] = 0
        self.toL4ConnectorII.compute(L4Temp, self.learning,
                                     self.L4ActiveColumns)
        self.L4.compute(self.L4ActiveColumns, learn=self.learning)
        L4activeColumnIndices = numpy.nonzero(self.L4ActiveColumns)[0]

        L5Temp = numpy.zeros(self.columnCount, dtype=int)
        for column in L4activeColumnIndices:
            L5Temp[column] = 1
        self.toL5Connector.compute(L5Temp, self.learning, self.L5ActiveColumns)
        self.L5.compute(self.L5ActiveColumns, learn=self.learning)
        L5activeColumnIndices = numpy.nonzero(self.L5ActiveColumns)[0]

        p = 84
        count0 = 0
        count1 = 0
        count2 = 0
        for activeIndice in L5activeColumnIndices:
            convertedIndice = (activeIndice + 1) * 1126 / self.columnCount
            if convertedIndice <= 1126 - p / 4 and convertedIndice > 1126 - p / 2:
                count2 = count2 + 1
            if convertedIndice <= 1126 - p / 2 and convertedIndice > 1126 - 3 * p / 4:
                count1 = count1 + 1
            if convertedIndice <= 1126 - 3 * p / 4 and convertedIndice > 1126 - p:
                count0 = count0 + 1

        if count2 == max(count0, count1, count2):
            self.action = 2
        if count1 == max(count0, count1, count2):
            self.action = 1
        if count0 == max(count0, count1, count2):
            self.action = 0

        return self.action
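
Both learn() and react() above pack three interleaved slots per column into L4Temp: feed-forward L4 drive, D1 disinhibition, and a D2 inhibition bit that defaults to "open". A self-contained sketch of that packing (the helper name is illustrative, not from the original):

import numpy

def packStriatalGating(l4Active, d1Active, d2Active, columnCount):
    #Three interleaved slots per column: [L4 drive, D1 disinhibition, D2 inhibition].
    gated = numpy.zeros(columnCount * 3, dtype=int)
    for c in l4Active:
        gated[int(c) * 3] = 1      #feed-forward drive
    for c in d1Active:
        gated[int(c) * 3 + 1] = 1  #D1 opens the gate
    gated[2::3] = 1                #inhibition bits default to "open"
    for c in d2Active:
        gated[int(c) * 3 + 2] = 0  #D2 closes the gate of its active columns
    return gated
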
Example #23
0
def runHotgym(numRecords):
  with open(_PARAMS_PATH, "r") as f:
    modelParams = yaml.safe_load(f)["modelParams"]
    enParams = modelParams["sensorParams"]["encoders"]
    spParams = modelParams["spParams"]
    tmParams = modelParams["tmParams"]

  timeOfDayEncoder = DateEncoder(
    timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"])
  weekendEncoder = DateEncoder(
    weekend=enParams["timestamp_weekend"]["weekend"])
  scalarEncoder = RandomDistributedScalarEncoder(
    enParams["consumption"]["resolution"])

  encodingWidth = (timeOfDayEncoder.getWidth()
                   + weekendEncoder.getWidth()
                   + scalarEncoder.getWidth())

  sp = SpatialPooler(
    inputDimensions=(encodingWidth,),
    columnDimensions=(spParams["columnCount"],),
    potentialPct=spParams["potentialPct"],
    potentialRadius=encodingWidth,
    globalInhibition=spParams["globalInhibition"],
    localAreaDensity=spParams["localAreaDensity"],
    numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
    synPermInactiveDec=spParams["synPermInactiveDec"],
    synPermActiveInc=spParams["synPermActiveInc"],
    synPermConnected=spParams["synPermConnected"],
    boostStrength=spParams["boostStrength"],
    seed=spParams["seed"],
    wrapAround=True
  )

  tm = TemporalMemory(
    columnDimensions=(tmParams["columnCount"],),
    cellsPerColumn=tmParams["cellsPerColumn"],
    activationThreshold=tmParams["activationThreshold"],
    initialPermanence=tmParams["initialPerm"],
    connectedPermanence=spParams["synPermConnected"],
    minThreshold=tmParams["minThreshold"],
    maxNewSynapseCount=tmParams["newSynapseCount"],
    permanenceIncrement=tmParams["permanenceInc"],
    permanenceDecrement=tmParams["permanenceDec"],
    predictedSegmentDecrement=0.0,
    maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
    maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
    seed=tmParams["seed"]
  )

  classifier = SDRClassifierFactory.create()
  results = []
  with open(_INPUT_FILE_PATH, "r") as fin:
    reader = csv.reader(fin)
    headers = reader.next()
    reader.next()
    reader.next()

    for count, record in enumerate(reader):

      if count >= numRecords: break

      # Convert data string into Python date object.
      dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
      # Convert data value string into float.
      consumption = float(record[1])

      # To encode, we need to provide zero-filled numpy arrays for the encoders
      # to populate.
      timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
      weekendBits = numpy.zeros(weekendEncoder.getWidth())
      consumptionBits = numpy.zeros(scalarEncoder.getWidth())

      # Now we call the encoders to create bit representations for each value.
      timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
      weekendEncoder.encodeIntoArray(dateString, weekendBits)
      scalarEncoder.encodeIntoArray(consumption, consumptionBits)

      # Concatenate all these encodings into one large encoding for Spatial
      # Pooling.
      encoding = numpy.concatenate(
        [timeOfDayBits, weekendBits, consumptionBits]
      )

      # Create an array to represent active columns, all initially zero. This
      # will be populated by the compute method below. It must have the same
      # dimensions as the Spatial Pooler.
      activeColumns = numpy.zeros(spParams["columnCount"])

      # Execute Spatial Pooling algorithm over input space.
      sp.compute(encoding, True, activeColumns)
      activeColumnIndices = numpy.nonzero(activeColumns)[0]

      # Execute Temporal Memory algorithm over active mini-columns.
      tm.compute(activeColumnIndices, learn=True)

      activeCells = tm.getActiveCells()

      # Get the bucket info for this input value for classification.
      bucketIdx = scalarEncoder.getBucketIndices(consumption)[0]

      # Run classifier to translate active cells back to scalar value.
      classifierResult = classifier.compute(
        recordNum=count,
        patternNZ=activeCells,
        classification={
          "bucketIdx": bucketIdx,
          "actValue": consumption
        },
        learn=True,
        infer=True
      )

      # Print the best prediction for 1 step out.
      oneStepConfidence, oneStep = sorted(
        zip(classifierResult[1], classifierResult["actualValues"]),
        reverse=True
      )[0]
      print("1-step: {:16} ({:4.4}%)".format(oneStep, oneStepConfidence * 100))
      results.append([oneStep, oneStepConfidence * 100, None, None])

    return results
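
The classifier-result indexing above is easy to misread: classifierResult is a dict keyed by step count, plus an "actualValues" list, and the sorted zip picks the bucket with the highest 1-step likelihood. A toy illustration with made-up numbers:

classifierResult = {
  1: [0.1, 0.7, 0.2],                 # per-bucket likelihoods, 1 step ahead
  "actualValues": [5.0, 21.0, 40.5],  # value represented by each bucket
}
oneStepConfidence, oneStep = sorted(
  zip(classifierResult[1], classifierResult["actualValues"]),
  reverse=True
)[0]
# oneStepConfidence == 0.7, oneStep == 21.0
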
Example #24
0
        now = datetime.datetime.strptime(lines, "%Y-%m-%d %H:%M:%S")
        print "now =       ", de.encode(now)
    cpt += 1

categories = ('info', 'error', 'warning')
encoder = CategoryEncoder(w=3, categoryList=categories, forced=True)
info = encoder.encode("info")
error = encoder.encode("error")
warning = encoder.encode("warning")
#print "info =       ", info
#print "error =       ", error
#print "warning =       ", warning
sp = SpatialPooler(inputDimensions=(len(info), ),
                   columnDimensions=(3, ),
                   potentialRadius=15,
                   numActiveColumnsPerInhArea=1,
                   globalInhibition=True,
                   synPermActiveInc=0.03,
                   potentialPct=1.0)
import numpy
for column in xrange(3):
    connected = numpy.zeros((len(info), ), dtype="int")
    sp.getConnectedSynapses(column, connected)
    print connected

output = numpy.zeros((3, ), dtype="int")
sp.compute(info, learn=True, activeArray=output)
print output

output = numpy.zeros((3, ), dtype="int")
sp.compute(error, learn=True, activeArray=output)
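
With only three columns and numActiveColumnsPerInhArea=1, each compute() call activates exactly one column, and after some training the three categories should settle on distinct winners. A hedged check (distinct winners are not guaranteed on the very first passes):

winners = {}
for name, bits in (("info", info), ("error", error), ("warning", warning)):
    out = numpy.zeros((3, ), dtype="int")
    sp.compute(bits, learn=True, activeArray=out)
    winners[name] = numpy.nonzero(out)[0][0]
print winners  #ideally three different column indices
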
Example #25
0
    def testInhibition(self):
        """
    Test if the firing number of coincidences after inhibition
    equals spatial pooler numActiveColumnsPerInhArea.
    """
        # Miscellaneous variables:
        # n, w:                 n, w of encoders
        # inputLen:             Length of binary input
        # synPermConnected:     Spatial pooler synPermConnected
        # synPermActiveInc:     Spatial pooler synPermActiveInc
        # connectPct:           Initial connect percentage of permanences
        # columnDimensions:     Number of spatial pooler coincidences
        # numActiveColumnsPerInhArea:  Spatial pooler numActiveColumnsPerInhArea
        # stimulusThreshold:    Spatial pooler stimulusThreshold
        # spSeed:               Spatial pooler for initial permanences
        # stimulusThresholdInh: Parameter for inhibition, default value 0.00001
        # kDutyCycleFactor:     kDutyCycleFactor for dutyCycleTieBreaker in
        #                       Inhibition
        # spVerbosity:          Verbosity to print other sp initial parameters
        # testIter:             Testing iterations
        n = 100
        w = 15
        inputLen = 300
        columnDimensions = 2048
        numActiveColumnsPerInhArea = 40
        stimulusThreshold = 0
        spSeed = 1956
        stimulusThresholdInh = 0.00001
        kDutyCycleFactor = 0.01
        spVerbosity = 0
        testIter = 100

        spTest = SpatialPooler(
            columnDimensions=(columnDimensions, 1),
            inputDimensions=(1, inputLen),
            potentialRadius=inputLen / 2,
            numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
            spVerbosity=spVerbosity,
            stimulusThreshold=stimulusThreshold,
            seed=spSeed)
        initialPermanence = spTest._initialPermanence()
        spTest._masterPotentialM, spTest._masterPermanenceM = (
            spTest._makeMasterCoincidences(spTest.numCloneMasters,
                                           spTest._coincRFShape,
                                           spTest.potentialPct,
                                           initialPermanence, spTest.random))

        spTest._updateInhibitionObj()
        boostFactors = numpy.ones(columnDimensions)

        for i in range(testIter):
            spTest._iterNum = i
            # random binary input
            input_ = numpy.zeros((1, inputLen))
            nonzero = numpy.random.random(inputLen)
            input_[0][numpy.where(nonzero < float(w) / float(n))] = 1

            # overlap step
            spTest._computeOverlapsFP(
                input_, stimulusThreshold=spTest.stimulusThreshold)
            spTest._overlaps *= boostFactors
            onCellIndices = numpy.where(spTest._overlaps > 0)
            spTest._onCells.fill(0)
            spTest._onCells[onCellIndices] = 1
            denseOn = spTest._onCells

            # update _dutyCycleBeforeInh
            spTest.dutyCyclePeriod = min(i + 1, 1000)
            spTest._dutyCycleBeforeInh = (
                (spTest.dutyCyclePeriod - 1) * spTest._dutyCycleBeforeInh +
                denseOn) / spTest.dutyCyclePeriod
            dutyCycleTieBreaker = spTest._dutyCycleAfterInh.copy()
            dutyCycleTieBreaker *= kDutyCycleFactor

            # inhibition step
            numOn = spTest._inhibitionObj.compute(
                spTest._overlaps + dutyCycleTieBreaker,
                spTest._onCellIndices,
                stimulusThresholdInh,  # stimulusThresholdInh
                max(spTest._overlaps) / 1000,  # addToWinners
            )
            # update _dutyCycleAfterInh
            spTest._onCells.fill(0)
            onCellIndices = spTest._onCellIndices[0:numOn]
            spTest._onCells[onCellIndices] = 1
            denseOn = spTest._onCells
            spTest._dutyCycleAfterInh = (
                ((spTest.dutyCyclePeriod - 1) * spTest._dutyCycleAfterInh +
                 denseOn) / spTest.dutyCyclePeriod)

            # learning step
            spTest._adaptSynapses(onCellIndices, [], input_)

            # update boostFactor
            spTest._updateBoostFactors()
            boostFactors = spTest._firingBoostFactors

            # update dutyCycle and boost
            if ((spTest._iterNum + 1) % 50) == 0:
                spTest._updateInhibitionObj()
                spTest._updateMinDutyCycles(spTest._dutyCycleBeforeInh,
                                            spTest.minPctDutyCycleBeforeInh,
                                            spTest._minDutyCycleBeforeInh)
                spTest._updateMinDutyCycles(spTest._dutyCycleAfterInh,
                                            spTest.minPctDutyCycleAfterInh,
                                            spTest._minDutyCycleAfterInh)

            # test numOn and spTest.numActiveColumnsPerInhArea
            self.assertEqual(
                numOn, spTest.numActiveColumnsPerInhArea,
                "Error at input %s, actual numOn are: %i, "
                "numActivePerInhAre is: %s" %
                (i, numOn, numActiveColumnsPerInhArea))
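
Both duty-cycle updates in this test are the same moving-average recurrence, dutyCycle_t = ((period - 1) * dutyCycle_{t-1} + active_t) / period, applied once before and once after inhibition. Factored out as a sketch (this is the arithmetic used above, not an API of the pooler):

def updateDutyCycle(dutyCycle, denseOn, period):
    #One step of the moving average used for _dutyCycleBeforeInh
    #and _dutyCycleAfterInh in the test above.
    return ((period - 1.0) * dutyCycle + denseOn) / period
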
Example #26
0
# Spatial Pooler Parameters

var_encoders = {Encoder('value', ['RDSE'])}
# Encoder('_index', ['TODE'])}

encoder_width = 0
for x in var_encoders:
    for y in x.encoders:
        exec("s = " + y + ".getWidth()")
        encoder_width += s

SP = SpatialPooler(
    inputDimensions=encoder_width,
    columnDimensions=pooler_out,
    potentialPct=0.8,
    globalInhibition=True,
    numActiveColumnsPerInhArea=pooler_out // 50,  # Gets 2% of the total area
    boostStrength=1.0,
    wrapAround=False)
TM = TemporalMemory(columnDimensions=(pooler_out, ), cellsPerColumn=cell_col)

# Train Spatial Pooler
start = time.time()

active_columns = np.zeros(pooler_out)

print("Spatial pooler learning")

for x in range(len(Data)):
    encoder = multiencode(var_encoders, Data, x)
    # e_val = RDSE.encode(Data['value'][x])
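
The exec-based width summation above works but is fragile. Assuming the encoder names listed in each Encoder's .encoders can be resolved through a plain dict (encoder_registry is an illustrative name, not from the original), the same total can be computed without exec:

# Hypothetical registry mapping encoder-name strings to encoder objects.
encoder_registry = {"RDSE": RDSE}  # extend as needed, e.g. "TODE": TODE
encoder_width = sum(encoder_registry[name].getWidth()
                    for enc in var_encoders
                    for name in enc.encoders)
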
Example #27
0
def runHotgym(numRecords):
  with open(_PARAMS_PATH, "r") as f:
    modelParams = yaml.safe_load(f)["modelParams"]
    enParams = modelParams["sensorParams"]["encoders"]
    spParams = modelParams["spParams"]
    tmParams = modelParams["tmParams"]

  timeOfDayEncoder = DateEncoder(
    timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"])
  weekendEncoder = DateEncoder(
    weekend=enParams["timestamp_weekend"]["weekend"])
  scalarEncoder = RandomDistributedScalarEncoder(
    enParams["consumption"]["resolution"])

  encodingWidth = (timeOfDayEncoder.getWidth()
                   + weekendEncoder.getWidth()
                   + scalarEncoder.getWidth())

  sp = SpatialPooler(
    # How large the input encoding will be.
    inputDimensions=(encodingWidth,),
    # How many mini-columns will be in the Spatial Pooler.
    columnDimensions=(spParams["columnCount"],),
    # What percent of the columns"s receptive field is available for potential
    # synapses?
    potentialPct=spParams["potentialPct"],
    # This means that the input space has no topology.
    globalInhibition=spParams["globalInhibition"],
    localAreaDensity=spParams["localAreaDensity"],
    # Roughly 2%, given that there is only one inhibition area because we have
    # turned on globalInhibition (40 / 2048 = 0.0195)
    numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
    # How quickly synapses grow and degrade.
    synPermInactiveDec=spParams["synPermInactiveDec"],
    synPermActiveInc=spParams["synPermActiveInc"],
    synPermConnected=spParams["synPermConnected"],
    # boostStrength controls the strength of boosting. Boosting encourages
    # efficient usage of SP columns.
    boostStrength=spParams["boostStrength"],
    # Random number generator seed.
    seed=spParams["seed"],
    # TODO: is this useful?
    # Determines if inputs at the beginning and end of an input dimension should
    # be considered neighbors when mapping columns to inputs.
    wrapAround=False
  )

  tm = TemporalMemory(
    # Must be the same dimensions as the SP
    columnDimensions=(tmParams["columnCount"],),
    # How many cells in each mini-column.
    cellsPerColumn=tmParams["cellsPerColumn"],
    # A segment is active if it has >= activationThreshold connected synapses
    # that are active due to infActiveState
    activationThreshold=tmParams["activationThreshold"],
    initialPermanence=tmParams["initialPerm"],
    # TODO: This comes from the SP params, is this normal
    connectedPermanence=spParams["synPermConnected"],
    # Minimum number of active synapses for a segment to be considered during
    # search for the best-matching segments.
    minThreshold=tmParams["minThreshold"],
    # The max number of synapses added to a segment during learning
    maxNewSynapseCount=tmParams["newSynapseCount"],
    permanenceIncrement=tmParams["permanenceInc"],
    permanenceDecrement=tmParams["permanenceDec"],
    predictedSegmentDecrement=0.0,
    maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
    maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
    seed=tmParams["seed"]
  )

  classifier = SDRClassifierFactory.create()
  results = []
  with open(_INPUT_FILE_PATH, "r") as fin:
    reader = csv.reader(fin)
    headers = reader.next()
    reader.next()
    reader.next()

    for count, record in enumerate(reader):

      if count >= numRecords: break

      # Convert data string into Python date object.
      dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
      # Convert data value string into float.
      consumption = float(record[1])

      # To encode, we need to provide zero-filled numpy arrays for the encoders
      # to populate.
      timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
      weekendBits = numpy.zeros(weekendEncoder.getWidth())
      consumptionBits = numpy.zeros(scalarEncoder.getWidth())

      # Now we call the encoders to create bit representations for each value.
      timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
      weekendEncoder.encodeIntoArray(dateString, weekendBits)
      scalarEncoder.encodeIntoArray(consumption, consumptionBits)

      # Concatenate all these encodings into one large encoding for Spatial
      # Pooling.
      encoding = numpy.concatenate(
        [timeOfDayBits, weekendBits, consumptionBits]
      )

      # Create an array to represent active columns, all initially zero. This
      # will be populated by the compute method below. It must have the same
      # dimensions as the Spatial Pooler.
      activeColumns = numpy.zeros(spParams["columnCount"])

      # Execute Spatial Pooling algorithm over input space.
      sp.compute(encoding, True, activeColumns)
      activeColumnIndices = numpy.nonzero(activeColumns)[0]

      # Execute Temporal Memory algorithm over active mini-columns.
      tm.compute(activeColumnIndices, learn=True)

      activeCells = tm.getActiveCells()

      # Get the bucket info for this input value for classification.
      bucketIdx = scalarEncoder.getBucketIndices(consumption)[0]

      # Run classifier to translate active cells back to scalar value.
      classifierResult = classifier.compute(
        recordNum=count,
        patternNZ=activeCells,
        classification={
          "bucketIdx": bucketIdx,
          "actValue": consumption
        },
        learn=True,
        infer=True
      )

      # Print the best prediction for 1 step out.
      oneStepConfidence, oneStep = sorted(
        zip(classifierResult[1], classifierResult["actualValues"]),
        reverse=True
      )[0]
      print("1-step: {:16} ({:4.4}%)".format(oneStep, oneStepConfidence * 100))
      results.append([oneStep, oneStepConfidence * 100, None, None])

    return results
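
Since the TM consumes the SP's active-column indices directly (and even borrows connectedPermanence from the SP's synPermConnected above), the two column counts must agree. A one-line guard near the top of runHotgym would make that assumption explicit:

  assert spParams["columnCount"] == tmParams["columnCount"], \
    "SP and TM column counts must match for activeColumnIndices to line up"
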
Example #28
0
  def __init__(self, b1, b2, *args, **kw_args):
    self.b1 = b1
    self.b2 = b2
    SpatialPooler.__init__(self, *args, **kw_args)
    self.reset()
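
Such a subclass is constructed like the plain pooler, with the two extra values in front. A usage sketch, assuming the class above is named TaggedPooler (a hypothetical name; the excerpt omits the class statement) and defines the reset() it calls:

sp = TaggedPooler(            # hypothetical name for the subclass above
    0.5, 0.9,                 # b1, b2
    inputDimensions=(400,),
    columnDimensions=(1024,),
    globalInhibition=True,
    numActiveColumnsPerInhArea=20)
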
Example #29
0
  def _runLearnInference(self,
                         n=30,
                         w=15,
                         columnDimensions=2048,
                         numActiveColumnsPerInhArea=40,
                         spSeed=1951,
                         spVerbosity=0,
                         numTrainingRecords=100,
                         seed=42):
    # Instantiate two identical spatial poolers. One will be used only for
    # learning. The other will be trained with identical records, but with
    # random inference calls thrown in
    spLearnOnly = SpatialPooler(
        columnDimensions=(columnDimensions, 1),
        inputDimensions=(1, n),
        potentialRadius=n/2,
        numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
        spVerbosity=spVerbosity,
        seed=spSeed,
        synPermInactiveDec=0.01,
        synPermActiveInc=0.2,
        synPermConnected=0.11,)

    spLearnInfer = SpatialPooler(
        columnDimensions=(columnDimensions, 1),
        inputDimensions=(1, n),
        potentialRadius=n/2,
        numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
        spVerbosity=spVerbosity,
        seed=spSeed,
        synPermInactiveDec=0.01,
        synPermActiveInc=0.2,
        synPermConnected=0.11,)

    random.seed(seed)
    np.random.seed(seed)

    # Build up training set with numTrainingRecords patterns
    inputs = []         # holds post-encoded input patterns
    for i in xrange(numTrainingRecords):
      inputVector = np.zeros(n, dtype=realDType)
      inputVector[random.sample(xrange(n), w)] = 1
      inputs.append(inputVector)

    # Train each SP with identical inputs
    startTime = time.time()

    random.seed(seed)
    np.random.seed(seed)
    for i in xrange(numTrainingRecords):
      if spVerbosity > 0:
        print "Input #%d" % i
      # TODO: See https://github.com/numenta/nupic/issues/2072
      encodedInput = inputs[i]
      decodedOutput = np.zeros(columnDimensions)
      spLearnOnly.compute(encodedInput, learn=True, activeArray=decodedOutput)

    random.seed(seed)
    np.random.seed(seed)
    for i in xrange(numTrainingRecords):
      if spVerbosity > 0:
        print "Input #%d" % i
      # TODO: See https://github.com/numenta/nupic/issues/2072
      encodedInput = inputs[i]
      decodedOutput = np.zeros(columnDimensions)
      spLearnInfer.compute(encodedInput, learn=True, activeArray=decodedOutput)

    print "\nElapsed time: %.2f seconds\n" % (time.time() - startTime)

    # Test that both SP"s are identical by checking learning stats
    # A more in depth test would check all the coincidences, duty cycles, etc.
    # ala tpDiff
    # Edit: spDiff has been written as an in depth tester of the spatial pooler
    learnOnlyStats = spLearnOnly.getLearningStats()
    learnInferStats = spLearnInfer.getLearningStats()

    success = True
    # Check that the two spatial poolers are equivalent after the same training.
    success = success and spDiff(spLearnInfer, spLearnOnly)
    self.assertTrue(success)
    # Make sure that the pickled and loaded SPs are equivalent.
    spPickle = pickle.dumps(spLearnOnly, protocol=0)
    spLearnOnlyLoaded = pickle.loads(spPickle)
    success = success and spDiff(spLearnOnly, spLearnOnlyLoaded)
    self.assertTrue(success)
    for k in learnOnlyStats.keys():
      if learnOnlyStats[k] != learnInferStats[k]:
        success = False
        print "Stat", k, "is different:", learnOnlyStats[k], learnInferStats[k]

    self.assertTrue(success)
    if success:
      print "Test succeeded"
Example #30
0
    def toString(self):
        print("sentenceNum: " + self.id)
        print("startIdx " + self.startIdx)
        print("startIdx " + self.endIdx)
        print("anomalyScore " + self.anomalyScore)

        # parameters for the spatial pooler and temporal memory networks, only tm_only is used


sp_layer1 = SpatialPooler(inputDimensions=(128, 128),
                          columnDimensions=(64, 64),
                          potentialPct=0.1,
                          potentialRadius=5,
                          globalInhibition=False,
                          localAreaDensity=0.1,
                          numActiveColumnsPerInhArea=3,
                          synPermInactiveDec=0.5,
                          synPermActiveInc=0.02,
                          synPermConnected=0.90,
                          boostStrength=0.0,
                          wrapAround=False)

tm_only = TemporalMemory(
    inputDimensions=(4096, ),
    columnDimensions=(4096, ),
    cellsPerColumn=5,
    newSynapseCount=15,
    activationThreshold=15,
    initialPermanence=0.7,
    connectedPermanence=0.8,
    minThreshold=8,
Example #31
0
uintType = "uint32"
inputDimensions = (1000,1)
columnDimensions = (2048,1)
inputSize = np.array(inputDimensions).prod()
columnNumber = np.array(columnDimensions).prod()
inputArray = np.zeros(inputSize, dtype=uintType)

for i in range(inputSize):
  inputArray[i] = random.randrange(2)

activeCols = np.zeros(columnNumber, dtype=uintType)
sp = SP(inputDimensions,
  columnDimensions,
  potentialRadius = int(0.5*inputSize),
  numActiveColumnsPerInhArea = int(0.02*columnNumber),
  globalInhibition = True,
  seed = 1,
  synPermActiveInc = 0.01,
  synPermInactiveDec = 0.008
   )

# Part 1:
# -------
# A column connects to a subset of the input vector (specified
# by both the potentialRadius and potentialPct). The overlap score
# for a column is the number of connections to the input that become
# active when presented with a vector. When learning is 'on' in the SP,
# the active connections are reinforced, whereas those inactive are
# depressed (according to parameters synPermActiveInc and synPermInactiveDec).
# In order for the SP to create a sparse representation of the input, it
# will select a small percentage (usually 2%) of its most active columns,
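
The overlap described in Part 1 (the comment is cut off by the excerpt) can be reproduced by hand from the connected-synapse matrix, using the same getConnectedSynapses accessor seen in Example #24. This sketch recomputes what sp.compute does internally before inhibition, ignoring stimulusThreshold and boosting:

overlaps = np.zeros(int(columnNumber))
connected = np.zeros(inputSize, dtype=uintType)
for col in range(int(columnNumber)):
  sp.getConnectedSynapses(col, connected)
  overlaps[col] = np.dot(connected, inputArray)  # count of active, connected inputs
print "max overlap:", int(overlaps.max())
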
Example #32
0
#  minval=0,
#  maxval=100)
#consumeEncoder = AdaptiveScalarEncoder(
#  n=400,
#  w=21)
consumeEncoder = SimHashDistributedScalarEncoder(n=400, w=21, resolution=0.25)
encodingWidth = (timeOfDayEncoder.getWidth() + weekendEncoder.getWidth() +
                 consumeEncoder.getWidth())
classifier = SDRClassifierFactory.create()
sp = SpatialPooler(inputDimensions=(encodingWidth, ),
                   columnDimensions=(COL_WIDTH, ),
                   potentialPct=0.85,
                   potentialRadius=encodingWidth,
                   globalInhibition=True,
                   localAreaDensity=-1.0,
                   numActiveColumnsPerInhArea=40,
                   synPermInactiveDec=0.005,
                   synPermActiveInc=0.04,
                   synPermConnected=0.1,
                   boostStrength=3.0,
                   seed=1956,
                   wrapAround=False)
tm = TemporalMemory(columnDimensions=(COL_WIDTH, ),
                    cellsPerColumn=32,
                    activationThreshold=16,
                    initialPermanence=0.21,
                    connectedPermanence=0.5,
                    minThreshold=12,
                    maxNewSynapseCount=20,
                    permanenceIncrement=0.1,
                    permanenceDecrement=0.1,
Example #33
0
    def __init__(self, columnCount, InputEncoderParams, toL4ConnectorParamsI,
                 toL4ConnectorParamsII, toL5ConnectorParams,
                 toD1ConnectorParams, toD2ConnectorParams, L4Params, L5Params,
                 k, D1Params, D2Params):
        self.columnCount = columnCount
        self.toL4ConnectorParamsI = toL4ConnectorParamsI
        self.toL4ConnectorParamsII = toL4ConnectorParamsII
        self.toL5ConnectorParams = toL5ConnectorParams
        self.toD1ConnectorParams = toD1ConnectorParams
        self.toD2ConnectorParams = toD2ConnectorParams
        self.L4Params = L4Params
        self.L5Params = L5Params
        self.k = k
        self.D1Params = D1Params
        self.D2Params = D2Params
        self.learning = False

        #encoder
        from nupic.encoders import MultiEncoder
        self.InputEncoder = MultiEncoder()
        self.InputEncoder.addMultipleEncoders(InputEncoderParams)
        print "Encoder Online"

        #spatialPoolers
        from nupic.algorithms.spatial_pooler import SpatialPooler
        self.toL4ConnectorI = SpatialPooler(
            inputDimensions=(toL4ConnectorParamsI["inputDimensions"], ),
            columnDimensions=(columnCount, ),
            potentialPct=toL4ConnectorParamsI["potentialPct"],
            globalInhibition=toL4ConnectorParamsI["globalInhibition"],
            localAreaDensity=toL4ConnectorParamsI["localAreaDensity"],
            numActiveColumnsPerInhArea=toL4ConnectorParamsI[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL4ConnectorParamsI["synPermInactiveDec"],
            synPermActiveInc=toL4ConnectorParamsI["synPermActiveInc"],
            synPermConnected=toL4ConnectorParamsI["synPermConnected"],
            boostStrength=toL4ConnectorParamsI["boostStrength"],
            seed=toL4ConnectorParamsI["seed"],
            wrapAround=toL4ConnectorParamsI["wrapAround"])  #this part sucks
        self.toL4ConnectorII = SpatialPooler(
            inputDimensions=(columnCount * 3, ),
            columnDimensions=(columnCount, ),
            potentialPct=toL4ConnectorParamsII["potentialPct"],
            globalInhibition=toL4ConnectorParamsII["globalInhibition"],
            localAreaDensity=toL4ConnectorParamsII["localAreaDensity"],
            numActiveColumnsPerInhArea=toL4ConnectorParamsII[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL4ConnectorParamsII["synPermInactiveDec"],
            synPermActiveInc=toL4ConnectorParamsII["synPermActiveInc"],
            synPermConnected=toL4ConnectorParamsII["synPermConnected"],
            boostStrength=toL4ConnectorParamsII["boostStrength"],
            seed=toL4ConnectorParamsII["seed"],
            wrapAround=toL4ConnectorParamsII["wrapAround"])
        print "toL4Connector Online"
        self.toL5Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toL5ConnectorParams["potentialPct"],
            globalInhibition=toL5ConnectorParams["globalInhibition"],
            localAreaDensity=toL5ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toL5ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toL5ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toL5ConnectorParams["synPermActiveInc"],
            synPermConnected=toL5ConnectorParams["synPermConnected"],
            boostStrength=toL5ConnectorParams["boostStrength"],
            seed=toL5ConnectorParams["seed"],
            wrapAround=toL5ConnectorParams["wrapAround"])
        print "toL5Connector Online"
        self.toD1Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toD1ConnectorParams["potentialPct"],
            globalInhibition=toD1ConnectorParams["globalInhibition"],
            localAreaDensity=toD1ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toD1ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toD1ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toD1ConnectorParams["synPermActiveInc"],
            synPermConnected=toD1ConnectorParams["synPermConnected"],
            boostStrength=toD1ConnectorParams["boostStrength"],
            seed=toD1ConnectorParams["seed"],
            wrapAround=toD1ConnectorParams["wrapAround"])
        print "toD1Connector Online"
        self.toD2Connector = SpatialPooler(
            inputDimensions=(columnCount, ),
            columnDimensions=(columnCount, ),
            potentialPct=toD2ConnectorParams["potentialPct"],
            globalInhibition=toD2ConnectorParams["globalInhibition"],
            localAreaDensity=toD2ConnectorParams["localAreaDensity"],
            numActiveColumnsPerInhArea=toD2ConnectorParams[
                "numActiveColumnsPerInhArea"],
            synPermInactiveDec=toD2ConnectorParams["synPermInactiveDec"],
            synPermActiveInc=toD2ConnectorParams["synPermActiveInc"],
            synPermConnected=toD2ConnectorParams["synPermConnected"],
            boostStrength=toD2ConnectorParams["boostStrength"],
            seed=toD2ConnectorParams["seed"],
            wrapAround=toD2ConnectorParams["wrapAround"])
        print "toD2Connector Online"

        #HTM Layers
        from nupic.algorithms.temporal_memory import TemporalMemory
        self.L4ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.L4 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
        )
        print "L4 Online"
        self.L5ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.L5 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
        )
        print "L5 Online"
        self.D1ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.D1 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
            initialPermanence=0.21,
            connectedPermanence=0.5,
        )
        print "D1 Online"
        self.D2ActiveColumns = numpy.zeros(self.columnCount, dtype=int)
        self.D2 = TemporalMemory(
            columnDimensions=(columnCount, ),
            seed=42,
            initialPermanence=0.21,
            connectedPermanence=0.5,
        )
        print "D2 Online"
Example #34
0
def testSP():
  """ Run a SP test
  """

  elemSize = 400
  numSet = 42

  addNear = True
  numRecords = 2

  wantPlot = True

  poolPct = 0.5
  itr = 1
  doLearn = True

  while numRecords < 3:

    # Setup a SP
    sp = SpatialPooler(
           columnDimensions=(2048, 1),
           inputDimensions=(1, elemSize),
           potentialRadius=elemSize/2,
           numActiveColumnsPerInhArea=40,
           spVerbosity=0,
           stimulusThreshold=0,
           seed=1,
           potentialPct=poolPct,
           globalInhibition=True
           )

    # Generate inputs using rand()
    inputs = generateRandomInput(numRecords, elemSize, numSet)
    if addNear:
      # Append similar entries (distance of 1)
      appendInputWithNSimilarValues(inputs, 42)

    inputSize = len(inputs)
    print 'Num random records = %d, inputs to process %d' % (numRecords, inputSize)

    # Run a number of iterations, with learning on or off,
    # retrieve results from the last iteration only
    outputs = np.zeros((inputSize,2048))

    numIter = 1
    if doLearn:
      numIter = itr

    for iter in xrange(numIter):
      for i in xrange(inputSize):
        time.sleep(0.001)
        if iter == numIter - 1:
          # TODO: See https://github.com/numenta/nupic/issues/2072
          sp.compute(inputs[i], learn=doLearn, activeArray=outputs[i])
          #print outputs[i].sum(), outputs[i]
        else:
          # TODO: See https://github.com/numenta/nupic/issues/2072
          output = np.zeros(2048)
          sp.compute(inputs[i], learn=doLearn, activeArray=output)

    # Build a plot from the generated input and output and display it
    distribMatrix = generatePlot(outputs, inputs)

    # If we don't want a plot, just continue
    if wantPlot:
      plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
      plt.ylabel('SP (2048/40) distance in %')
      plt.xlabel('Input (400/42) distance in %')

      title = 'SP distribution'
      if doLearn:
        title += ', learning ON'
      else:
        title +=  ', learning OFF'

      title += ', inputs = %d' % len(inputs)
      title += ', iterations = %d' % numIter
      title += ', poolPct =%f' % poolPct

      plt.suptitle(title, fontsize=12)
      plt.show()
      #plt.savefig(os.path.join('~/Desktop/ExperimentResults/videos5', '%s' % numRecords))
      #plt.clf()

    numRecords += 1

  return
Example #35
0
class DendriteDetector(AnomalyDetector):
    def initialize(self):
        # Keep track of value range for spatial anomaly detection.
        self.minVal = None
        self.maxVal = None

        # Time of day encoder
        self.timeOfDayEncoder = DateEncoder(timeOfDay=(21, 9.49),
                                            name='time_enc')
        # RDSE encoder for the time series value.
        minResolution = 0.001
        rangePadding = abs(self.inputMax - self.inputMin) * 0.2
        minVal = self.inputMin - rangePadding
        maxVal = self.inputMax + rangePadding
        numBuckets = 130
        resolution = max(minResolution, (maxVal - minVal) / numBuckets)
        self.value_enc = RandomDistributedScalarEncoder(resolution=resolution,
                                                        name='value_rdse')

        # Spatial Pooler.
        encodingWidth = (self.timeOfDayEncoder.getWidth() +
                         self.value_enc.getWidth())
        self.sp = SpatialPooler(
            inputDimensions=(encodingWidth, ),
            columnDimensions=(2048, ),
            potentialPct=0.8,
            potentialRadius=encodingWidth,
            globalInhibition=1,
            numActiveColumnsPerInhArea=40,
            synPermInactiveDec=0.0005,
            synPermActiveInc=0.003,
            synPermConnected=0.2,
            boostStrength=0.0,
            seed=1956,
            wrapAround=True,
        )

        self.tm = TemporalMemory(
            columnDimensions=(2048, ),
            cellsPerColumn=32,
            activationThreshold=20,
            initialPermanence=.5,  # Increased to connectedPermanence.
            connectedPermanence=.5,
            minThreshold=13,
            maxNewSynapseCount=31,
            permanenceIncrement=0.04,
            permanenceDecrement=0.008,
            predictedSegmentDecrement=0.001,
            maxSegmentsPerCell=128,
            maxSynapsesPerSegment=128,  # Changed meaning. Also see connections.topology[2]
            seed=1993,
        )

        # Initialize the anomaly likelihood object
        numentaLearningPeriod = int(math.floor(self.probationaryPeriod / 2.0))
        self.anomalyLikelihood = anomaly_likelihood.AnomalyLikelihood(
            learningPeriod=numentaLearningPeriod,
            estimationSamples=self.probationaryPeriod - numentaLearningPeriod,
            reestimationPeriod=100,
        )

        self.age = 0

    def getAdditionalHeaders(self):
        """Returns a list of strings."""
        return ["raw_score"]

    def handleRecord(self, inputData):
        """
    Argument inputData is {"value": instantaneous_value, "timestamp": pandas.Timestamp}
    Returns a tuple (anomalyScore, rawScore).

    Internally to NuPIC "anomalyScore" corresponds to "likelihood_score"
    and "rawScore" corresponds to "anomaly_score". Sorry about that.
    """

        # Check for spatial anomalies and update min/max values.
        value = inputData["value"]
        spatialAnomaly = False
        if self.minVal != self.maxVal:
            tolerance = (self.maxVal - self.minVal) * SPATIAL_TOLERANCE
            maxExpected = self.maxVal + tolerance
            minExpected = self.minVal - tolerance
            if value > maxExpected or value < minExpected:
                spatialAnomaly = True
        if self.maxVal is None or value > self.maxVal:
            self.maxVal = value
        if self.minVal is None or value < self.minVal:
            self.minVal = value

        # Run the HTM stack.  First Encoders.
        timestamp = inputData["timestamp"]
        timeOfDayBits = np.zeros(self.timeOfDayEncoder.getWidth())
        self.timeOfDayEncoder.encodeIntoArray(timestamp, timeOfDayBits)
        valueBits = np.zeros(self.value_enc.getWidth())
        self.value_enc.encodeIntoArray(value, valueBits)
        encoding = np.concatenate([timeOfDayBits, valueBits])
        # Spatial Pooler.
        activeColumns = np.zeros(self.sp.getNumColumns())
        self.sp.compute(encoding, True, activeColumns)
        activeColumnIndices = np.nonzero(activeColumns)[0]
        # Temporal Memory and Anomaly.
        predictions = self.tm.getPredictiveCells()
        predictedColumns = list(self.tm.mapCellsToColumns(predictions).keys())
        self.tm.compute(activeColumnIndices, learn=True)
        activeCells = self.tm.getActiveCells()
        rawScore = anomaly.computeRawAnomalyScore(activeColumnIndices,
                                                  predictedColumns)

        # Compute log(anomaly likelihood)
        anomalyScore = self.anomalyLikelihood.anomalyProbability(
            inputData["value"], rawScore, inputData["timestamp"])
        finalScore = logScore = self.anomalyLikelihood.computeLogLikelihood(
            anomalyScore)

        if spatialAnomaly:
            finalScore = 1.0

        if False:
            # Plot correlation of excitement versus compartmentalization.
            if self.age == 0:
                print("Correlation Plots ENABLED.")
            if False:
                start_age = 1000
                end_age = 1800
            else:
                start_age = 4000
                end_age = 7260
            if self.age == start_age:
                import correlation
                import random
                self.cor_samplers = []
                sampled_cells = []
                while len(self.cor_samplers) < 20:
                    n = random.choice(xrange(self.tm.numberOfCells()))
                    if n in sampled_cells:
                        continue
                    else:
                        sampled_cells.append(n)
                    neuron = self.tm.connections.dataForCell(n)
                    if neuron._roots:
                        c = correlation.CorrelationSampler(neuron._roots[0])
                        c.random_sample_points(100)
                        self.cor_samplers.append(c)
                print("Created %d Correlation Samplers" %
                      len(self.cor_samplers))
            if self.age >= start_age:
                for smplr in self.cor_samplers:
                    smplr.sample()
            if self.age == end_age:
                import matplotlib.pyplot as plt
                for idx, smplr in enumerate(self.cor_samplers):
                    if smplr.num_samples == 0:
                        print("No samples, plot not shown.")
                        continue
                    plt.figure("Sample %d" % idx)
                    smplr.plot(period=64)  # Different value!
                plt.show()

        if False:
            # Plot excitement of a typical detection on a dendrite.
            if self.age == 7265:
                #if self.age == 1800:
                import matplotlib.pyplot as plt
                import random
                from connections import SYN_CONNECTED_ACTIVE
                sampled_cells = set()
                for figure_num in xrange(40):
                    plt.figure("(%d)" % figure_num)
                    # Find an active cell to view.
                    cell = None
                    for attempt in range(100):
                        event = random.choice(self.tm.activeEvents)
                        cell = event.cell  # This is an integer.
                        if cell is not None and cell not in sampled_cells:
                            break
                    else:
                        break
                    sampled_cells.add(cell)
                    cell = self.tm.connections.dataForCell(cell)
                    # Organize the data.
                    EPSPs = []
                    excitement = []
                    distance_to_root = 0
                    segment_offsets = {}
                    branch = cell._roots[0]
                    while True:
                        segment_offsets[branch] = distance_to_root
                        distance_to_root += len(branch._synapses)
                        excitement.extend(branch.excitement)
                        for syn in branch._synapses:
                            if syn is None:
                                EPSPs.append(0)
                            else:
                                EPSPs.append(syn.state == SYN_CONNECTED_ACTIVE)
                        if branch.children:
                            branch = random.choice(branch.children)
                        else:
                            break
                    plt.plot(
                        np.arange(distance_to_root),
                        EPSPs,
                        'r',
                        np.arange(distance_to_root),
                        excitement,
                        'b',
                    )
                    plt.title(
                        "Dendrite Activation\n Horizontal line is activation threshold, Vertical lines are segment bifurcations"
                    )
                    plt.xlabel("Distance along Dendrite", )
                    plt.ylabel("EPSPs are Red, Excitement is Blue")
                    # Show lines where the excitement crosses thresholds.
                    plt.axhline(20, color='k')  # Hard coded parameter value.
                    for offset in segment_offsets.values():
                        if offset != 0:
                            plt.axvline(offset, color='k')
                print("\nShowing %d excitement plots." % len(sampled_cells))
                plt.show()

        self.age += 1

        return (finalScore, rawScore)
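
computeRawAnomalyScore, as used above, is simply the fraction of currently active columns that were not predicted on the previous step. A tiny worked example (column indices made up):

import numpy as np
from nupic.algorithms import anomaly

active = np.array([3, 7, 19, 42])   # currently active columns
predicted = np.array([7, 19, 55])   # columns predicted on the previous step
# 2 of the 4 active columns (3 and 42) were unpredicted -> score 0.5
print anomaly.computeRawAnomalyScore(active, predicted)
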
Example #36
0
def testSPNew():
  """ New version of the test"""

  elemSize = 400
  numSet = 42

  addNear = True
  numRecords = 1000

  wantPlot = False

  poolPct = 0.5
  itr = 5

  pattern = [60, 1000]
  doLearn = True
  start = 1
  learnIter = 0
  noLearnIter = 0

  numLearns = 0
  numTests = 0


  numIter = 1

  numGroups = 1000


  PLOT_PRECISION = 100.0
  distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))

  inputs = generateRandomInput(numGroups, elemSize, numSet)


  # Setup a SP
  sp = SpatialPooler(
         columnDimensions=(2048, 1),
         inputDimensions=(1, elemSize),
         potentialRadius=elemSize/2,
         numActiveColumnsPerInhArea=40,
         spVerbosity=0,
         stimulusThreshold=0,
         synPermConnected=0.12,
         seed=1,
         potentialPct=poolPct,
         globalInhibition=True
         )

  cleanPlot = False

  # Output buffers for the two SDRs compared below (missing from the snippet as excerpted).
  output1 = np.zeros(2048)
  output2 = np.zeros(2048)

  for i in xrange(numRecords):
    input1 = getRandomWithMods(inputs, 4)
    if i % 2 == 0:
      input2 = getRandomWithMods(inputs, 4)
    else:
      input2 = input1.copy()
      input2 = modifyBits(input2, 21)

    inDist = (abs(input1-input2) > 0.1)
    intInDist = int(inDist.sum()/2+0.1)
    #print intInDist

    if start == 0:
      doLearn = True
      learnIter += 1
      if learnIter == pattern[start]:
        numLearns += 1
        start = 1
        noLearnIter = 0
    elif start == 1:
      doLearn = False
      noLearnIter += 1
      if noLearnIter == pattern[start]:
        numTests += 1
        start = 0
        learnIter = 0
        cleanPlot = True

    # TODO: See https://github.com/numenta/nupic/issues/2072
    sp.compute(input1, learn=doLearn, activeArray=output1)
    sp.compute(input2, learn=doLearn, activeArray=output2)
    time.sleep(0.001)

    outDist = (abs(output1-output2) > 0.1)
    intOutDist = int(outDist.sum()/2+0.1)

    if not doLearn and intOutDist < 2 and intInDist > 10:
      """
      sp.spVerbosity = 10
      # TODO: See https://github.com/numenta/nupic/issues/2072
      sp.compute(input1, learn=doLearn, activeArray=output1)
      sp.compute(input2, learn=doLearn, activeArray=output2)
      sp.spVerbosity = 0


      print 'Elements has very small SP distance: %d' % intOutDist
      print output1.nonzero()
      print output2.nonzero()
      print sp._firingBoostFactors[output1.nonzero()[0]]
      print sp._synPermBoostFactors[output1.nonzero()[0]]
      print 'Input elements distance is %d' % intInDist
      print input1.nonzero()
      print input2.nonzero()
      sys.stdin.readline()
      """

    if not doLearn:
      x = int(PLOT_PRECISION*intOutDist/40.0)
      y = int(PLOT_PRECISION*intInDist/42.0)
      if distribMatrix[x, y] < 0.1:
        distribMatrix[x, y] = 3
      else:
        if distribMatrix[x, y] < 10:
          distribMatrix[x, y] += 1

    #print i

    # If we don't want a plot, just continue
    if wantPlot and cleanPlot:
      plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
      plt.ylabel('SP (2048/40) distance in %')
      plt.xlabel('Input (400/42) distance in %')

      title = 'SP distribution'

      #if doLearn:
      #  title += ', leaning ON'
      #else:
      #  title +=  ', learning OFF'

      title += ', learn sets = %d' % numLearns
      title += ', test sets = %d' % numTests
      title += ', iter = %d' % numIter
      title += ', groups = %d' % numGroups
      title += ', Pct =%f' % poolPct

      plt.suptitle(title, fontsize=12)
      #plt.show()

      plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosNew', '%s' % i))

      plt.clf()
      distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
      cleanPlot = False
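
The distance metric used throughout these tests is half the count of positions where two equally sparse binary vectors disagree, i.e. the number of bits unique to each. Factored into a helper for clarity:

def sdrDistance(a, b):
  #Number of bits by which two equally sparse binary SDRs differ,
  #counted once per mismatched pair (hence the division by 2).
  return int((np.abs(a - b) > 0.1).sum() / 2 + 0.1)
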
Example #37
0
if __name__ == "__main__":
    # Get training images and convert them to vectors.
    trainingImages, trainingTags = dataset_readers.getImagesAndTags(
        trainingDataset)
    trainingVectors = encoder.imagesToVectors(trainingImages)

    # Instantiate the python spatial pooler
    sp = SpatialPooler(
        inputDimensions=32**2,  # Size of image patch
        columnDimensions=16,  # Number of potential features
        potentialRadius=10000,  # Ensures 100% potential pool
        potentialPct=1,  # Neurons can connect to 100% of input
        globalInhibition=True,
        localAreaDensity=-1,  # Using numActiveColumnsPerInhArea
        #localAreaDensity=0.02, # one percent of columns active at a time
        #numActiveColumnsPerInhArea=-1, # Using percentage instead
        numActiveColumnsPerInhArea=1,  # Only one feature active at a time
        # All input activity can contribute to feature output
        stimulusThreshold=0,
        synPermInactiveDec=0.3,
        synPermActiveInc=0.3,
        synPermConnected=0.3,  # Connected threshold
        boostStrength=2,
        seed=1956,  # The seed that Grok uses
        spVerbosity=1)

    # Instantiate the spatial pooler test bench.
    tb = VisionTestBench(sp)

    # Instantiate the classifier
    clf = exactMatch()
Example #38
0
def testSPFile():
  """ Run test on the data file - the file has records previously encoded.
  """

  spSize = 2048
  spSet = 40

  poolPct = 0.5

  pattern = [50, 1000]
  doLearn = True

  PLOT_PRECISION = 100.0
  distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))

  inputs = []


  #file = open('~/Desktop/ExperimentResults/sampleArtificial.csv', 'rb')
  #elemSize = 400
  #numSet = 42

  #file = open('~/Desktop/ExperimentResults/sampleDataBasilOneField.csv', 'rb')
  #elemSize = 499
  #numSet = 7

  outdir = os.path.expanduser('~/Desktop/ExperimentResults/Basil100x21')  #expand '~' so open() and savefig() work
  inputFile = outdir+'.csv'
  file = open(inputFile, 'rb')

  elemSize = 100
  numSet = 21

  reader = csv.reader(file)

  for row in reader:
    input = np.array(map(float, row), dtype=realDType)
    if len(input.nonzero()[0]) != numSet:
      continue

    inputs.append(input.copy())

  file.close()

  # Setup a SP
  sp = SpatialPooler(
         columnDimensions=(spSize, 1),
         inputDimensions=(1, elemSize),
         potentialRadius=elemSize/2,
         numActiveColumnsPerInhArea=spSet,
         spVerbosity=0,
         stimulusThreshold=0,
         synPermConnected=0.10,
         seed=1,
         potentialPct=poolPct,
         globalInhibition=True
         )

  cleanPlot = False


  doLearn = False

  print 'Finished reading file, inputs/outputs to process =', len(inputs)

  size = len(inputs)

  # Output buffers (missing from the snippet as excerpted).
  outputs = np.zeros((size, spSize))
  output1 = np.zeros(spSize)
  output2 = np.zeros(spSize)

  for iter in xrange(100):

    print 'Iteration', iter

    # Learn
    if iter != 0:
      for learnRecs in xrange(pattern[0]):

        # TODO: See https://github.com/numenta/nupic/issues/2072
        ind = np.random.random_integers(0, size-1, 1)[0]
        sp.compute(inputs[ind], learn=True, activeArray=outputs[ind])

    # Test
    for _ in xrange(pattern[1]):
      rand1 = np.random.random_integers(0, size-1, 1)[0]
      rand2 = np.random.random_integers(0, size-1, 1)[0]

      sp.compute(inputs[rand1], learn=False, activeArray=output1)
      sp.compute(inputs[rand2], learn=False, activeArray=output2)

      outDist = (abs(output1-output2) > 0.1)
      intOutDist = int(outDist.sum()/2+0.1)

      inDist = (abs(inputs[rand1]-inputs[rand2]) > 0.1)
      intInDist = int(inDist.sum()/2+0.1)

      if intInDist != numSet or intOutDist != spSet:
        print rand1, rand2, '-', intInDist, intOutDist

      x = int(PLOT_PRECISION*intOutDist/spSet)
      y = int(PLOT_PRECISION*intInDist/numSet)
      if distribMatrix[x, y] < 0.1:
        distribMatrix[x, y] = 3
      else:
        if distribMatrix[x, y] < 10:
          distribMatrix[x, y] += 1

    if True:
      plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
      plt.ylabel('SP (%d/%d) distance in pct' % (spSize, spSet))
      plt.xlabel('Input (%d/%d) distance in pct' % (elemSize, numSet))

      title = 'SP distribution'
      title += ', iter = %d' % iter
      title += ', Pct =%f' % poolPct

      plt.suptitle(title, fontsize=12)

      #plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosArtData', '%s' % iter))
      plt.savefig(os.path.join(outdir, '%s' % iter))

      plt.clf()
      distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
Example #39
0
import numpy as np
from tqdm import tqdm

from nupic.encoders.scalar import ScalarEncoder
from nupic.algorithms.spatial_pooler import SpatialPooler
from nupic.algorithms.temporal_memory import TemporalMemory
from nupic.algorithms.sdr_classifier import SDRClassifier

N = 900
x = np.sin(np.arange(N) * 2 * np.pi / 30.0)
inputDimensions = (256, )
columnDimensions = (512, )

encoder = ScalarEncoder(21, -1.0, 1.0, n=inputDimensions[0])
sp = SpatialPooler(inputDimensions=inputDimensions,
                   columnDimensions=columnDimensions,
                   globalInhibition=True,
                   numActiveColumnsPerInhArea=21)
tm = TemporalMemory(columnDimensions=columnDimensions)
c = SDRClassifier(steps=[1], alpha=0.1, actValueAlpha=0.1, verbosity=0)

x_true = x[1:]
x_predict = np.zeros(len(x) - 1)

for i, xi in tqdm(enumerate(x[:-1])):
    encoded = encoder.encode(xi)
    bucketIdx = np.where(encoded > 0)[0][0]
    spd = np.zeros(columnDimensions[0])
    sp.compute(encoded, True, spd)
    active_indices = np.where(spd > 0)[0]
    tm.compute(active_indices)
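
The excerpt ends just after the temporal memory step. A hedged sketch of how the loop might continue, reusing the SDRClassifier compute() pattern shown in Example #23 (this continuation is not part of the original snippet):

    result = c.compute(recordNum=i,
                       patternNZ=tm.getActiveCells(),
                       classification={"bucketIdx": bucketIdx,
                                       "actValue": xi},
                       learn=True, infer=True)
    # Predict the next value as the bucket with the highest 1-step likelihood.
    x_predict[i] = result["actualValues"][np.argmax(result[1])]
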
Example #40
0
        # Pick a combination of parameter values
        parameters.nextCombination()
        #parameters.nextRandomCombination()
        synPermConn = parameters.getValue("synPermConn")
        synPermDec = synPermConn * parameters.getValue("synPermDecFrac")
        synPermInc = synPermConn * parameters.getValue("synPermIncFrac")

        # Instantiate our spatial pooler
        sp = SpatialPooler(
            inputDimensions=(32, 32),  # Size of image patch
            columnDimensions=(32, 32),
            potentialRadius=10000,  # Ensures 100% potential pool
            potentialPct=0.8,
            globalInhibition=True,
            localAreaDensity=-1,  # Using numActiveColumnsPerInhArea
            numActiveColumnsPerInhArea=64,
            # All input activity can contribute to feature output
            stimulusThreshold=0,
            synPermInactiveDec=synPermDec,
            synPermActiveInc=synPermInc,
            synPermConnected=synPermConn,
            boostStrength=1.0,
            seed=1956,  # The seed that Grok uses
            spVerbosity=1)

        # Instantiate the spatial pooler test bench.
        tb = VisionTestBench(sp)

        # Instantiate the classifier
        clf = KNNClassifier()

        # Train the spatial pooler on trainingVectors.
Example #41
0
def runHotgym(numRecords):
    with open(_PARAMS_PATH, "r") as f:
        modelParams = yaml.safe_load(f)["modelParams"]
        enParams = modelParams["sensorParams"]["encoders"]
        spParams = modelParams["spParams"]
        tmParams = modelParams["tmParams"]

    scalarEncoder = RandomDistributedScalarEncoder(
        enParams["consumption"]["resolution"])
    scalarEncoder2 = RandomDistributedScalarEncoder(
        enParams["consumption2"]["resolution"])

    encodingWidth = (scalarEncoder.getWidth() + scalarEncoder2.getWidth())

    sp = SpatialPooler(
        inputDimensions=(encodingWidth, ),
        columnDimensions=(spParams["columnCount"], ),
        potentialPct=spParams["potentialPct"],
        potentialRadius=encodingWidth,
        globalInhibition=spParams["globalInhibition"],
        localAreaDensity=spParams["localAreaDensity"],
        numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
        synPermInactiveDec=spParams["synPermInactiveDec"],
        synPermActiveInc=spParams["synPermActiveInc"],
        synPermConnected=spParams["synPermConnected"],
        boostStrength=spParams["boostStrength"],
        seed=spParams["seed"],
        wrapAround=True)

    tm = TemporalMemory(
        columnDimensions=(tmParams["columnCount"], ),
        cellsPerColumn=tmParams["cellsPerColumn"],
        activationThreshold=tmParams["activationThreshold"],
        initialPermanence=tmParams["initialPerm"],
        connectedPermanence=spParams["synPermConnected"],
        minThreshold=tmParams["minThreshold"],
        maxNewSynapseCount=tmParams["newSynapseCount"],
        permanenceIncrement=tmParams["permanenceInc"],
        permanenceDecrement=tmParams["permanenceDec"],
        predictedSegmentDecrement=0.0,
        maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
        maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
        seed=tmParams["seed"])

    classifier = SDRClassifierFactory.create()
    results = []
    with open(_INPUT_FILE_PATH, "r") as fin:
        reader = csv.reader(fin)
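        # NuPIC-formatted CSVs carry three header rows (field names, field
        # types, special flags); read the names and skip the other two.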
        headers = reader.next()
        reader.next()
        reader.next()

        output = output_anomaly_generic_v1.NuPICFileOutput(_FILE_NAME)

        for count, record in enumerate(reader):

            if count >= numRecords: break

            # The date string in record[0] could be parsed with
            # datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M"),
            # but this example passes it through unparsed.
            # Convert data value string into float.
            prediction = float(record[1])
            prediction2 = float(record[2])

            # To encode, we need to provide zero-filled numpy arrays for the encoders
            # to populate.
            consumptionBits = numpy.zeros(scalarEncoder.getWidth())
            consumptionBits2 = numpy.zeros(scalarEncoder2.getWidth())

            # Now we call the encoders to create bit representations for each value.
            scalarEncoder.encodeIntoArray(prediction, consumptionBits)
            scalarEncoder2.encodeIntoArray(prediction2, consumptionBits2)

            # Concatenate all these encodings into one large encoding for Spatial
            # Pooling.
            encoding = numpy.concatenate([consumptionBits, consumptionBits2])

            # Create an array to represent active columns, all initially zero. This
            # will be populated by the compute method below. It must have the same
            # dimensions as the Spatial Pooler.
            activeColumns = numpy.zeros(spParams["columnCount"])

            # Execute Spatial Pooling algorithm over input space.
            sp.compute(encoding, True, activeColumns)
            activeColumnIndices = numpy.nonzero(activeColumns)[0]

            # Execute Temporal Memory algorithm over active mini-columns.
            tm.compute(activeColumnIndices, learn=True)

            activeCells = tm.getActiveCells()

            # Get the bucket info for this input value for classification.
            bucketIdx = scalarEncoder.getBucketIndices(prediction)[0]

            # Run classifier to translate active cells back to scalar value.
            classifierResult = classifier.compute(recordNum=count,
                                                  patternNZ=activeCells,
                                                  classification={
                                                      "bucketIdx": bucketIdx,
                                                      "actValue": prediction
                                                  },
                                                  learn=True,
                                                  infer=True)

            # Print the best prediction for 1 step out.
            oneStepConfidence, oneStep = sorted(
                zip(classifierResult[1], classifierResult["actualValues"]),
                reverse=True)[0]
            # print("1-step: {:16} ({:4.4}%)".format(oneStep, oneStepConfidence * 100))
            # results.append([oneStep, oneStepConfidence * 100, None, None])
            results.append(
                [record[0], prediction, oneStep, oneStepConfidence * 100])
            output.write(record[0], prediction, oneStep,
                         oneStepConfidence * 100)

        output.close()
        return results
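
A hypothetical driver for this function, assuming the module-level constants it relies on (_PARAMS_PATH, _INPUT_FILE_PATH, _FILE_NAME) are defined as in the full example:

if __name__ == '__main__':
    results = runHotgym(100)
    print 'processed %d records' % len(results)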
Example #42
0
EncodingWidth, SpatialPoolerWidth1 = 750, 600

sp = SpatialPooler(
    # How large the input encoding will be.
    inputDimensions=(EncodingWidth,),
    # How many mini-columns will be in the Spatial Pooler.
    columnDimensions=(SpatialPoolerWidth1,),
    # What percent of the column's receptive field is available for potential
    # synapses?
    potentialPct=0.85,
    # This means that the input space has no topology.
    globalInhibition=True,
    localAreaDensity=-1.0,
    # 30 active columns out of 600 is a density of 5%; there is only one
    # inhibition area because we have turned on globalInhibition.
    numActiveColumnsPerInhArea=30.0,
    # How quickly synapses grow and degrade.
    synPermInactiveDec=0.005,
    synPermActiveInc=0.04,
    synPermConnected=0.1,
    # boostStrength controls the strength of boosting. Boosting encourages
    # efficient usage of SP columns.
    boostStrength=3.0,
    # Random number generator seed.
    seed=1956,
    # Determines if inputs at the beginning and end of an input dimension should
    # be considered neighbors when mapping columns to inputs.
    wrapAround=False)

# Array which contains the output of the spatial pooler for layer 1
activeColumns = np.zeros(SpatialPoolerWidth1)
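
The snippet stops after allocating the output array. A minimal sketch of driving this pooler with a scalar encoding follows; the encoder and its parameters are illustrative assumptions, not part of the original:

from nupic.encoders.scalar import ScalarEncoder

# Hypothetical encoder sized to match the pooler's input dimension.
encoder = ScalarEncoder(w=21, minval=0.0, maxval=100.0, n=EncodingWidth)
encoding = encoder.encode(42.0)

# One step of spatial pooling; activeColumns is filled in place.
sp.compute(encoding, True, activeColumns)
activeColumnIndices = np.nonzero(activeColumns)[0]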