Example #1
import numpy

from nupic.research.TP import TP
# buildOverlappedSequences is a helper from the surrounding test utilities;
# its import is not shown in the original excerpt.


def main(SEED):
    # Generate the input sequences
    numOnBitsPerPattern = 3
    (numCols, trainingSequences) = buildOverlappedSequences(
            numSequences        = 2,        # number of sequences to generate
            seqLen              = 5,        # length of each sequence
            sharedElements      = [2,3],    # indices of elements shared between the sequences
            numOnBitsPerPattern = 3,        # number of columns active per pattern
            patternOverlap      = 0         # number of active columns shared between patterns
            )


    print numCols
    for sequence in trainingSequences:
        print sequence


    # Create the TP
    tp = TP(
            numberOfCols          = numCols,
            cellsPerColumn        = 2,
            initialPerm           = 0.6,
            connectedPerm         = 0.5,
            minThreshold          = 3,
            newSynapseCount       = 3,
            permanenceInc         = 0.1,
            permanenceDec         = 0.0,
            activationThreshold   = 3,
            globalDecay           = 0.0,
            burnIn                = 1,
            seed                  = SEED,
            verbosity             = 0,
            checkSynapseConsistency  = True,
            pamLength                = 1
            )

    # Train the TP: present each sequence 10 times
    for _ in range(10):
        for seq_num, sequence in enumerate(trainingSequences):
            for x in sequence:
                x = numpy.array(x).astype('float32')
                tp.compute(x, enableLearn = True, computeInfOutput=True)
                #tp.printStates(False, False)
            tp.reset()


    # TP prediction: replay the sequences with learning off
    for seq_num, sequence in enumerate(trainingSequences):
        for x in sequence:
            x = numpy.array(x).astype('float32')
            tp.compute(x, enableLearn = False, computeInfOutput = True)
            tp.printStates(False, False)
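
The excerpt defines main() but never runs it. A minimal driver, plus an illustrative helper that scores each step's column-level predictions against the pattern that actually arrives next, might look like the sketch below (checkPredictions is not part of the original code; getPredictedState() is the same accessor the later examples use):

def checkPredictions(tp, sequence):
    # Illustrative sketch: replay one sequence with learning off and report,
    # at each step, the overlap between the columns predicted at the previous
    # step and the columns that are actually active now.
    tp.reset()
    prevPredicted = None
    for x in sequence:
        x = numpy.array(x).astype('float32')
        tp.compute(x, enableLearn=False, computeInfOutput=True)
        if prevPredicted is not None:
            print "overlap with predicted columns:", int((prevPredicted * x).sum())
        # OR across the cells of each column to get column-level predictions
        prevPredicted = tp.getPredictedState().max(axis=1).astype('float32')


if __name__ == "__main__":
    main(SEED=42)    # any fixed seed makes the run reproducible
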
Example #2
  print "\n\n--------","ABCDE"[j],"-----------"
  print "Raw input vector\n",formatRow(x[j])
  
  # Send each vector to the TP, with learning turned off
  tp.compute(x[j], enableLearn = False, computeInfOutput = True)
  
  # This method prints out the active state of each cell followed by the
  # predicted state of each cell. For convenience the cells are grouped
  # 10 at a time. When there are multiple cells per column the printout
  # is arranged so the cells in a column are stacked together
  #
  # What you should notice is that the columns where active state is 1
  # represent the SDR for the current input pattern and the columns where
  # predicted state is 1 represent the SDR for the next expected pattern
  print "\nAll the active and predicted cells:"
  tp.printStates(printPrevious = False, printLearnState = False)
  
  # tp.getPredictedState() gets the predicted cells.
  # predictedCells[c][i] represents the state of the i'th cell in the c'th
  # column. To see if a column is predicted, we can simply take the OR
  # across all the cells in that column. In numpy we can do this by taking 
  # the max along axis 1.
  print "\n\nThe following columns are predicted by the temporal pooler. This"
  print "should correspond to columns in the *next* item in the sequence."
  predictedCells = tp.getPredictedState()
  print formatRow(predictedCells.max(axis=1).nonzero())


#######################################################################
#
# This command prints the segments associated with every single cell. This is
# commented out because it prints a ton of stuff.
#tp.printCells()
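
Assuming the A-E encoding used in Example #4 below, where letter j occupies the ten columns 10*j through 10*j+9, the predicted columns can also be decoded back into a letter. The decoder below is an illustrative sketch under that assumption, not part of the original script:

def decodePrediction(predictedCells):
    # Collapse cell-level predictions to column level, then vote per letter
    # under the assumed 10-columns-per-letter encoding.
    predictedCols = predictedCells.max(axis=1).nonzero()[0]
    votes = [0] * 5
    for c in predictedCols:
        if c < 50:
            votes[c // 10] += 1
    if max(votes) == 0:
        return None    # nothing predicted
    return "ABCDE"[votes.index(max(votes))]

# e.g.  print "Predicted next letter:", decodePrediction(tp.getPredictedState())
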
Example #3
    # reset() signals the end of the sequence and zeros out all the states.
    # It is not strictly necessary, but it's a bit messier without resets,
    # and the TP learns quicker with resets.
    tp.reset()

#######################################################################
#
# Step 3: send the same sequence of vectors and look at the predictions made
# by the temporal pooler
for j in range(5):
    print "\n\n--------", "ABCDE"[j], "-----------"
    print "Raw input vector\n", formatRow(x[j])

    # Send each vector to the TP, with learning turned off
    tp.compute(x[j], enableLearn=False, computeInfOutput=True)

    # This method prints out the active state of each cell followed by the
    # predicted state of each cell. For convenience the cells are grouped
    # 10 at a time. When there are multiple cells per column the printout
    # is arranged so the cells in a column are stacked together
    #
    # What you should notice is that the columns where active state is 1
    # represent the SDR for the current input pattern and the columns where
    # predicted state is 1 represent the SDR for the next expected pattern
    tp.printStates(printPrevious=False, printLearnState=False)

#######################################################################
#
# This command prints the segments associated with every single cell
# This is commented out because it prints a ton of stuff.
#tp.printCells()
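
If the full per-cell dump from printCells() is too verbose, the column-level predictions alone can be summarized with getPredictedState(), the accessor Example #2 already uses. A minimal sketch, assuming tp and x are still in scope:

# One line per step: which columns does the TP expect to be active next?
tp.reset()
for j in range(5):
    tp.compute(x[j], enableLearn=False, computeInfOutput=True)
    predictedCols = tp.getPredictedState().max(axis=1).nonzero()[0]
    print "ABCDE"[j], "-> predicted columns:", list(predictedCols)
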
Example #4
import numpy

from nupic.research.TP import TP


def main():

    # create Temporal Pooler instance
    tp = TP(numberOfCols=50,           # number of columns
            cellsPerColumn=2,          # number of cells per column
            initialPerm=0.5,           # initial permanence of new synapses
            connectedPerm=0.5,         # permanence threshold for a synapse to count as connected
            minThreshold=10,           # min active synapses for a segment to be considered during learning
            newSynapseCount=10,        # max synapses added to a segment during learning
            permanenceInc=0.1,         # permanence increment
            permanenceDec=0.0,         # permanence decrement
            activationThreshold=8,     # active connected synapses needed to activate a segment
            globalDecay=0,             # global decay of synapse permanences (0 disables it)
            burnIn=1,                  # used for evaluating the prediction score
            checkSynapseConsistency=False,
            pamLength=10               # time steps to stay in "Pay Attention Mode" after a sequence ends
            )

    # create input vectors to feed to the temporal pooler.
    # Each input vector must be numberOfCols wide.
    # Here we create a simple sequence of 5 vectors
    # representing the sequence A -> B -> C -> D -> E
    x = numpy.zeros((5, tp.numberOfCols), dtype="uint32")
    x[0, 0:10] = 1     # A
    x[1, 10:20] = 1    # B
    x[2, 20:30] = 1    # C
    x[3, 30:40] = 1    # D
    x[4, 40:50] = 1    # E

    print x


    # repeat the sequence 10 times
    for i in range(10):
        # Send each letter in the sequence in order
        # A -> B -> C -> D -> E
        print
        print
        print '#### :', i
        for j in range(5):
            tp.compute(x[j], enableLearn = True, computeInfOutput=True)
            #tp.printCells(predictedOnly=False)
            tp.printStates(printPrevious = False, printLearnState = False)

        # Signal the end of the sequence. Not strictly required, but the TP
        # learns faster with resets.
        tp.reset()


    for j in range(5):
        print "\n\n--------","ABCDE"[j],"-----------"
        print "Raw input vector\n",formatRow(x[j])

        # Send each vector to the TP, with learning turned off
        tp.compute(x[j], enableLearn = False, computeInfOutput = True)

        # print predict state
        print "\nAll the active and predicted cells:"
        tp.printStates(printPrevious = False, printLearnState = False)

        # get predict state
        print "\n\nThe following columns are predicted by the temporal pooler. This"
        print "should correspond to columns in the *next* item in the sequence."
        predictedCells = tp.getPredictedState()
        print formatRow(predictedCells.max(axis=1).nonzero())
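
Like Example #2, this excerpt relies on a formatRow helper that is never defined here. In NuPIC's hello-TP tutorial it simply renders a row as a string of digits with a space every ten entries; a stand-in to that effect, plus a driver, makes the excerpt runnable (both are reconstructions, not part of the original):

def formatRow(x):
    # Render a vector as a string of its elements, inserting a space every
    # ten entries for readability (mirrors the tutorial helper).
    s = ''
    for c in range(len(x)):
        if c > 0 and c % 10 == 0:
            s += ' '
        s += str(x[c])
    return s


if __name__ == "__main__":
    main()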