Example #1
class Client(object):

  def __init__(self):
    self.tp = TP(numberOfCols=16384, cellsPerColumn=8,
                initialPerm=0.5, connectedPerm=0.5,
                minThreshold=164, newSynapseCount=164,
                permanenceInc=0.1, permanenceDec=0.0,
                activationThreshold=164, # 1/2 of the on bits = (16384 * .02) / 2
                globalDecay=0, burnIn=1,
                checkSynapseConsistency=False,
                pamLength=10)


  def feed(self, sdr):
    tp = self.tp
    narr = numpy.array(sdr, dtype="uint32")
    tp.compute(narr, enableLearn = True, computeInfOutput = True)

    predicted_cells = tp.getPredictedState()
    # print predicted_cells.tolist()
    predicted_columns = predicted_cells.max(axis=1)
    # print predicted_columns.tolist()
    # import pdb; pdb.set_trace()
    return predicted_columns.nonzero()[0].tolist()


  def reset(self):
    self.tp.reset()
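
# Usage sketch for the Client above (assumptions: TP and numpy are already
# imported, as the class itself requires). feed() takes a dense binary vector
# of length numberOfCols and returns the indices of the columns the TP
# predicts for the next step.
client = Client()

sdrA = [0] * 16384
sdrB = [0] * 16384
for i in range(328):              # ~2% of the columns on in each pattern
  sdrA[i] = 1
  sdrB[8000 + i] = 1

# Learn the two-step sequence A -> B a few times, resetting between passes.
for _ in range(10):
  client.feed(sdrA)
  client.feed(sdrB)
  client.reset()

# After training, feeding sdrA should predict (roughly) the columns of sdrB.
print client.feed(sdrA)[:10]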
Example #2
def main(SEED):
    # Generate the input
    numOnBitsPerPattern = 3
    (numCols, trainingSequences) = buildOverlappedSequences(
            numSequences        = 2,                    # number of sequences to generate
            seqLen              = 5,                    # length of each sequence
            sharedElements      = [2,3],                # indices of the elements shared between the different sequences
            numOnBitsPerPattern = numOnBitsPerPattern,  # number of active columns per pattern
            patternOverlap      = 0                     # number of overlapping active columns between patterns
            )


    print numCols
    for sequence in trainingSequences:
        print sequence


    # Create the TP
    tp = TP(
            numberOfCols          = numCols,
            cellsPerColumn        = 2,
            initialPerm           = 0.6,
            connectedPerm         = 0.5,
            minThreshold          = 3,
            newSynapseCount       = 3,
            permanenceInc         = 0.1,
            permanenceDec         = 0.0,
            activationThreshold   = 3,
            globalDecay           = 0.0,
            burnIn                = 1,
            seed                  = SEED,
            verbosity             = 0,
            checkSynapseConsistency  = True,
            pamLength                = 1
            )

    # Train the TP
    for _ in range(10):
        for seq_num, sequence in enumerate(trainingSequences):
            for x in sequence:
                x = numpy.array(x).astype('float32')
                tp.compute(x, enableLearn = True, computeInfOutput=True)
                #tp.printStates(False, False)
            tp.reset()


    # Predict with the TP (learning off)
    for seq_num, sequence in enumerate(trainingSequences):
        for x in sequence:
            x = numpy.array(x).astype('float32')
            tp.compute(x, enableLearn = False, computeInfOutput = True)
            tp.printStates(False, False)
Example #3
class Client(object):
    def __init__(self,
                 numberOfCols=1024,
                 cellsPerColumn=8,
                 initialPerm=0.5,
                 connectedPerm=0.5,
                 minThreshold=164,
                 newSynapseCount=164,
                 permanenceInc=0.1,
                 permanenceDec=0.0,
                 activationThreshold=20,
                 pamLength=10):

        self.tp = TP(
            numberOfCols=numberOfCols,
            cellsPerColumn=cellsPerColumn,
            initialPerm=initialPerm,
            connectedPerm=connectedPerm,
            minThreshold=minThreshold,
            newSynapseCount=newSynapseCount,
            permanenceInc=permanenceInc,
            permanenceDec=permanenceDec,

            # roughly the number of on bits: 1024 * .02 = ~20
            activationThreshold=activationThreshold,
            globalDecay=0,
            burnIn=1,
            checkSynapseConsistency=False,
            pamLength=pamLength)

    def feed(self, sdr):
        tp = self.tp
        narr = numpy.array(sdr, dtype="uint32")
        tp.compute(narr, enableLearn=True, computeInfOutput=True)

        predicted_cells = tp.getPredictedState()
        # print predicted_cells.tolist()
        predicted_columns = predicted_cells.max(axis=1)
        # print predicted_columns.tolist()
        # import pdb; pdb.set_trace()
        return predicted_columns.nonzero()[0].tolist()

    def printParameters(self):
        """
        Print CLA parameters
        """
        self.tp.printParameters()

    def reset(self):
        self.tp.reset()
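
# Sketch: constructing the Client above with non-default TP parameters. The
# values below simply rescale the defaults to a 2048-column input at ~2%
# sparsity (about 41 on bits, with thresholds set to roughly half of that).
client = Client(numberOfCols=2048,
                cellsPerColumn=8,
                minThreshold=20,
                newSynapseCount=20,
                activationThreshold=20,
                pamLength=10)
client.printParameters()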
Example #4
class Model():


  def __init__(self,
               numberOfCols=16384, cellsPerColumn=8,
                initialPerm=0.5, connectedPerm=0.5,
                minThreshold=164, newSynapseCount=164,
                permanenceInc=0.1, permanenceDec=0.0,
                activationThreshold=164,
                pamLength=10,
                checkpointDir=None):

    self.tp = TP(numberOfCols=numberOfCols, cellsPerColumn=cellsPerColumn,
                initialPerm=initialPerm, connectedPerm=connectedPerm,
                minThreshold=minThreshold, newSynapseCount=newSynapseCount,
                permanenceInc=permanenceInc, permanenceDec=permanenceDec,
                
                # 1/2 of the on bits = (16384 * .02) / 2
                activationThreshold=activationThreshold,
                globalDecay=0, burnIn=1,
                checkSynapseConsistency=False,
                pamLength=pamLength)

    self.checkpointDir = checkpointDir
    self.checkpointPath = None
    self._initCheckpoint()


  def _initCheckpoint(self):
    if self.checkpointDir:
      if not os.path.exists(self.checkpointDir):
        os.makedirs(self.checkpointDir)

      self.checkpointPath = self.checkpointDir + "/model.data"


  def canCheckpoint(self):
    return self.checkpointDir is not None


  def hasCheckpoint(self):
    return os.path.exists(self.checkpointPath)


  def load(self):
    if not self.checkpointDir:
      raise Exception("No checkpoint directory specified")

    if not self.hasCheckpoint():
      raise Exception("Could not find checkpoint file")

    self.tp.loadFromFile(self.checkpointPath)


  def save(self):
    if not self.checkpointDir:
      raise Exception("No checkpoint directory specified")

    self.tp.saveToFile(self.checkpointPath)


  def feedTerm(self, term):
    """ Feed a Term to model, returning next predicted Term """
    tp = self.tp
    array = numpy.array(term.toArray(), dtype="uint32")
    tp.compute(array, enableLearn = True, computeInfOutput = True)

    predictedCells = tp.getPredictedState()
    predictedColumns = predictedCells.max(axis=1)
    
    predictedBitmap = predictedColumns.nonzero()[0].tolist()
    return Term().createFromBitmap(predictedBitmap)
  

  def resetSequence(self):
    self.tp.reset()
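
# Sketch: checkpoint round trip for the Model above. The directory name is
# illustrative; the TP state itself is written to <checkpointDir>/model.data.
model = Model(checkpointDir="/tmp/tp_checkpoint")
# ... feed some Terms with model.feedTerm(...) ...
model.save()

restored = Model(checkpointDir="/tmp/tp_checkpoint")
if restored.hasCheckpoint():
  restored.load()   # resume from the saved TP state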
Example #5
class Model():
    def __init__(self,
                 numberOfCols=16384,
                 cellsPerColumn=8,
                 initialPerm=0.5,
                 connectedPerm=0.5,
                 minThreshold=164,
                 newSynapseCount=164,
                 permanenceInc=0.1,
                 permanenceDec=0.0,
                 activationThreshold=164,
                 pamLength=10,
                 checkpointDir=None):

        self.tp = TP(
            numberOfCols=numberOfCols,
            cellsPerColumn=cellsPerColumn,
            initialPerm=initialPerm,
            connectedPerm=connectedPerm,
            minThreshold=minThreshold,
            newSynapseCount=newSynapseCount,
            permanenceInc=permanenceInc,
            permanenceDec=permanenceDec,

            # 1/2 of the on bits = (16384 * .02) / 2
            activationThreshold=activationThreshold,
            globalDecay=0,
            burnIn=1,
            checkSynapseConsistency=False,
            pamLength=pamLength)

        self.checkpointDir = checkpointDir
        self.checkpointPath = None
        self._initCheckpoint()

    def _initCheckpoint(self):
        if self.checkpointDir:
            if not os.path.exists(self.checkpointDir):
                os.mkdir(self.checkpointDir)

            self.checkpointPath = self.checkpointDir + "/model.data"

    def canCheckpoint(self):
        return self.checkpointDir is not None

    def hasCheckpoint(self):
        return os.path.exists(self.checkpointPath)

    def load(self):
        if not self.checkpointDir:
            raise Exception("No checkpoint directory specified")

        if not self.hasCheckpoint():
            raise Exception("Could not find checkpoint file")

        self.tp.loadFromFile(self.checkpointPath)

    def save(self):
        if not self.checkpointDir:
            raise Exception("No checkpoint directory specified")

        self.tp.saveToFile(self.checkpointPath)

    def feedTerm(self, term):
        """ Feed a Term to model, returning next predicted Term """
        tp = self.tp
        array = numpy.array(term.toArray(), dtype="uint32")
        tp.compute(array, enableLearn=True, computeInfOutput=True)

        predictedCells = tp.getPredictedState()
        predictedColumns = predictedCells.max(axis=1)

        predictedBitmap = predictedColumns.nonzero()[0].tolist()
        return Term().createFromBitmap(predictedBitmap)

    def resetSequence(self):
        self.tp.reset()
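
# Sketch: an online training loop for the Model above. Term is assumed to be
# the SDR wrapper used elsewhere in this project (it must provide toArray()
# and createFromBitmap()), and "sentences" stands in for your own corpus of
# Term sequences.
model = Model()
for sentence in sentences:                 # each sentence: a list of Term objects
    for term in sentence:
        predicted = model.feedTerm(term)   # prediction for the *next* term
    model.resetSequence()                  # mark the sequence boundary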
Example #6
    # The compute method performs one step of learning and/or inference. Note:
    # here we just perform learning but you can perform prediction/inference and
    # learning in the same step if you want (online learning).
    tp.compute(x[j], enableLearn = True, computeInfOutput = False)

    # This function prints the segments associated with every cell.    
    # If you really want to understand the TP, uncomment this line. By following
    # every step you can get an excellent understanding for exactly how the TP
    # learns.
    #tp.printCells()

  # The reset command tells the TP that a sequence just ended and essentially
  # zeros out all the states. It is not strictly necessary but it's a bit
  # messier without resets, and the TP learns quicker with resets.
  tp.reset()
  

#######################################################################
#
# Step 3: send the same sequence of vectors and look at predictions made by
# temporal pooler
for j in range(5):
  print "\n\n--------","ABCDE"[j],"-----------"
  print "Raw input vector\n",formatRow(x[j])
  
  # Send each vector to the TP, with learning turned off
  tp.compute(x[j], enableLearn = False, computeInfOutput = True)
  
  # This method prints out the active state of each cell followed by the
  # predicted state of each cell. For convenience the cells are grouped
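
# formatRow() is used above but not defined in this snippet. A minimal sketch
# of such a helper: render a binary vector as a string, grouped 10 bits at a
# time for readability.
def formatRow(x):
  s = ''
  for c in range(len(x)):
    if c > 0 and c % 10 == 0:
      s += ' '
    s += str(x[c])
  return s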
Example #7
# Step 3: send this simple sequence to the temporal pooler for learning
# We repeat the sequence 10 times
for i in range(10):

    # Send each letter in the sequence in order
    for j in range(5):

        # The compute method performs one step of learning and/or inference. Note:
        # here we just perform learning but you can perform prediction/inference and
        # learning in the same step if you want (online learning).
        tp.compute(x[j], enableLearn=True, computeInfOutput=False)

    # The reset command tells the TP that a sequence just ended and essentially
    # zeros out all the states. It is not strictly necessary but it's a bit
    # messier without resets, and the TP learns quicker with resets.
    tp.reset()

#######################################################################
#
# Step 4: send the same sequence of vectors and look at predictions made by
# temporal pooler
for j in range(5):
    print "\n\n--------", "ABCDE"[j], "-----------"
    print "Raw input vector\n", formatRow(x[j])

    # Send each vector to the TP, with learning turned off
    tp.compute(x[j], enableLearn=False, computeInfOutput=True)

    # This method prints out the active state of each cell followed by the
    # predicted state of each cell. For convenience the cells are grouped
    # 10 at a time. When there are multiple cells per column the printout
Example #8
class Model():


  def __init__(self,
               numberOfCols=20480, cellsPerColumn=8,
                initialPerm=0.5, connectedPerm=0.5,
                minThreshold=164, newSynapseCount=164,
                permanenceInc=0.1, permanenceDec=0.0,
                activationThreshold=164,
                pamLength=10,
                checkpointDir=None):

    self.tp = TP(numberOfCols=numberOfCols, cellsPerColumn=cellsPerColumn,
                initialPerm=initialPerm, connectedPerm=connectedPerm,
                minThreshold=minThreshold, newSynapseCount=newSynapseCount,
                permanenceInc=permanenceInc, permanenceDec=permanenceDec,
                
                # 1/2 of the on bits in the 16384-bit term SDR = (16384 * .02) / 2
                activationThreshold=activationThreshold,
                globalDecay=0, burnIn=1,
                checkSynapseConsistency=False,
                pamLength=pamLength)

    self.phonemes = [
                  "AA",
                  "AE",
                  "AH",
                  "AO",
                  "AW",
                  "AY",
                  "B",
                  "CH",
                  "D",
                  "DH",
                  "EH",
                  "ER",
                  "EY",
                  "F",
                  "G",
                  "HH",
                  "IH",
                  "IY",
                  "JH",
                  "K",
                  "L",
                  "M",
                  "N",
                  "NG",
                  "OW",
                  "OY",
                  "P",
                  "R",
                  "S",
                  "SH",
                  "T",
                  "TH",
                  "UH",
                  "UW",
                  "V",
                  "W",
                  "Y",
                  "Z",
                  "ZH",
                  "SIL"
                ]

    self.checkpointDir = checkpointDir
    self.checkpointPklPath = None
    self.checkpointDataPath = None
    self._initCheckpoint()


  def _initCheckpoint(self):
    if self.checkpointDir:
      if not os.path.exists(self.checkpointDir):
        os.makedirs(self.checkpointDir)

      self.checkpointPklPath = self.checkpointDir + "/model.pkl"
      self.checkpointDataPath = self.checkpointDir + "/model.data"


  def canCheckpoint(self):
    return self.checkpointDir is not None


  def hasCheckpoint(self):
    return (os.path.exists(self.checkpointPklPath) and
            os.path.exists(self.checkpointDataPath))


  def load(self):
    if not self.checkpointDir:
      raise Exception("No checkpoint directory specified")

    if not self.hasCheckpoint():
      raise Exception("Could not find checkpoint file")

    with open(self.checkpointPklPath, 'rb') as f:
      self.tp = pickle.load(f)

    self.tp.loadFromFile(self.checkpointDataPath)


  def save(self):
    if not self.checkpointDir:
      raise Exception("No checkpoint directory specified")

    self.tp.saveToFile(self.checkpointDataPath)

    with open(self.checkpointPklPath, 'wb') as f:
      pickle.dump(self.tp, f)


  def feedTermAndPhonemes(self, term, phonemes_arr, learn=True):
    """ Feed a Term to model, returning next predicted Term """
    tp = self.tp
    array = term.toArray()
    array += self.phonemeToBytes(phonemes_arr)
    array = numpy.array(array, dtype="uint32")
    tp.compute(array, enableLearn = learn, computeInfOutput = True)

    predictedCells = tp.getPredictedState()
    predictedColumns = predictedCells.max(axis=1)

    # get only the first 16384 bits back
    
    predictedBitmap = predictedColumns[:16384].nonzero()[0].tolist()
    return Term().createFromBitmap(predictedBitmap)
  

  def resetSequence(self):
    self.tp.reset()

  def phonemeToBytes(self, phonemes_arr):
    """
    param: python array of phonemes
    ex: ["AA", "L", "OW"]
    """
    # Encode up to 4 phonemes into one 1024-bit slot each: 40 phonemes gives
    # 25 bits per phoneme, plus zero padding so every slot is exactly 1024
    # bits wide and the total input length matches numberOfCols.
    bitsPerPhoneme = 1024 // len(self.phonemes)
    padding = 1024 - bitsPerPhoneme * len(self.phonemes)
    phonemes_bytes = []
    for i in range(0, 4):
      if i < len(phonemes_arr):
        for phoneme in self.phonemes:
          if phonemes_arr[i] == phoneme:
            phonemes_bytes += [1] * bitsPerPhoneme
          else:
            phonemes_bytes += [0] * bitsPerPhoneme
        phonemes_bytes += [0] * padding
      else:
        phonemes_bytes += [0] * 1024
    return phonemes_bytes
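
# Sketch: what feedTermAndPhonemes() actually feeds the TP. The first 16384
# bits come from the Term SDR and the remaining 4 * 1024 bits encode up to
# four phonemes, which is why numberOfCols defaults to 20480.
model = Model()
phonemeBits = model.phonemeToBytes(["AA", "L", "OW"])
print len(phonemeBits)            # 4096 = 4 slots * 1024 bits
print 16384 + len(phonemeBits)    # 20480 == numberOfCols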
Example #9
def main():

    # create Temporal Pooler instance
    tp = TP(numberOfCols=50,           # number of columns
            cellsPerColumn=2,          # cells per column
            initialPerm=0.5,           # initial permanence
            connectedPerm=0.5,         # permanence threshold for a connected synapse
            minThreshold=10,           # lower bound on the distal dendrite segment threshold(?)
            newSynapseCount=10,        # ?
            permanenceInc=0.1,         # permanence increment
            permanenceDec=0.0,         # permanence decrement
            activationThreshold=8,     # a segment becomes active when at least this many of its synapses fire
            globalDecay=0,             # decrease permanence?
            burnIn=1,                  # Used for evaluating the prediction score
            checkSynapseConsistency=False,
            pamLength=10               # Number of time steps
            )

    # create input vectors to feed to the temporal pooler.
    # Each input vector must be numberOfCols wide.
    # Here we create a simple sequence of 5 vectors
    # representing the sequence A -> B -> C -> D -> E
    x = numpy.zeros((5,tp.numberOfCols), dtype="uint32")
    x[0, 0:10] = 1     # A
    x[1,10:20] = 1     # B
    x[2,20:30] = 1     # C
    x[3,30:40] = 1     # D
    x[4,40:50] = 1     # E

    print x


    # repeat the sequence 10 times
    for i in range(10):
        # Send each letter in the sequence in order
        # A -> B -> C -> D -> E
        print
        print
        print '#### :', i
        for j in range(5):
            tp.compute(x[j], enableLearn = True, computeInfOutput=True)
            #tp.printCells(predictedOnly=False)
            tp.printStates(printPrevious = False, printLearnState = False)

        # Tell the TP the sequence has just ended. Not strictly necessary, but learning is faster with resets.
        tp.reset()


    for j in range(5):
        print "\n\n--------","ABCDE"[j],"-----------"
        print "Raw input vector\n",formatRow(x[j])

        # Send each vector to the TP, with learning turned off
        tp.compute(x[j], enableLearn = False, computeInfOutput = True)

        # print predict state
        print "\nAll the active and predicted cells:"
        tp.printStates(printPrevious = False, printLearnState = False)

        # get predict state
        print "\n\nThe following columns are predicted by the temporal pooler. This"
        print "should correspond to columns in the *next* item in the sequence."
        predictedCells = tp.getPredictedState()
        print formatRow(predictedCells.max(axis=1).nonzero())
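
        # One way to read this prediction back out: each letter occupies a
        # dedicated block of 10 columns (A = 0-9, B = 10-19, ...), so the
        # predicted column indices map directly back to letters.
        predictedColumns = predictedCells.max(axis=1).nonzero()[0]
        predictedLetters = sorted(set("ABCDE"[int(col) // 10] for col in predictedColumns))
        print "predicted letters:", predictedLetters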