Example #1
# Imports for this snippet (its original import lines are cut off in this
# listing; module paths are inferred from the nupic/htmresearch code bases
# and may differ between versions):
import numpy as np

from nupic.encoders import SDRCategoryEncoder
from htmresearch.algorithms.apical_tiebreak_temporal_memory import (
  ApicalTiebreakPairMemory)
from htmresearch.support.apical_tm_pair_monitor_mixin import (
  ApicalTMPairMonitorMixin)


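# The mixin augments ApicalTiebreakPairMemory with monitoring hooks so TM
# activity can be recorded and inspected during the experiment.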
class MonitoredApicalTiebreakPairMemory(
  ApicalTMPairMonitorMixin, ApicalTiebreakPairMemory):
  pass


maxLength = 20
numTrainSequence = 50
numTestSequence = 50
rptPerSeq = 5

n = 2048
w = 40
# categoryList is defined earlier in the original file but cut off in this
# listing; a placeholder so the snippet runs stand-alone:
categoryList = ["cat%d" % i for i in range(10)]
enc = SDRCategoryEncoder(n, w, categoryList)

sdr_dict = dict()
for cat in categoryList:
  sdr = enc.encode(cat)
  sdr_dict[cat] = set(np.where(sdr)[0])
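
# A quick check (an added sketch, not part of the original snippet):
# SDRCategoryEncoder gives each category a quasi-random w-bit SDR, so
# distinct entries in sdr_dict should share at most a few active bits.
for a in categoryList:
  for b in categoryList:
    if a < b:
      overlap = len(sdr_dict[a] & sdr_dict[b])
      print("%s vs %s share %d of %d active bits" % (a, b, overlap, w))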

def initializeTM():
  DEFAULT_TEMPORAL_MEMORY_PARAMS = {"columnDimensions": (2048,),
                                    "cellsPerColumn": 32,
                                    "activationThreshold": 15,
                                    "initialPermanence": 0.41,
                                    "connectedPermanence": 0.5,
                                    "minThreshold": 10,
                                    "maxNewSynapseCount": 20,
Example #2
def getDescription(datasets):

    # ========================================================================
    # Network definition

    # Encoder for the sensor
    encoder = MultiEncoder()
    if 'filenameCategory' in datasets:
        categories = [x.strip() for x in open(datasets['filenameCategory'])]
    else:
        categories = [chr(x + ord('a')) for x in range(26)]

    if config['overlappingPatterns']:
        encoder.addEncoder(
            "name",
            SDRCategoryEncoder(n=200,
                               w=config['spNumActivePerInhArea'],
                               categoryList=categories,
                               name="name"))
    else:
        encoder.addEncoder(
            "name",
            CategoryEncoder(w=config['spNumActivePerInhArea'],
                            categoryList=categories,
                            name="name"))

    # ------------------------------------------------------------------
    # Node params
    # The inputs are long, horizontal vectors
    inputDimensions = (1, encoder.getWidth())

    # Layout the coincidences vertically stacked on top of each other, each
    # looking at the entire input field.
    columnDimensions = (config['spCoincCount'], 1)

    # If we have disableSpatial, then set the number of "coincidences" to be the
    #  same as the encoder width
    if config['disableSpatial']:
        columnDimensions = (encoder.getWidth(), 1)
        config['trainSP'] = 0

    sensorParams = dict(
        # encoder/datasource are not parameters so don't include here
        verbosity=config['sensorVerbosity'])

    CLAParams = dict(
        # SP params
        disableSpatial=config['disableSpatial'],
        inputDimensions=inputDimensions,
        columnDimensions=columnDimensions,
        potentialRadius=inputDimensions[1] / 2,
        potentialPct=1.00,
        gaussianDist=0,
        commonDistributions=0,  # should be False if possibly not training
        localAreaDensity=-1,  #0.05, 
        numActiveColumnsPerInhArea=config['spNumActivePerInhArea'],
        dutyCyclePeriod=1000,
        stimulusThreshold=1,
        synPermInactiveDec=0.11,
        synPermActiveInc=0.11,
        synPermActiveSharedDec=0.0,
        synPermOrphanDec=0.0,
        minPctDutyCycleBeforeInh=0.001,
        minPctDutyCycleAfterInh=0.001,
        spVerbosity=config['spVerbosity'],
        spSeed=1,
        printPeriodicStats=int(config['spPrintPeriodicStats']),

        # TM params
        tpSeed=1,
        disableTemporal=0 if config['trainTP'] else 1,
        temporalImp=config['temporalImp'],
        nCellsPerCol=config['tpNCellsPerCol'] if config['trainTP'] else 1,
        collectStats=1,
        burnIn=2,
        verbosity=config['tpVerbosity'],
        newSynapseCount=config['spNumActivePerInhArea'],
        minThreshold=config['spNumActivePerInhArea'],
        activationThreshold=config['spNumActivePerInhArea'],
        initialPerm=config['tpInitialPerm'],
        connectedPerm=0.5,
        permanenceInc=config['tpPermanenceInc'],
        permanenceDec=config['tpPermanenceDec'],  # perhaps tune this
        globalDecay=config['tpGlobalDecay'],
        pamLength=config['tpPAMLength'],
        maxSeqLength=config['tpMaxSeqLength'],
        maxAge=config['tpMaxAge'],

        # General params
        computeTopDown=config['computeTopDown'],
        trainingStep='spatial',
    )

    dataSource = FileRecordStream(datasets['filenameTrain'])

    description = dict(
        options=dict(logOutputsDuringInference=False, ),
        network=dict(sensorDataSource=dataSource,
                     sensorEncoder=encoder,
                     sensorParams=sensorParams,
                     CLAType='py.CLARegion',
                     CLAParams=CLAParams,
                     classifierType=None,
                     classifierParams=None),
    )

    if config['trainSP']:
        description['spTrain'] = dict(
            iterationCount=config['iterationCountTrain'],
            #iter=displaySPCoincidences(50),
            #finish=printSPCoincidences()
        )
    else:
        description['spTrain'] = dict(
            # need to train with one iteration just to initialize data structures
            iterationCount=1)

    if config['trainTP']:
        description['tpTrain'] = []
        for i in range(config['trainTPRepeats']):
            stepDict = dict(
                name='step_%d' % (i),
                setup=sensorRewind,
                iterationCount=config['iterationCountTrain'],
            )
            if config['tpTimingEvery'] > 0:
                stepDict['iter'] = printTPTiming(config['tpTimingEvery'])
                stepDict['finish'] = [printTPTiming(), printTPCells]

            description['tpTrain'].append(stepDict)

    # ----------------------------------------------------------------------------
    # Inference tests
    inferSteps = []

    if config['evalTrainingSetNumIterations'] > 0:
        # The training set. Used to train the n-grams.
        inferSteps.append(
            dict(
                name='confidenceTrain_baseline',
                iterationCount=min(config['evalTrainingSetNumIterations'],
                                   config['iterationCountTrain']),
                ppOptions=dict(
                    verbosity=config['ppVerbosity'],
                    printLearnedCoincidences=True,
                    nGrams='train',
                    #ipsDetailsFor = "name,None,2",
                ),
                #finish=printTPCells,
            ))

        # Testing the training set on both the TM and n-grams.
        inferSteps.append(
            dict(
                name='confidenceTrain_nonoise',
                iterationCount=min(config['evalTrainingSetNumIterations'],
                                   config['iterationCountTrain']),
                setup=[sensorOpen(datasets['filenameTrain'])],
                ppOptions=dict(
                    verbosity=config['ppVerbosity'],
                    printLearnedCoincidences=False,
                    nGrams='test',
                    burnIns=[1, 2, 3, 4],
                    #ipsDetailsFor = "name,None,2",
                    #ipsAt = [1,2,3,4],
                ),
            ))

    # The test set
    if datasets['filenameTest'] != datasets['filenameTrain']:
        inferSteps.append(
            dict(
                name='confidenceTest_baseline',
                iterationCount=config['iterationCountTest'],
                setup=[sensorOpen(datasets['filenameTest'])],
                ppOptions=dict(
                    verbosity=config['ppVerbosity'],
                    printLearnedCoincidences=False,
                    nGrams='test',
                    burnIns=[1, 2, 3, 4],
                    #ipsAt = [1,2,3,4],
                    ipsDetailsFor="name,None,2",
                ),
            ))

    description['infer'] = inferSteps

    return description
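
getDescription reads a module-level config dict. The sketch below is a
hypothetical minimal config covering every key the function references;
the values are placeholders, not tuned settings from the original project.

config = {
    'overlappingPatterns': False,
    'spNumActivePerInhArea': 40,
    'spCoincCount': 2048,
    'disableSpatial': False,
    'trainSP': 1,
    'sensorVerbosity': 0,
    'spVerbosity': 0,
    'spPrintPeriodicStats': 0,
    'trainTP': True,
    'temporalImp': 'py',
    'tpNCellsPerCol': 32,
    'tpVerbosity': 0,
    'tpInitialPerm': 0.11,
    'tpPermanenceInc': 0.10,
    'tpPermanenceDec': 0.10,
    'tpGlobalDecay': 0.0,
    'tpPAMLength': 1,
    'tpMaxSeqLength': 0,
    'tpMaxAge': 0,
    'computeTopDown': 1,
    'iterationCountTrain': 1000,
    'iterationCountTest': 250,
    'trainTPRepeats': 1,
    'tpTimingEvery': 0,
    'evalTrainingSetNumIterations': 250,
    'ppVerbosity': 0,
}

datasets = {'filenameTrain': 'train.csv',    # placeholder paths
            'filenameTest': 'test.csv'}
description = getDescription(datasets)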
Example #3
    def testMultiEncoder(self):
        """Testing MultiEncoder..."""

        e = MultiEncoder()

        # should be 7 bits wide
        # use of forced=True is not recommended, but used here for
        # readability; see scalar.py
        e.addEncoder(
            "dow",
            ScalarEncoder(w=3,
                          resolution=1,
                          minval=1,
                          maxval=8,
                          periodic=True,
                          name="day of week",
                          forced=True))
        # should be 14 bits wide
        e.addEncoder(
            "myval",
            ScalarEncoder(w=5,
                          resolution=1,
                          minval=1,
                          maxval=10,
                          periodic=False,
                          name="aux",
                          forced=True))
        self.assertEqual(e.getWidth(), 21)
        self.assertEqual(e.getDescription(), [("day of week", 0), ("aux", 7)])

        d = DictObj(dow=3, myval=10)
        expected = numpy.array([0, 1, 1, 1, 0, 0, 0] +
                               [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                               dtype="uint8")
        output = e.encode(d)
        self.assertTrue(numpy.array_equal(expected, output))

        # Check decoding
        decoded = e.decode(output)
        self.assertEqual(len(decoded), 2)
        (ranges, _) = decoded[0]["aux"]
        self.assertEqual(len(ranges), 1)
        self.assertTrue(numpy.array_equal(ranges[0], [10, 10]))
        (ranges, _) = decoded[0]["day of week"]
        self.assertTrue(
            len(ranges) == 1 and numpy.array_equal(ranges[0], [3, 3]))

        e.addEncoder(
            "myCat",
            SDRCategoryEncoder(n=7,
                               w=3,
                               categoryList=["run", "pass", "kick"],
                               forced=True))

        d = DictObj(dow=4, myval=6, myCat="pass")
        output = e.encode(d)
        topDownOut = e.topDownCompute(output)
        self.assertAlmostEqual(topDownOut[0].value, 4.5)
        self.assertEqual(topDownOut[1].value, 6.0)
        self.assertEqual(topDownOut[2].value, "pass")
        self.assertEqual(topDownOut[2].scalar, 2)
        self.assertEqual(topDownOut[2].encoding.sum(), 3)
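
The widths asserted above follow ScalarEncoder's geometry (inferred from
the values in this test rather than quoted from NuPIC docs): a periodic
encoder spans (maxval - minval) / resolution bits, a non-periodic one spans
(maxval - minval) / resolution + w bits, and MultiEncoder concatenates its
fields in the order they were added.

# Width arithmetic behind the assertions above (added sketch):
assert (8 - 1) / 1 == 7            # "dow": periodic, w=3
assert (10 - 1) / 1 + 5 == 14      # "myval": non-periodic, w=5
assert 7 + 14 == 21                # MultiEncoder width after two adds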
Example #4
File: description.py  Project: zacg/nupic
def getDescription(datasets):

  # ========================================================================
  # Encoder for the sensor
  encoder = MultiEncoder()

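  # Each field can be encoded either as a contiguous scalar representation
  # or as a random SDR per category; config['encodingFieldStyleA'] and
  # config['encodingFieldStyleB'] select the style per field.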
  if config['encodingFieldStyleA'] == 'contiguous':
    encoder.addEncoder('fieldA', ScalarEncoder(w=config['encodingOnBitsA'],
                        n=config['encodingFieldWidthA'], minval=0,
                        maxval=config['numAValues'], periodic=True, name='fieldA'))
  elif config['encodingFieldStyleA'] == 'sdr':
    encoder.addEncoder('fieldA', SDRCategoryEncoder(w=config['encodingOnBitsA'],
                        n=config['encodingFieldWidthA'],
                        categoryList=range(config['numAValues']), name='fieldA'))
  else:
    assert False, "unknown encodingFieldStyleA: %r" % config['encodingFieldStyleA']


  if config['encodingFieldStyleB'] == 'contiguous':
    encoder.addEncoder('fieldB', ScalarEncoder(w=config['encodingOnBitsB'], 
                      n=config['encodingFieldWidthB'], minval=0, 
                      maxval=config['numBValues'], periodic=True, name='fieldB'))
  elif config['encodingFieldStyleB'] == 'zero':
    encoder.addEncoder('fieldB', SDRRandomEncoder(w=0, n=config['encodingFieldWidthB'], 
                      name='fieldB'))
  elif config['encodingFieldStyleB'] == 'sdr':
    encoder.addEncoder('fieldB', SDRCategoryEncoder(w=config['encodingOnBitsB'], 
                      n=config['encodingFieldWidthB'], 
                      categoryList=range(config['numBValues']), name='fieldB'))
  else:
    assert False, "unknown encodingFieldStyleB: %r" % config['encodingFieldStyleB']



  # ========================================================================
  # Network definition


  # ------------------------------------------------------------------
  # Node params
  # The inputs are long, horizontal vectors
  inputShape = (1, encoder.getWidth())

  # Layout the coincidences vertically stacked on top of each other, each
  # looking at the entire input field. 
  coincidencesShape = (config['spCoincCount'], 1)
  inputBorder = inputShape[1]/2
  if inputBorder*2 >= inputShape[1]:
    inputBorder -= 1

  sensorParams = dict(
    # encoder/datasource are not parameters so don't include here
    verbosity=config['sensorVerbosity']
  )

  CLAParams = dict(
    inputShape = inputShape,
    inputBorder = inputBorder,
    coincidencesShape = coincidencesShape,
    coincInputRadius = inputShape[1]/2,
    coincInputPoolPct = 1.0,
    gaussianDist = 0,
    commonDistributions = 0,    # should be False if possibly not training
    localAreaDensity = -1, #0.05, 
    numActivePerInhArea = config['spNumActivePerInhArea'],
    dutyCyclePeriod = 1000,
    stimulusThreshold = 1,
    synPermInactiveDec = config['spSynPermInactiveDec'],
    synPermActiveInc = 0.02,
    synPermActiveSharedDec=0.0,
    synPermOrphanDec = 0.0,
    minPctDutyCycleBeforeInh = 0.001,
    minPctDutyCycleAfterInh = config['spMinPctDutyCycleAfterInh'],
    minDistance = 0.05,
    computeTopDown = 1,
    spVerbosity = config['spVerbosity'],
    spSeed = 1,
    printPeriodicStats = int(config['spPeriodicStats']),

    # TP params
    disableTemporal = 1,

    # General params
    trainingStep = 'spatial',
    )

  trainingDataSource = FileRecordStream(datasets['trainingFilename'])


  description = dict(
    options = dict(
      logOutputsDuringInference = False,
    ),

    network = dict(
      sensorDataSource = trainingDataSource,
      sensorEncoder = encoder, 
      sensorParams = sensorParams,

      CLAType = 'py.CLARegion',
      CLAParams = CLAParams,

      classifierType = None,
      classifierParams = None),

  )

  if config['trainSP']:
    description['spTrain'] = dict(
      iterationCount=config['iterationCount'], 
      #iter=displaySPCoincidences(50),
      finish=printSPCoincidences()
      )
  else:
    description['spTrain'] = dict(
      # need to train with one iteration just to initialize data structures
      iterationCount=1)



  # ============================================================================
  # Inference tests
  inferSteps = []

  # ----------------------------------------
  # Training dataset
  datasetName = 'bothTraining'
  inferSteps.append(
    dict(name = '%s_baseline' % datasetName,
         iterationCount = config['iterationCount'],
         setup = [sensorOpen(datasets['trainingFilename'])],
         ppOptions = dict(printLearnedCoincidences=True),
        )
    )

  inferSteps.append(
    dict(name = '%s_acc' % datasetName,
         iterationCount = config['iterationCount'],
         setup = [sensorOpen(datasets['trainingFilename'])],
         ppOptions = dict(onlyClassificationAcc=True,
                          tpActivationThresholds=config['tpActivationThresholds'],
                          computeDistances=True,
                          verbosity = 1),
        )
    )

  # ----------------------------------------
  # Testing dataset
  if 'testingFilename' in datasets:
    datasetName = 'bothTesting'
    inferSteps.append(
      dict(name = '%s_baseline' % datasetName, 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['testingFilename'])],
           ppOptions = dict(printLearnedCoincidences=False),
          )
      )

    inferSteps.append(
      dict(name = '%s_acc' % datasetName, 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['testingFilename'])],
           ppOptions = dict(onlyClassificationAcc=True,
                            tpActivationThresholds=config['tpActivationThresholds']),
          )
      )


  description['infer'] = inferSteps

  return description
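
A hypothetical driver for this variant (paths are placeholders; a
module-level config with the encoding*, sp*, iterationCount, and
tpActivationThresholds keys read above is assumed to exist):

datasets = {'trainingFilename': 'train.csv',
            'testingFilename': 'test.csv'}
description = getDescription(datasets)
print(description['network']['CLAType'])   # -> 'py.CLARegion'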