Example #1
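This and the four examples that follow are legacy NuPIC (Python 2) description.py experiment files. Each relies on names supplied by the surrounding experiment harness rather than defined in the snippet itself: the encoder classes, FileRecordStream, the config dict, and helpers such as sensorOpen and printSPCoincidences. A minimal import header for Example #1 might look like the sketch below; the module paths are assumptions based on the legacy NuPIC source layout and may differ between versions.

# Hypothetical import header -- module paths assumed from the legacy
# NuPIC tree; verify against the NuPIC version actually in use.
from nupic.encoders import MultiEncoder, ScalarEncoder, SDRCategoryEncoder
from nupic.data.file_record_stream import FileRecordStream

# 'config', 'sensorOpen', 'printSPCoincidences', and 'displaySPCoincidences'
# are expected to be provided by the prediction-experiment framework that
# loads and executes this description.py.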
def getDescription(datasets):

    # ========================================================================
    # Encoder for the sensor
    encoder = MultiEncoder()

    if config["encodingFieldStyleA"] == "contiguous":
        encoder.addEncoder(
            "fieldA",
            ScalarEncoder(
                w=config["encodingOnBitsA"],
                n=config["encodingFieldWidthA"],
                minval=0,
                maxval=config["numAValues"],
                periodic=True,
                name="fieldA",
            ),
        )
    elif config["encodingFieldStyleA"] == "sdr":
        encoder.addEncoder(
            "fieldA",
            SDRCategoryEncoder(
                w=config["encodingOnBitsA"],
                n=config["encodingFieldWidthA"],
                categoryList=range(config["numAValues"]),
                name="fieldA",
            ),
        )
    else:
        assert False, "Unknown encodingFieldStyleA: %r" % config["encodingFieldStyleA"]

    if config["encodingFieldStyleB"] == "contiguous":
        encoder.addEncoder(
            "fieldB",
            ScalarEncoder(
                w=config["encodingOnBitsB"],
                n=config["encodingFieldWidthB"],
                minval=0,
                maxval=config["numBValues"],
                periodic=True,
                name="fieldB",
            ),
        )
    elif config["encodingFieldStyleB"] == "sdr":
        encoder.addEncoder(
            "fieldB",
            SDRCategoryEncoder(
                w=config["encodingOnBitsB"],
                n=config["encodingFieldWidthB"],
                categoryList=range(config["numBValues"]),
                name="fieldB",
            ),
        )
    else:
        assert False, "Unknown encodingFieldStyleB: %r" % config["encodingFieldStyleB"]

    # ========================================================================
    # Network definition

    # ------------------------------------------------------------------
    # Node params
    # The inputs are long, horizontal vectors
    inputShape = (1, encoder.getWidth())

    # Lay out the coincidences vertically stacked on top of each other, each
    # looking at the entire input field.
    coincidencesShape = (config["spCoincCount"], 1)
    # inputBorder is half the input width, backed off by one when the width
    # is even so that 2 * inputBorder stays strictly below the width.
    inputBorder = inputShape[1] / 2
    if inputBorder * 2 >= inputShape[1]:
        inputBorder -= 1

    sensorParams = dict(
        # encoder/datasource are not parameters so don't include here
        verbosity=config["sensorVerbosity"]
    )

    CLAParams = dict(
        inputShape=inputShape,
        inputBorder=inputBorder,
        coincidencesShape=coincidencesShape,
        coincInputRadius=inputShape[1] / 2,
        coincInputPoolPct=1.0,
        gaussianDist=0,
        commonDistributions=0,  # should be False if possibly not training
        localAreaDensity=-1,  # 0.05,
        numActivePerInhArea=config["spNumActivePerInhArea"],
        dutyCyclePeriod=1000,
        stimulusThreshold=1,
        synPermInactiveDec=config["spSynPermInactiveDec"],
        synPermActiveInc=0.02,
        synPermActiveSharedDec=0.0,
        synPermOrphanDec=0.0,
        minPctDutyCycleBeforeInh=0.001,
        minPctDutyCycleAfterInh=config["spMinPctDutyCycleAfterInh"],
        minDistance=0.05,
        computeTopDown=1,
        spVerbosity=config["spVerbosity"],
        spSeed=1,
        printPeriodicStats=int(config["spPeriodicStats"]),
        # TP params
        disableTemporal=1,
        # General params
        trainingStep="spatial",
    )

    trainingDataSource = FileRecordStream(datasets["trainingFilename"])

    description = dict(
        options=dict(logOutputsDuringInference=False),
        network=dict(
            sensorDataSource=trainingDataSource,
            sensorEncoder=encoder,
            sensorParams=sensorParams,
            CLAType="py.CLARegion",
            CLAParams=CLAParams,
            classifierType=None,
            classifierParams=None,
        ),
    )

    if config["trainSP"]:
        description["spTrain"] = (
            dict(
                iterationCount=config["iterationCount"],
                # iter=displaySPCoincidences(50),
                finish=printSPCoincidences(),
            ),
        )
    else:
        description["spTrain"] = dict(
            # need to train with one iteration just to initialize data structures
            iterationCount=1
        )

    # ============================================================================
    # Inference tests
    inferSteps = []

    # ----------------------------------------
    # Training dataset (the 'if True:' is just a toggle so this block can be
    # switched off while debugging)
    if True:
        datasetName = "bothTraining"
        inferSteps.append(
            dict(
                name="%s_baseline" % datasetName,
                iterationCount=config["iterationCount"],
                setup=[sensorOpen(datasets["trainingFilename"])],
                ppOptions=dict(printLearnedCoincidences=True),
            )
        )

        inferSteps.append(
            dict(
                name="%s_acc" % datasetName,
                iterationCount=config["iterationCount"],
                setup=[sensorOpen(datasets["trainingFilename"])],
                ppOptions=dict(
                    onlyClassificationAcc=True,
                    tpActivationThresholds=config["tpActivationThresholds"],
                    computeDistances=True,
                    verbosity=1,
                ),
            )
        )

    # ----------------------------------------
    # Testing dataset
    if "testingFilename" in datasets:
        datasetName = "bothTesting"
        inferSteps.append(
            dict(
                name="%s_baseline" % datasetName,
                iterationCount=config["iterationCount"],
                setup=[sensorOpen(datasets["testingFilename"])],
                ppOptions=dict(printLearnedCoincidences=False),
            )
        )

        inferSteps.append(
            dict(
                name="%s_acc" % datasetName,
                iterationCount=config["iterationCount"],
                setup=[sensorOpen(datasets["testingFilename"])],
                ppOptions=dict(onlyClassificationAcc=True, tpActivationThresholds=config["tpActivationThresholds"]),
            )
        )

    description["infer"] = inferSteps

    return description
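Example #1 reads a module-level config dict that is never defined in the snippet. The sketch below lists every key the function actually touches; the values are illustrative placeholders, not settings from the original project.

# Hypothetical config covering every key Example #1 reads.
config = dict(
    encodingFieldStyleA="sdr",        # 'contiguous' or 'sdr'
    encodingOnBitsA=21,
    encodingFieldWidthA=300,
    numAValues=10,
    encodingFieldStyleB="sdr",        # 'contiguous' or 'sdr'
    encodingOnBitsB=21,
    encodingFieldWidthB=300,
    numBValues=10,
    sensorVerbosity=0,
    spCoincCount=200,
    spNumActivePerInhArea=5,
    spSynPermInactiveDec=0.01,
    spMinPctDutyCycleAfterInh=0.001,
    spVerbosity=0,
    spPeriodicStats=0,
    trainSP=True,
    iterationCount=1000,
    tpActivationThresholds=[8, 10, 12],
)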
Example #2
File: description.py | Project: runt18/nupic
def getDescription(datasets):

  # ========================================================================
  # Encoder for the sensor
  encoder = MultiEncoder()

  if config['encodingFieldStyleA'] == 'contiguous':
    encoder.addEncoder('fieldA', ScalarEncoder(w=config['encodingOnBitsA'],
                        n=config['encodingFieldWidthA'], minval=0,
                        maxval=config['numAValues'], periodic=True, name='fieldA'))
  elif config['encodingFieldStyleA'] == 'sdr':
    encoder.addEncoder('fieldA', SDRCategoryEncoder(w=config['encodingOnBitsA'],
                        n=config['encodingFieldWidthA'],
                        categoryList=range(config['numAValues']), name='fieldA'))
  else:
    assert False, "Unknown encodingFieldStyleA: %r" % config['encodingFieldStyleA']


  if config['encodingFieldStyleB'] == 'contiguous':
    encoder.addEncoder('fieldB', ScalarEncoder(w=config['encodingOnBitsB'], 
                      n=config['encodingFieldWidthB'], minval=0, 
                      maxval=config['numBValues'], periodic=True, name='fieldB'))
  elif config['encodingFieldStyleB'] == 'sdr':
    encoder.addEncoder('fieldB', SDRCategoryEncoder(w=config['encodingOnBitsB'], 
                      n=config['encodingFieldWidthB'], 
                      categoryList=range(config['numBValues']), name='fieldB'))
  else:
    assert False, "Unknown encodingFieldStyleB: %r" % config['encodingFieldStyleB']



  # ========================================================================
  # Network definition


  # ------------------------------------------------------------------
  # Node params
  # The inputs are long, horizontal vectors
  inputDimensions = (1, encoder.getWidth())

  # Lay out the coincidences vertically stacked on top of each other, each
  # looking at the entire input field.
  columnDimensions = (config['spCoincCount'], 1)

  sensorParams = dict(
    # encoder/datasource are not parameters so don't include here
    verbosity=config['sensorVerbosity']
  )

  CLAParams = dict(
    inputDimensions = inputDimensions,
    columnDimensions = columnDimensions,
    potentialRadius = inputDimensions[1]/2,
    potentialPct = 1.0,
    gaussianDist = 0,
    commonDistributions = 0,    # should be False if possibly not training
    localAreaDensity = -1, #0.05, 
    numActiveColumnsPerInhArea = config['spNumActivePerInhArea'],
    dutyCyclePeriod = 1000,
    stimulusThreshold = 1,
    synPermInactiveDec = config['spSynPermInactiveDec'],
    synPermActiveInc = 0.02,
    synPermActiveSharedDec=0.0,
    synPermOrphanDec = 0.0,
    minPctDutyCycleBeforeInh = 0.001,
    minPctDutyCycleAfterInh = config['spMinPctDutyCycleAfterInh'],
    minDistance = 0.05,
    computeTopDown = 1,
    spVerbosity = config['spVerbosity'],
    spSeed = 1,
    printPeriodicStats = int(config['spPeriodicStats']),

    # TP params
    disableTemporal = 1,

    # General params
    trainingStep = 'spatial',
    )

  trainingDataSource = FileRecordStream(datasets['trainingFilename'])


  description = dict(
    options = dict(
      logOutputsDuringInference = False,
    ),

    network = dict(
      sensorDataSource = trainingDataSource,
      sensorEncoder = encoder, 
      sensorParams = sensorParams,

      CLAType = 'py.CLARegion',
      CLAParams = CLAParams,

      classifierType = None,
      classifierParams = None),

  )

  if config['trainSP']:
    description['spTrain'] = dict(
      iterationCount=config['iterationCount'],
      #iter=displaySPCoincidences(50),
      finish=printSPCoincidences()
      )
  else:
    description['spTrain'] = dict(
      # need to train with one iteration just to initialize data structures
      iterationCount=1)



  # ============================================================================
  # Inference tests
  inferSteps = []

  # ----------------------------------------
  # Training dataset
  if True:
    datasetName = 'bothTraining'
    inferSteps.append(
      dict(name = '{0!s}_baseline'.format(datasetName), 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['trainingFilename'])],
           ppOptions = dict(printLearnedCoincidences=True),
          )
      )

    inferSteps.append(
      dict(name = '{0!s}_acc'.format(datasetName), 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['trainingFilename'])],
           ppOptions = dict(onlyClassificationAcc=True,
                            tpActivationThresholds=config['tpActivationThresholds'],
                            computeDistances=True,
                            verbosity = 1),
          )
      )

  # ----------------------------------------
  # Testing dataset
  if 'testingFilename' in datasets:
    datasetName = 'bothTesting'
    inferSteps.append(
      dict(name = '{0!s}_baseline'.format(datasetName), 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['testingFilename'])],
           ppOptions = dict(printLearnedCoincidences=False),
          )
      )

    inferSteps.append(
      dict(name = '{0!s}_acc'.format(datasetName), 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['testingFilename'])],
           ppOptions = dict(onlyClassificationAcc=True,
                            tpActivationThresholds=config['tpActivationThresholds']),
          )
      )


  description['infer'] = inferSteps

  return description
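Example #2 is the same experiment as Example #1, rewritten against the newer SpatialPooler parameter names. Diffing the two listings gives the rename map below (a sketch inferred from these examples, not an exhaustive migration table); Example #2 also drops inputBorder entirely.

# Legacy name               ->  newer SpatialPooler name
# (inferred by diffing Example #1 against Example #2)
PARAM_RENAMES = {
    "inputShape":          "inputDimensions",
    "coincidencesShape":   "columnDimensions",
    "coincInputRadius":    "potentialRadius",
    "coincInputPoolPct":   "potentialPct",
    "numActivePerInhArea": "numActiveColumnsPerInhArea",
}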
Example #3
def getDescription(datasets):
  encoder = MultiEncoder()
  encoder.addEncoder("date", DateEncoder(timeOfDay=3))
  encoder.addEncoder("amount", LogEncoder(name="amount", maxval=1000))
  for i in xrange(0, nRandomFields):
    s = ScalarEncoder(name="scalar", minval=0, maxval=randomFieldWidth, resolution=1, w=3)
    encoder.addEncoder("random%d" % i, s)

  dataSource = FunctionSource(generateFunction, dict(nRandomFields=nRandomFields,
                                                 randomFieldWidth=randomFieldWidth))

  inputShape = (1, encoder.getWidth())

  # Lay out the coincidences vertically stacked on top of each other, each
  # looking at the entire input field.
  coincidencesShape = (nCoincidences, 1)
  # TODO: why do we need input border?
  inputBorder = inputShape[1]/2
  if inputBorder*2 >= inputShape[1]:
    inputBorder -= 1


  nodeParams = dict()

  spParams = dict(
        commonDistributions=0,
        inputShape = inputShape,
        inputBorder = inputBorder,
        coincidencesShape = coincidencesShape,
        coincInputRadius = inputShape[1]/2,
        coincInputPoolPct = 0.75,
        gaussianDist = 0,
        localAreaDensity = 0.10,
        # localAreaDensity = 0.04,
        numActivePerInhArea = -1,
        dutyCyclePeriod = 1000,
        stimulusThreshold = 5,
        synPermInactiveDec=0.08,
        # synPermInactiveDec=0.02,
        synPermActiveInc=0.02,
        synPermActiveSharedDec=0.0,
        synPermOrphanDec = 0.0,
        minPctDutyCycleBeforeInh = 0.05,
        # minPctDutyCycleAfterInh = 0.1,
        # minPctDutyCycleBeforeInh = 0.05,
        minPctDutyCycleAfterInh = 0.05,
        # minPctDutyCycleAfterInh = 0.4,
        seed = 1,
  )

  otherParams = dict(
    disableTemporal=1,
    trainingStep='spatial',
  )

  nodeParams.update(spParams)
  nodeParams.update(otherParams)

  def mySetupCallback(experiment):
    print "Setup function called"

  description = dict(
    options = dict(
      logOutputsDuringInference = False,
    ),

    network = dict(
      sensorDataSource = dataSource,
      sensorEncoder = encoder,
      CLAType = "py.CLARegion",
      CLAParams = nodeParams,
      classifierType = None,
      classifierParams = None),

    # step
    spTrain = dict(
      name="phase1",
      setup=mySetupCallback,
      iterationCount=5000,
      #iter=displaySPCoincidences(100),
      finish=printSPCoincidences()),

    tpTrain = None,        # same format as spTrain if non-empty

    infer = None,          # same format as spTrain if non-empty

  )

  return description
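Example #3 (and Example #5, which is the same code reformatted) depends on module-level globals the snippet never defines: nRandomFields, randomFieldWidth, nCoincidences, and generateFunction. The following is a hypothetical sketch of those globals; in particular, the generateFunction signature assumes that FunctionSource invokes the callback with the state dict and expects a field-name-to-value record in return.

# Hypothetical globals for Example #3 -- values and the callback
# contract are assumptions, not taken from the original project.
import datetime
import random

nRandomFields = 5
randomFieldWidth = 100
nCoincidences = 200

def generateFunction(state):
    # Build one record with the field names the encoders expect.
    record = {
        "date": datetime.datetime.now(),
        "amount": random.uniform(1, 1000),
    }
    for i in xrange(state["nRandomFields"]):
        record["random%d" % i] = random.randint(0, state["randomFieldWidth"] - 1)
    return record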
Example #4
File: description.py | Project: zacg/nupic
def getDescription(datasets):

  # ========================================================================
  # Encoder for the sensor
  encoder = MultiEncoder()

  if config['encodingFieldStyleA'] == 'contiguous':
    encoder.addEncoder('fieldA', ScalarEncoder(w=config['encodingOnBitsA'],
                        n=config['encodingFieldWidthA'], minval=0,
                        maxval=config['numAValues'], periodic=True, name='fieldA'))
  elif config['encodingFieldStyleA'] == 'sdr':
    encoder.addEncoder('fieldA', SDRCategoryEncoder(w=config['encodingOnBitsA'],
                        n=config['encodingFieldWidthA'],
                        categoryList=range(config['numAValues']), name='fieldA'))
  else:
    assert False, "Unknown encodingFieldStyleA: %r" % config['encodingFieldStyleA']


  if config['encodingFieldStyleB'] == 'contiguous':
    encoder.addEncoder('fieldB', ScalarEncoder(w=config['encodingOnBitsB'], 
                      n=config['encodingFieldWidthB'], minval=0, 
                      maxval=config['numBValues'], periodic=True, name='fieldB'))
  elif config['encodingFieldStyleB'] == 'zero':
    encoder.addEncoder('fieldB', SDRRandomEncoder(w=0, n=config['encodingFieldWidthB'], 
                      name='fieldB'))
  elif config['encodingFieldStyleB'] == 'sdr':
    encoder.addEncoder('fieldB', SDRCategoryEncoder(w=config['encodingOnBitsB'], 
                      n=config['encodingFieldWidthB'], 
                      categoryList=range(config['numBValues']), name='fieldB'))
  else:
    assert False, "Unknown encodingFieldStyleB: %r" % config['encodingFieldStyleB']



  # ========================================================================
  # Network definition


  # ------------------------------------------------------------------
  # Node params
  # The inputs are long, horizontal vectors
  inputShape = (1, encoder.getWidth())

  # Lay out the coincidences vertically stacked on top of each other, each
  # looking at the entire input field.
  coincidencesShape = (config['spCoincCount'], 1)
  inputBorder = inputShape[1]/2
  if inputBorder*2 >= inputShape[1]:
    inputBorder -= 1

  sensorParams = dict(
    # encoder/datasource are not parameters so don't include here
    verbosity=config['sensorVerbosity']
  )

  CLAParams = dict(
    inputShape = inputShape,
    inputBorder = inputBorder,
    coincidencesShape = coincidencesShape,
    coincInputRadius = inputShape[1]/2,
    coincInputPoolPct = 1.0,
    gaussianDist = 0,
    commonDistributions = 0,    # should be False if possibly not training
    localAreaDensity = -1, #0.05, 
    numActivePerInhArea = config['spNumActivePerInhArea'],
    dutyCyclePeriod = 1000,
    stimulusThreshold = 1,
    synPermInactiveDec = config['spSynPermInactiveDec'],
    synPermActiveInc = 0.02,
    synPermActiveSharedDec=0.0,
    synPermOrphanDec = 0.0,
    minPctDutyCycleBeforeInh = 0.001,
    minPctDutyCycleAfterInh = config['spMinPctDutyCycleAfterInh'],
    minDistance = 0.05,
    computeTopDown = 1,
    spVerbosity = config['spVerbosity'],
    spSeed = 1,
    printPeriodicStats = int(config['spPeriodicStats']),

    # TP params
    disableTemporal = 1,

    # General params
    trainingStep = 'spatial',
    )

  trainingDataSource = FileRecordStream(datasets['trainingFilename'])


  description = dict(
    options = dict(
      logOutputsDuringInference = False,
    ),

    network = dict(
      sensorDataSource = trainingDataSource,
      sensorEncoder = encoder, 
      sensorParams = sensorParams,

      CLAType = 'py.CLARegion',
      CLAParams = CLAParams,

      classifierType = None,
      classifierParams = None),

  )

  if config['trainSP']:
    description['spTrain'] = dict(
      iterationCount=config['iterationCount'],
      #iter=displaySPCoincidences(50),
      finish=printSPCoincidences()
      )
  else:
    description['spTrain'] = dict(
      # need to train with one iteration just to initialize data structures
      iterationCount=1)



  # ============================================================================
  # Inference tests
  inferSteps = []

  # ----------------------------------------
  # Training dataset
  if True:
    datasetName = 'bothTraining'
    inferSteps.append(
      dict(name = '%s_baseline' % datasetName, 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['trainingFilename'])],
           ppOptions = dict(printLearnedCoincidences=True),
          )
      )

    inferSteps.append(
      dict(name = '%s_acc' % datasetName, 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['trainingFilename'])],
           ppOptions = dict(onlyClassificationAcc=True,
                            tpActivationThresholds=config['tpActivationThresholds'],
                            computeDistances=True,
                            verbosity = 1),
          )
      )

  # ----------------------------------------
  # Testing dataset
  if 'testingFilename' in datasets:
    datasetName = 'bothTesting'
    inferSteps.append(
      dict(name = '%s_baseline' % datasetName, 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['testingFilename'])],
           ppOptions = dict(printLearnedCoincidences=False),
          )
      )

    inferSteps.append(
      dict(name = '%s_acc' % datasetName, 
           iterationCount = config['iterationCount'],
           setup = [sensorOpen(datasets['testingFilename'])],
           ppOptions = dict(onlyClassificationAcc=True,
                            tpActivationThresholds=config['tpActivationThresholds']),
          )
      )


  description['infer'] = inferSteps

  return description
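All of the description-file variants above are consumed the same way: the framework calls getDescription with a datasets mapping. A minimal sketch of that call follows (the filenames are hypothetical); 'trainingFilename' is required, and supplying 'testingFilename' additionally enables the bothTesting inference steps.

datasets = {
    "trainingFilename": "data/train.csv",   # hypothetical path
    "testingFilename": "data/test.csv",     # optional; enables bothTesting
}
description = getDescription(datasets)
assert description["network"]["CLAType"] == "py.CLARegion"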
Example #5
def getDescription(datasets):
    encoder = MultiEncoder()
    encoder.addEncoder("date", DateEncoder(timeOfDay=3))
    encoder.addEncoder("amount", LogEncoder(name="amount", maxval=1000))
    for i in xrange(0, nRandomFields):
        s = ScalarEncoder(name="scalar",
                          minval=0,
                          maxval=randomFieldWidth,
                          resolution=1,
                          w=3)
        encoder.addEncoder("random%d" % i, s)

    dataSource = FunctionSource(
        generateFunction,
        dict(nRandomFields=nRandomFields, randomFieldWidth=randomFieldWidth))

    inputShape = (1, encoder.getWidth())

    # Lay out the coincidences vertically stacked on top of each other, each
    # looking at the entire input field.
    coincidencesShape = (nCoincidences, 1)
    # TODO: why do we need input border?
    inputBorder = inputShape[1] / 2
    if inputBorder * 2 >= inputShape[1]:
        inputBorder -= 1

    nodeParams = dict()

    spParams = dict(
        commonDistributions=0,
        inputShape=inputShape,
        inputBorder=inputBorder,
        coincidencesShape=coincidencesShape,
        coincInputRadius=inputShape[1] / 2,
        coincInputPoolPct=0.75,
        gaussianDist=0,
        localAreaDensity=0.10,
        # localAreaDensity = 0.04,
        numActivePerInhArea=-1,
        dutyCyclePeriod=1000,
        stimulusThreshold=5,
        synPermInactiveDec=0.08,
        # synPermInactiveDec=0.02,
        synPermActiveInc=0.02,
        synPermActiveSharedDec=0.0,
        synPermOrphanDec=0.0,
        minPctDutyCycleBeforeInh=0.05,
        # minPctDutyCycleAfterInh = 0.1,
        # minPctDutyCycleBeforeInh = 0.05,
        minPctDutyCycleAfterInh=0.05,
        # minPctDutyCycleAfterInh = 0.4,
        seed=1,
    )

    otherParams = dict(
        disableTemporal=1,
        trainingStep='spatial',
    )

    nodeParams.update(spParams)
    nodeParams.update(otherParams)

    def mySetupCallback(experiment):
        print "Setup function called"

    description = dict(
        options=dict(logOutputsDuringInference=False),
        network=dict(sensorDataSource=dataSource,
                     sensorEncoder=encoder,
                     CLAType="py.CLARegion",
                     CLAParams=nodeParams,
                     classifierType=None,
                     classifierParams=None),

        # step
        spTrain=dict(
            name="phase1",
            setup=mySetupCallback,
            iterationCount=5000,
            #iter=displaySPCoincidences(100),
            finish=printSPCoincidences()),
        tpTrain=None,  # same format as spTrain if non-empty
        infer=None,  # same format as spTrain if non-empty
    )

    return description
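A quick sanity check for Examples #3 and #5: MultiEncoder.getWidth() is the sum of the widths of its sub-encoders, so the spatial pooler sees a single input row of width width(date) + width(amount) + nRandomFields * width(scalar). The snippet below is a minimal sketch of that check, assuming the standard Encoder contract; it would run inside getDescription after the encoders are added.

# Minimal width check (Python 2, matching the examples above).
width = encoder.getWidth()
print "total encoder width:", width
assert inputShape == (1, width)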