Example #1
    def __createModel(self, expDir):
        # -----------------------------------------------------------------------
        # Load the experiment's description.py module
        descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
            expDir)
        expIface = helpers.getExperimentDescriptionInterfaceFromModule(
            descriptionPyModule)

        # -----------------------------------------------------------------------
        # Construct the model instance
        modelDescription = expIface.getModelDescription()
        return ModelFactory.create(modelDescription)
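
The load-then-create pattern in this method can be exercised on its own. Below is a minimal sketch, assuming a hypothetical experiment directory ./my_experiment that contains a valid description.py; the import paths are assumptions based on the nupic.frameworks.opf package referenced in the examples on this page:

# Assumed import paths for the helpers module and ModelFactory
from nupic.frameworks.opf import helpers
from nupic.frameworks.opf.model_factory import ModelFactory

# Hypothetical experiment directory containing a description.py file
expDir = "./my_experiment"

# Load description.py and wrap it in the experiment description interface
descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(expDir)
expIface = helpers.getExperimentDescriptionInterfaceFromModule(descriptionPyModule)

# Build an OPF model from the experiment's model description
model = ModelFactory.create(expIface.getModelDescription())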
Example #2
  def __createModel(self, expDir):
    # -----------------------------------------------------------------------
    # Load the experiment's description.py module
    descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
      expDir)
    expIface = helpers.getExperimentDescriptionInterfaceFromModule(
      descriptionPyModule)


    # -----------------------------------------------------------------------
    # Construct the model instance
    modelDescription = expIface.getModelDescription()
    return ModelFactory.create(modelDescription)
  def test_sub_experiment_override(self):
    expDir = g_myEnv.getOpfExperimentPath("gym")
    module = loadExperimentDescriptionScriptFromDir(expDir)

    expIface = getExperimentDescriptionInterfaceFromModule(module)

    modelDesc = expIface.getModelDescription()

    tpActivationThreshold = modelDesc['modelParams'] \
        ['tmParams']['activationThreshold']

    expectedValue = 12
    self.assertEqual(tpActivationThreshold, expectedValue,
                     "Expected tm activationThreshold=%s, but got %s" % (
                      expectedValue, tpActivationThreshold))
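
The test above reads a deeply nested entry from the model description dictionary. A minimal sketch of the same lookup with defensive .get() calls, assuming expIface was obtained as in the examples above:

modelDesc = expIface.getModelDescription()

# Same nested structure the test asserts on, read defensively
tmParams = modelDesc.get('modelParams', {}).get('tmParams', {})
activationThreshold = tmParams.get('activationThreshold')

print("tm activationThreshold: %s" % activationThreshold)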
Example #4
def getSwarmModelParams(modelID):
    """Retrieve the Engine-level model params from a Swarm model

  Args:
    modelID - Engine-level model ID of the Swarm model

  Returns:
    JSON-encoded string containing Model Params
  """

    # TODO: the use of nupic.frameworks.opf.helpers.loadExperimentDescriptionScriptFromDir when
    #  retrieving module params results in a leakage of pf_base_descriptionNN and
    #  pf_descriptionNN module imports for every call to getSwarmModelParams, so
    #  the leakage is unlimited when getSwarmModelParams is called by a
    #  long-running process.  An alternate solution is to execute the guts of
    #  this function's logic in a separate process (via multiprocessing module).

    cjDAO = ClientJobsDAO.get()

    (jobID, description) = cjDAO.modelsGetFields(modelID,
                                                 ["jobId", "genDescription"])

    (baseDescription, ) = cjDAO.jobGetFields(jobID, ["genBaseDescription"])

    # Construct a directory with base.py and description.py for loading model
    # params, and use nupic.frameworks.opf.helpers to extract model params from
    # those files
    descriptionDirectory = tempfile.mkdtemp()
    try:
        baseDescriptionFilePath = os.path.join(descriptionDirectory, "base.py")
        with open(baseDescriptionFilePath, mode="wb") as f:
            f.write(baseDescription)

        descriptionFilePath = os.path.join(descriptionDirectory,
                                           "description.py")
        with open(descriptionFilePath, mode="wb") as f:
            f.write(description)

        expIface = helpers.getExperimentDescriptionInterfaceFromModule(
            helpers.loadExperimentDescriptionScriptFromDir(
                descriptionDirectory))

        return json.dumps(
            dict(modelConfig=expIface.getModelDescription(),
                 inferenceArgs=expIface.getModelControl().get(
                     "inferenceArgs", None)))
    finally:
        shutil.rmtree(descriptionDirectory, ignore_errors=True)
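
Because getSwarmModelParams returns a JSON-encoded string, a caller typically decodes it before use. A minimal usage sketch, where someModelID is a hypothetical Engine-level model ID of an existing Swarm model:

import json

paramsJSON = getSwarmModelParams(someModelID)  # someModelID: hypothetical Swarm model ID
params = json.loads(paramsJSON)

modelConfig = params["modelConfig"]      # same structure as expIface.getModelDescription()
inferenceArgs = params["inferenceArgs"]  # may be None if not present in the model control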
Example #5
File: api.py  Project: Erichy94/nupic
def getSwarmModelParams(modelID):
  """Retrieve the Engine-level model params from a Swarm model

  Args:
    modelID - Engine-level model ID of the Swarm model

  Returns:
    JSON-encoded string containing Model Params
  """

  # TODO: the use of nupic.frameworks.opf.helpers.loadExperimentDescriptionScriptFromDir when
  #  retrieving module params results in a leakage of pf_base_descriptionNN and
  #  pf_descriptionNN module imports for every call to getSwarmModelParams, so
  #  the leakage is unlimited when getSwarmModelParams is called by a
  #  long-running process.  An alternate solution is to execute the guts of
  #  this function's logic in a separate process (via multiprocessing module).

  cjDAO = ClientJobsDAO.get()

  (jobID, description) = cjDAO.modelsGetFields(
    modelID,
    ["jobId", "genDescription"])

  (baseDescription,) = cjDAO.jobGetFields(jobID, ["genBaseDescription"])

  # Construct a directory with base.py and description.py for loading model
  # params, and use nupic.frameworks.opf.helpers to extract model params from
  # those files
  descriptionDirectory = tempfile.mkdtemp()
  try:
    baseDescriptionFilePath = os.path.join(descriptionDirectory, "base.py")
    with open(baseDescriptionFilePath, mode="wb") as f:
      f.write(baseDescription)

    descriptionFilePath = os.path.join(descriptionDirectory, "description.py")
    with open(descriptionFilePath, mode="wb") as f:
      f.write(description)

    expIface = helpers.getExperimentDescriptionInterfaceFromModule(
      helpers.loadExperimentDescriptionScriptFromDir(descriptionDirectory))

    return json.dumps(
      dict(
        modelConfig=expIface.getModelDescription(),
        inferenceArgs=expIface.getModelControl().get("inferenceArgs", None)))
  finally:
    shutil.rmtree(descriptionDirectory, ignore_errors=True)
Example #6
def _runExperimentImpl(options, model=None):
  """Creates and runs the experiment

  Args:
    options: namedtuple ParseCommandLineOptionsResult
    model: For testing: may pass in an existing OPF Model instance
        to use instead of creating a new one.

  Returns: reference to OPFExperiment instance that was constructed (this
      is provided to aid with debugging) or None, if none was
      created.
  """
  json_helpers.validate(options.privateOptions,
                        schemaDict=g_parsedPrivateCommandLineOptionsSchema)

  # Load the experiment's description.py module
  experimentDir = options.experimentDir
  descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
      experimentDir)
  expIface = helpers.getExperimentDescriptionInterfaceFromModule(
      descriptionPyModule)

  # Handle "list checkpoints" request
  if options.privateOptions['listAvailableCheckpoints']:
    _printAvailableCheckpoints(experimentDir)
    return None

  # Load experiment tasks
  experimentTasks = expIface.getModelControl().get('tasks', [])

  # If the tasks list is empty, and this is a nupic environment description
  # file being run from the OPF, convert it to a simple OPF description file.
  if (len(experimentTasks) == 0 and
      expIface.getModelControl()['environment'] == OpfEnvironment.Nupic):
    expIface.convertNupicEnvToOPF()
    experimentTasks = expIface.getModelControl().get('tasks', [])

  # Ensures all the source locations are either absolute paths or relative to
  # the nupic.datafiles package_data location.
  expIface.normalizeStreamSources()

  # Extract option
  newSerialization = options.privateOptions['newSerialization']

  # Handle listTasks
  if options.privateOptions['listTasks']:
    print "Available tasks:"

    for label in [t['taskLabel'] for t in experimentTasks]:
      print "\t", label

    return None

  # Construct the experiment instance
  if options.privateOptions['runCheckpointName']:

    assert model is None

    checkpointName = options.privateOptions['runCheckpointName']

    model = ModelFactory.loadFromCheckpoint(
          savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName),
          newSerialization=newSerialization)

  elif model is not None:
    print "Skipping creation of OPFExperiment instance: caller provided his own"
  else:
    modelDescription = expIface.getModelDescription()
    model = ModelFactory.create(modelDescription)

  # Handle "create model" request
  if options.privateOptions['createCheckpointName']:
    checkpointName = options.privateOptions['createCheckpointName']
    _saveModel(model=model,
               experimentDir=experimentDir,
               checkpointLabel=checkpointName,
               newSerialization=newSerialization)

    return model

  # Build the task list

  # Default task execution index list is in the natural list order of the tasks
  taskIndexList = range(len(experimentTasks))

  customTaskExecutionLabelsList = options.privateOptions['taskLabels']
  if customTaskExecutionLabelsList:
    taskLabelsList = [t['taskLabel'] for t in experimentTasks]
    taskLabelsSet = set(taskLabelsList)

    customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)

    assert customTaskExecutionLabelsSet.issubset(taskLabelsSet), \
           ("Some custom-provided task execution labels don't correspond "
            "to actual task labels: mismatched labels: %r; actual task "
            "labels: %r.") % (customTaskExecutionLabelsSet - taskLabelsSet,
                              customTaskExecutionLabelsList)

    taskIndexList = [taskLabelsList.index(label) for label in
                     customTaskExecutionLabelsList]

    print "#### Executing custom task list: %r" % [taskLabelsList[i] for
                                                   i in taskIndexList]

  # Run all experiment tasks
  for taskIndex in taskIndexList:

    task = experimentTasks[taskIndex]

    # Create a task runner and run it!
    taskRunner = _TaskRunner(model=model,
                             task=task,
                             cmdOptions=options)
    taskRunner.run()
    del taskRunner

    if options.privateOptions['checkpointModel']:
      _saveModel(model=model,
                 experimentDir=experimentDir,
                 checkpointLabel=task['taskLabel'],
                 newSerialization=newSerialization)

  return model
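
The task-selection logic near the end of this function can be read in isolation. A minimal sketch with made-up task labels standing in for the experiment's real tasks:

experimentTasks = [{'taskLabel': 'train'}, {'taskLabel': 'test'}]  # made-up tasks
customTaskExecutionLabelsList = ['test']                           # e.g. from --taskLabels

taskLabelsList = [t['taskLabel'] for t in experimentTasks]
assert set(customTaskExecutionLabelsList).issubset(set(taskLabelsList))

# Indices of the requested tasks, in the order they were requested
taskIndexList = [taskLabelsList.index(label)
                 for label in customTaskExecutionLabelsList]

print(taskIndexList)  # [1]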
Example #7
    def run(self):
        """ Runs the OPF Model

    Parameters:
    -------------------------------------------------------------------------
    retval:  (completionReason, completionMsg)
              where completionReason is one of the ClientJobsDAO.CMPL_REASON_XXX
                equates.
    """
        # -----------------------------------------------------------------------
        # Load the experiment's description.py module
        descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
            self._experimentDir)
        expIface = helpers.getExperimentDescriptionInterfaceFromModule(
            descriptionPyModule)
        expIface.normalizeStreamSources()

        modelDescription = expIface.getModelDescription()
        self._modelControl = expIface.getModelControl()

        # -----------------------------------------------------------------------
        # Create the input data stream for this task
        streamDef = self._modelControl['dataset']

        from nupic.data.stream_reader import StreamReader
        readTimeout = 0

        self._inputSource = StreamReader(streamDef,
                                         isBlocking=False,
                                         maxTimeout=readTimeout)

        # -----------------------------------------------------------------------
        # Get field statistics from the input source
        fieldStats = self._getFieldStats()
        # -----------------------------------------------------------------------
        # Construct the model instance
        self._model = ModelFactory.create(modelDescription)
        self._model.setFieldStatistics(fieldStats)
        self._model.enableLearning()
        self._model.enableInference(
            self._modelControl.get("inferenceArgs", None))

        # -----------------------------------------------------------------------
        # Instantiate the metrics
        self.__metricMgr = MetricsManager(
            self._modelControl.get('metrics', None),
            self._model.getFieldInfo(), self._model.getInferenceType())

        self.__loggedMetricPatterns = self._modelControl.get(
            "loggedMetrics", [])

        self._optimizedMetricLabel = self.__getOptimizedMetricLabel()
        self._reportMetricLabels = matchPatterns(self._reportKeyPatterns,
                                                 self._getMetricLabels())

        # -----------------------------------------------------------------------
        # Initialize periodic activities (e.g., for model result updates)
        self._periodic = self._initPeriodicActivities()

        # -----------------------------------------------------------------------
        # Create our top-level loop-control iterator
        numIters = self._modelControl.get('iterationCount', -1)

        # Are we asked to turn off learning for a certain # of iterations near the
        #  end?
        learningOffAt = None
        iterationCountInferOnly = self._modelControl.get(
            'iterationCountInferOnly', 0)
        if iterationCountInferOnly == -1:
            self._model.disableLearning()
        elif iterationCountInferOnly > 0:
            assert numIters > iterationCountInferOnly, "when iterationCountInferOnly " \
              "is specified, iterationCount must be greater than " \
              "iterationCountInferOnly."
            learningOffAt = numIters - iterationCountInferOnly

        self.__runTaskMainLoop(numIters, learningOffAt=learningOffAt)

        # -----------------------------------------------------------------------
        # Perform final operations for model
        self._finalize()

        return (self._cmpReason, None)
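
The iterationCountInferOnly handling near the bottom of run() is easiest to follow with concrete numbers. A minimal sketch of just that branch, with assumed values for numIters and iterationCountInferOnly:

numIters = 1000                # total iterations requested for the task (assumed)
iterationCountInferOnly = 100  # run inference-only for the last 100 iterations (assumed)

learningOffAt = None
if iterationCountInferOnly == -1:
  # learning stays off for the entire run (the model calls disableLearning())
  pass
elif iterationCountInferOnly > 0:
  assert numIters > iterationCountInferOnly
  learningOffAt = numIters - iterationCountInferOnly  # learning turned off at iteration 900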
Example #8
def _runExperimentImpl(options, model=None):
  """Creates and runs the experiment

  Args:
    options: namedtuple ParseCommandLineOptionsResult
    model: For testing: may pass in an existing OPF Model instance
        to use instead of creating a new one.

  Returns: reference to OPFExperiment instance that was constructed (this
      is provided to aid with debugging) or None, if none was
      created.
  """
  json_helpers.validate(options.privateOptions,
                        schemaDict=g_parsedPrivateCommandLineOptionsSchema)

  # Load the experiment's description.py module
  experimentDir = options.experimentDir
  descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
      experimentDir)
  expIface = helpers.getExperimentDescriptionInterfaceFromModule(
      descriptionPyModule)

  # Handle "list checkpoints" request
  if options.privateOptions['listAvailableCheckpoints']:
    _printAvailableCheckpoints(experimentDir)
    return None

  # Load experiment tasks
  experimentTasks = expIface.getModelControl().get('tasks', [])

  # If the tasks list is empty, and this is a nupic environment description
  # file being run from the OPF, convert it to a simple OPF description file.
  if (len(experimentTasks) == 0 and
      expIface.getModelControl()['environment'] == OpfEnvironment.Nupic):
    expIface.convertNupicEnvToOPF()
    experimentTasks = expIface.getModelControl().get('tasks', [])

  # Ensures all the source locations are either absolute paths or relative to
  # the nupic.datafiles package_data location.
  expIface.normalizeStreamSources()

  # Extract option
  newSerialization = options.privateOptions['newSerialization']

  # Handle listTasks
  if options.privateOptions['listTasks']:
    print("Available tasks:")

    for label in [t['taskLabel'] for t in experimentTasks]:
      print("\t", label)

    return None

  # Construct the experiment instance
  if options.privateOptions['runCheckpointName']:

    assert model is None

    checkpointName = options.privateOptions['runCheckpointName']

    model = ModelFactory.loadFromCheckpoint(
          savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName),
          newSerialization=newSerialization)

  elif model is not None:
    print("Skipping creation of OPFExperiment instance: caller provided his own")
  else:
    modelDescription = expIface.getModelDescription()
    model = ModelFactory.create(modelDescription)

  # Handle "create model" request
  if options.privateOptions['createCheckpointName']:
    checkpointName = options.privateOptions['createCheckpointName']
    _saveModel(model=model,
               experimentDir=experimentDir,
               checkpointLabel=checkpointName,
               newSerialization=newSerialization)

    return model

  # Build the task list

  # Default task execution index list is in the natural list order of the tasks
  taskIndexList = list(range(len(experimentTasks)))

  customTaskExecutionLabelsList = options.privateOptions['taskLabels']
  if customTaskExecutionLabelsList:
    taskLabelsList = [t['taskLabel'] for t in experimentTasks]
    taskLabelsSet = set(taskLabelsList)

    customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)

    assert customTaskExecutionLabelsSet.issubset(taskLabelsSet), \
           ("Some custom-provided task execution labels don't correspond "
            "to actual task labels: mismatched labels: %r; actual task "
            "labels: %r.") % (customTaskExecutionLabelsSet - taskLabelsSet,
                              customTaskExecutionLabelsList)

    taskIndexList = [taskLabelsList.index(label) for label in
                     customTaskExecutionLabelsList]

    print("#### Executing custom task list: %r" % [taskLabelsList[i] for
                                                   i in taskIndexList])

  # Run all experiment tasks
  for taskIndex in taskIndexList:

    task = experimentTasks[taskIndex]

    # Create a task runner and run it!
    taskRunner = _TaskRunner(model=model,
                             task=task,
                             cmdOptions=options)
    taskRunner.run()
    del taskRunner

    if options.privateOptions['checkpointModel']:
      _saveModel(model=model,
                 experimentDir=experimentDir,
                 checkpointLabel=task['taskLabel'],
                 newSerialization=newSerialization)

  return model
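
Restoring a previously saved checkpoint reuses the same ModelFactory call shown in the function above. A minimal sketch; savedModelDir is a hypothetical path to a directory written earlier by _saveModel, and the import path is an assumption:

from nupic.frameworks.opf.model_factory import ModelFactory  # assumed import path

# Hypothetical checkpoint directory produced by an earlier _saveModel call
savedModelDir = "/path/to/experiment/savedmodels/checkpoint_1"

# Mirrors the loadFromCheckpoint call in _runExperimentImpl above
model = ModelFactory.loadFromCheckpoint(savedModelDir=savedModelDir,
                                        newSerialization=False)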