Example #1
    def __createModel(self, expDir):
        # -----------------------------------------------------------------------
        # Load the experiment's description.py module
        descriptionPyModule = opfhelpers.loadExperimentDescriptionScriptFromDir(
            expDir)
        expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(
            descriptionPyModule)

        # -----------------------------------------------------------------------
        # Construct the model instance
        modelDescription = expIface.getModelDescription()
        return ModelFactory.create(modelDescription)
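
The two opfhelpers calls above are the recurring pattern in these examples for turning an experiment directory into an OPF model. A minimal standalone sketch of the same pattern (the experiment path is hypothetical, and the ModelFactory import path may differ between NuPIC versions):

from nupic.frameworks.opf import opfhelpers
from nupic.frameworks.opf.modelfactory import ModelFactory

# Hypothetical experiment directory containing a description.py
expDir = "/path/to/experiment"

# Load description.py and wrap it in the experiment description interface
descriptionPyModule = opfhelpers.loadExperimentDescriptionScriptFromDir(expDir)
expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(
    descriptionPyModule)

# getModelDescription() returns the dict that ModelFactory.create() consumes
model = ModelFactory.create(expIface.getModelDescription())
model.enableInference(expIface.getModelControl().get("inferenceArgs", None))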
Example #2
  def __createModel(self, expDir):
    # -----------------------------------------------------------------------
    # Load the experiment's description.py module
    descriptionPyModule = opfhelpers.loadExperimentDescriptionScriptFromDir(
      expDir)
    expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(
      descriptionPyModule)
    # -----------------------------------------------------------------------
    # Construct the model instance
    modelDescription = expIface.getModelDescription()
    return ModelFactory.create(modelDescription)
Example #3
def getSwarmModelParams(modelID):
    """Retrieve the Engine-level model params from a Swarm model
  
  Args:
    modelID - Engine-level model ID of the Swarm model
  
  Returns:
    JSON-encoded string containing Model Params
  """

    # TODO: the use of opfhelpers.loadExperimentDescriptionScriptFromDir when
    #  retrieving module params results in a leakage of pf_base_descriptionNN and
    #  pf_descriptionNN module imports for every call to getSwarmModelParams, so
    #  the leakage is unlimited when getSwarmModelParams is called by a
    #  long-running process. This issue is presently being
    #  tracked by the JIRA: https://issues.numenta.org/browse/NPC-225. An
    #  alternate solution is to execute the guts of this function's logic in a
    #  separate process (via the multiprocessing module).

    cjDAO = ClientJobsDAO.get()

    (jobID, description) = cjDAO.modelsGetFields(modelID,
                                                 ["jobId", "genDescription"])

    (baseDescription, ) = cjDAO.jobGetFields(jobID, ["genBaseDescription"])

    # Construct a directory with base.py and description.py for loading model
    # params, and use opfhelpers to extract model params from those files
    descriptionDirectory = tempfile.mkdtemp()
    try:
        baseDescriptionFilePath = os.path.join(descriptionDirectory, "base.py")
        with open(baseDescriptionFilePath, mode="wb") as f:
            f.write(baseDescription)

        descriptionFilePath = os.path.join(descriptionDirectory,
                                           "description.py")
        with open(descriptionFilePath, mode="wb") as f:
            f.write(description)

        expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(
            opfhelpers.loadExperimentDescriptionScriptFromDir(
                descriptionDirectory))

        return json.dumps(
            dict(modelConfig=expIface.getModelDescription(),
                 inferenceArgs=expIface.getModelControl().get(
                     "inferenceArgs", None)))
    finally:
        shutil.rmtree(descriptionDirectory, ignore_errors=True)
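
The JSON string assembled here pairs modelConfig (the getModelDescription() dict) with inferenceArgs, so a caller can rebuild a model from it. A rough sketch of consuming that payload with the function above (the model ID is hypothetical, and the ModelFactory import path may vary by NuPIC version):

import json

from nupic.frameworks.opf.modelfactory import ModelFactory

# Hypothetical Engine-level model ID obtained from swarm results
params = json.loads(getSwarmModelParams(modelID=12345))

# modelConfig is the same dict the other examples pass to ModelFactory.create;
# inferenceArgs tells the model which field to predict.
model = ModelFactory.create(params["modelConfig"])
model.enableInference(params["inferenceArgs"])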
Example #4
  def test_sub_experiment_override(self):
    expDir = g_myEnv.getOpfExperimentPath("gym")
    module = loadExperimentDescriptionScriptFromDir(expDir)

    expIface = getExperimentDescriptionInterfaceFromModule(module)

    modelDesc = expIface.getModelDescription()

    tpActivationThreshold = modelDesc['modelParams'] \
        ['tpParams']['activationThreshold']

    expectedValue = 12
    self.assertEqual(tpActivationThreshold, expectedValue,
                     "Expected tp activationThreshold=%s, but got %s" % (
                      expectedValue, tpActivationThreshold))
Example #5
def getSwarmModelParams(modelID):
  """Retrieve the Engine-level model params from a Swarm model
  
  Args:
    modelID - Engine-level model ID of the Swarm model
  
  Returns:
    JSON-encoded string containing Model Params
  """
  
  # TODO: the use of opfhelpers.loadExperimentDescriptionScriptFromDir when
  #  retrieving module params results in a leakage of pf_base_descriptionNN and
  #  pf_descriptionNN module imports for every call to getSwarmModelParams, so
  #  the leakage is unlimited when getSwarmModelParams is called by a
  #  long-running process such as grok-api-server. This issue is presently being
  #  tracked by the JIRA: https://issues.numenta.org/browse/NPC-225. An
  #  alternate solution is to execute the guts of this function's logic in a
  #  separate process (via the multiprocessing module).
  
  cjDAO = ClientJobsDAO.get()
  
  (jobID, description) = cjDAO.modelsGetFields(
    modelID,
    ["jobId", "genDescription"])
  
  (baseDescription,) = cjDAO.jobGetFields(jobID, ["genBaseDescription"])
  
  # Construct a directory with base.py and description.py for loading model
  # params, and use opfhelpers to extract model params from those files
  descriptionDirectory = tempfile.mkdtemp()
  try:
    baseDescriptionFilePath = os.path.join(descriptionDirectory, "base.py")
    with open(baseDescriptionFilePath, mode="wb") as f:
      f.write(baseDescription)
    
    descriptionFilePath = os.path.join(descriptionDirectory, "description.py")
    with open(descriptionFilePath, mode="wb") as f:
      f.write(description)
    
    expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(
      opfhelpers.loadExperimentDescriptionScriptFromDir(descriptionDirectory))
    
    return json.dumps(
      dict(
        modelConfig=expIface.getModelDescription(),
        inferenceArgs=expIface.getModelControl().get("inferenceArgs", None)))
  finally:
    shutil.rmtree(descriptionDirectory, ignore_errors=True)
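
The TODO above suggests isolating the description loading in a separate process so the leaked pf_base_descriptionNN / pf_descriptionNN imports die with it. One way that might look, as a sketch around the function above using the standard multiprocessing module (the wrapper names are hypothetical):

import multiprocessing

def _getSwarmModelParamsWorker(modelID, resultQueue):
  # Child-process entry point: the pf_*description modules imported by
  # loadExperimentDescriptionScriptFromDir are discarded when this process exits.
  resultQueue.put(getSwarmModelParams(modelID))

def getSwarmModelParamsIsolated(modelID):
  # Hypothetical wrapper implementing the workaround suggested in the TODO.
  resultQueue = multiprocessing.Queue()
  worker = multiprocessing.Process(target=_getSwarmModelParamsWorker,
                                   args=(modelID, resultQueue))
  worker.start()
  try:
    return resultQueue.get()
  finally:
    worker.join()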
Example #6
  def __getCurrentModelFromDir(self, expDir):
    """ Loads a description.py file from the specified directory, and sets it to
    be the current model """
    descriptionPyModule = opfhelpers.loadExperimentDescriptionScriptFromDir(
      expDir)
    expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(
      descriptionPyModule)
    modelDescription = expIface.getModelDescription()
    modelDescription['predictedField'] = None
    modelDescription['modelParams']['clParams']['implementation'] = 'py'

    # Add model to global list
    self.models.append(modelDescription)
    self.currentModel = len(self.models) - 1
    self.control = expIface.getModelControl()

    modelDescription['predictedField'] = self.control['inferenceArgs']['predictedField']

    return expIface
Example #7
def _runExperimentImpl(options, model=None):
    """Creates and runs the experiment

  Args:
    options: namedtuple ParseCommandLineOptionsResult
    model: For testing: may pass in an existing OPF Model instance
        to use instead of creating a new one.

  Returns: referece to OPFExperiment instance that was constructed (this
      is provided to aid with debugging) or None, if none was
      created.
  """
    jsonhelpers.validate(options.privateOptions, schemaDict=g_parsedPrivateCommandLineOptionsSchema)

    # Load the experiment's description.py module
    experimentDir = options.experimentDir
    descriptionPyModule = opfhelpers.loadExperimentDescriptionScriptFromDir(experimentDir)
    expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(descriptionPyModule)

    # Handle "list checkpoints" request
    if options.privateOptions["listAvailableCheckpoints"]:
        _printAvailableCheckpoints(experimentDir)
        return None

    # Load experiment tasks
    experimentTasks = expIface.getModelControl().get("tasks", [])

    # If the tasks list is empty, and this is a nupic environment description
    # file being run from the OPF, convert it to a simple OPF description file.
    if len(experimentTasks) == 0 and expIface.getModelControl()["environment"] == OpfEnvironment.Nupic:
        expIface.convertNupicEnvToOPF()
        experimentTasks = expIface.getModelControl().get("tasks", [])

    # Handle listTasks
    if options.privateOptions["listTasks"]:
        print "Available tasks:"

        for label in [t["taskLabel"] for t in experimentTasks]:
            print "\t", label

        return None

    # Construct the experiment instance
    if options.privateOptions["runCheckpointName"]:

        assert model is None

        checkpointName = options.privateOptions["runCheckpointName"]

        model = ModelFactory.loadFromCheckpoint(savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName))

    elif model is not None:
        print "Skipping creation of OPFExperiment instance: caller provided his own"
    else:
        modelDescription = expIface.getModelDescription()
        model = ModelFactory.create(modelDescription)

    # Handle "create model" request
    if options.privateOptions["createCheckpointName"]:
        checkpointName = options.privateOptions["createCheckpointName"]
        _saveModel(model=model, experimentDir=experimentDir, checkpointLabel=checkpointName)

        return model

    # Build the task list

    # Default task execution index list is in the natural list order of the tasks
    taskIndexList = range(len(experimentTasks))

    customTaskExecutionLabelsList = options.privateOptions["taskLabels"]
    if customTaskExecutionLabelsList:
        taskLabelsList = [t["taskLabel"] for t in experimentTasks]
        taskLabelsSet = set(taskLabelsList)

        customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)

        assert customTaskExecutionLabelsSet.issubset(taskLabelsSet), (
            "Some custom-provided task execution labels don't correspond "
            "to actual task labels: mismatched labels: %r; actual task "
            "labels: %r."
        ) % (customTaskExecutionLabelsSet - taskLabelsSet, customTaskExecutionLabelsList)

        taskIndexList = [taskLabelsList.index(label) for label in customTaskExecutionLabelsList]

        print "#### Executing custom task list: %r" % [taskLabelsList[i] for i in taskIndexList]

    # Run all experiment tasks
    for taskIndex in taskIndexList:

        task = experimentTasks[taskIndex]

        # Create a task runner and run it!
        taskRunner = _TaskRunner(model=model, task=task, cmdOptions=options)
        taskRunner.run()
        del taskRunner

        if options.privateOptions["checkpointModel"]:
            _saveModel(model=model, experimentDir=experimentDir, checkpointLabel=task["taskLabel"])

    return model
Example #8
def _runExperimentImpl(options, model=None):
  """Creates and runs the experiment

  Args:
    options: namedtuple ParseCommandLineOptionsResult
    model: For testing: may pass in an existing OPF Model instance
        to use instead of creating a new one.

  Returns: reference to OPFExperiment instance that was constructed (this
      is provided to aid with debugging) or None, if none was
      created.
  """
  jsonhelpers.validate(options.privateOptions,
                       schemaDict=g_parsedPrivateCommandLineOptionsSchema)

  # Load the experiment's description.py module
  experimentDir = options.experimentDir
  descriptionPyModule = opfhelpers.loadExperimentDescriptionScriptFromDir(
      experimentDir)
  expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(
      descriptionPyModule)

  # Handle "list checkpoints" request
  if options.privateOptions['listAvailableCheckpoints']:
    _printAvailableCheckpoints(experimentDir)
    return None

  # Load experiment tasks
  experimentTasks = expIface.getModelControl().get('tasks', [])

  # If the tasks list is empty, and this is a nupic environment description
  # file being run from the OPF, convert it to a simple OPF description file.
  if (len(experimentTasks) == 0 and
      expIface.getModelControl()['environment'] == OpfEnvironment.Nupic):
    expIface.convertNupicEnvToOPF()
    experimentTasks = expIface.getModelControl().get('tasks', [])

  # Ensures all the source locations are either absolute paths or relative to
  # the nupic.datafiles package_data location.
  expIface.normalizeStreamSources()

  # Handle listTasks
  if options.privateOptions['listTasks']:
    print "Available tasks:"

    for label in [t['taskLabel'] for t in experimentTasks]:
      print "\t", label

    return None

  # Construct the experiment instance
  if options.privateOptions['runCheckpointName']:

    assert model is None

    checkpointName = options.privateOptions['runCheckpointName']

    model = ModelFactory.loadFromCheckpoint(
          savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName))

  elif model is not None:
    print "Skipping creation of OPFExperiment instance: caller provided his own"
  else:
    modelDescription = expIface.getModelDescription()
    model = ModelFactory.create(modelDescription)

  # Handle "create model" request
  if options.privateOptions['createCheckpointName']:
    checkpointName = options.privateOptions['createCheckpointName']
    _saveModel(model=model,
               experimentDir=experimentDir,
               checkpointLabel=checkpointName)

    return model

  # Build the task list

  # Default task execution index list is in the natural list order of the tasks
  taskIndexList = range(len(experimentTasks))

  customTaskExecutionLabelsList = options.privateOptions['taskLabels']
  if customTaskExecutionLabelsList:
    taskLabelsList = [t['taskLabel'] for t in experimentTasks]
    taskLabelsSet = set(taskLabelsList)

    customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)

    assert customTaskExecutionLabelsSet.issubset(taskLabelsSet), \
           ("Some custom-provided task execution labels don't correspond "
            "to actual task labels: mismatched labels: %r; actual task "
            "labels: %r.") % (customTaskExecutionLabelsSet - taskLabelsSet,
                              customTaskExecutionLabelsList)

    taskIndexList = [taskLabelsList.index(label) for label in
                     customTaskExecutionLabelsList]

    print "#### Executing custom task list: %r" % [taskLabelsList[i] for
                                                   i in taskIndexList]

  # Run all experiment tasks
  for taskIndex in taskIndexList:

    task = experimentTasks[taskIndex]

    # Create a task runner and run it!
    taskRunner = _TaskRunner(model=model,
                             task=task,
                             cmdOptions=options)
    taskRunner.run()
    del taskRunner

    if options.privateOptions['checkpointModel']:
      _saveModel(model=model,
                 experimentDir=experimentDir,
                 checkpointLabel=task['taskLabel'])

  return model