Code example #1
    def __init__(self, use_saved_model, checkpoint_path, likelihood_path):
        self.use_saved_model = use_saved_model
        if use_saved_model:
            # Restore the OPF model and the anomaly-likelihood state from disk.
            self.model = ModelFactory.loadFromCheckpoint(checkpoint_path)
            self.model.enableInference({'predictedField': 'cpu'})
            self.model.enableInference({'predictedField': 'memory'})
            with open(likelihood_path, "rb") as f:
                self.anomalyLikelihood = anomaly_likelihood.AnomalyLikelihood(
                ).readFromFile(f)
        else:
            # Build a fresh model from its parameter dictionary.
            self.model = ModelFactory.create(model_params.MODEL_PARAMS)
            self.model.enableInference({'predictedField': 'cpu'})
            self.model.enableInference({'predictedField': 'memory'})
            self.anomalyLikelihood = anomaly_likelihood.AnomalyLikelihood()
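The constructor above only covers the load side. A minimal save-side counterpart might look like the sketch below; the method name save_state is hypothetical, and it assumes AnomalyLikelihood.writeToFile() is the serialization counterpart of the readFromFile() call used above.

    def save_state(self, checkpoint_path, likelihood_path):
        # Write the OPF model as a checkpoint bundle (a directory on disk).
        self.model.save(checkpoint_path)
        # Persist the anomaly-likelihood state next to it (assumed writeToFile counterpart).
        with open(likelihood_path, "wb") as f:
            self.anomalyLikelihood.writeToFile(f)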
Code example #2
    def testCheckpoint(self):
        tmpDir = tempfile.mkdtemp()
        model = ModelFactory.create(MODEL_PARAMS)
        model.enableInference({'predictedField': 'consumption'})
        headers = ['timestamp', 'consumption']

        # Now do a bunch of small load/train/save batches
        for _ in range(20):

            for _ in range(2):
                record = [
                    datetime.datetime(2013, 12, 12),
                    numpy.random.uniform(100)
                ]
                modelInput = dict(list(zip(headers, record)))
                model.run(modelInput)

            # Save and load a checkpoint after each batch. Clean up.
            tmpBundleName = os.path.join(tmpDir, "test_checkpoint")
            self.assertIs(model.save(tmpBundleName), None,
                          "Save command failed.")
            model = ModelFactory.loadFromCheckpoint(tmpBundleName)
            shutil.rmtree(tmpBundleName)
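Stripped of the test harness, the round trip exercised above is just the following sketch (the checkpoint path is illustrative; Model.save() writes the checkpoint bundle directory that ModelFactory.loadFromCheckpoint() reads back):

model = ModelFactory.create(MODEL_PARAMS)
model.enableInference({'predictedField': 'consumption'})
model.save('/tmp/consumption_checkpoint')      # write the checkpoint bundle; returns None on success
model = ModelFactory.loadFromCheckpoint('/tmp/consumption_checkpoint')  # resume from the saved state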
Code example #3
def resurrect_model(saved_model):
    return ModelFactory.loadFromCheckpoint(saved_model)
Code example #4
File: experiment_runner.py  Project: Erichy94/nupic
def _runExperimentImpl(options, model=None):
  """Creates and runs the experiment

  Args:
    options: namedtuple ParseCommandLineOptionsResult
    model: For testing: may pass in an existing OPF Model instance
        to use instead of creating a new one.

  Returns: reference to OPFExperiment instance that was constructed (this
      is provided to aid with debugging) or None, if none was
      created.
  """
  json_helpers.validate(options.privateOptions,
                        schemaDict=g_parsedPrivateCommandLineOptionsSchema)

  # Load the experiment's description.py module
  experimentDir = options.experimentDir
  descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
      experimentDir)
  expIface = helpers.getExperimentDescriptionInterfaceFromModule(
      descriptionPyModule)

  # Handle "list checkpoints" request
  if options.privateOptions['listAvailableCheckpoints']:
    _printAvailableCheckpoints(experimentDir)
    return None

  # Load experiment tasks
  experimentTasks = expIface.getModelControl().get('tasks', [])

  # If the tasks list is empty, and this is a nupic environment description
  # file being run from the OPF, convert it to a simple OPF description file.
  if (len(experimentTasks) == 0 and
      expIface.getModelControl()['environment'] == OpfEnvironment.Nupic):
    expIface.convertNupicEnvToOPF()
    experimentTasks = expIface.getModelControl().get('tasks', [])

  # Ensures all the source locations are either absolute paths or relative to
  # the nupic.datafiles package_data location.
  expIface.normalizeStreamSources()

  # Extract option
  newSerialization = options.privateOptions['newSerialization']

  # Handle listTasks
  if options.privateOptions['listTasks']:
    print "Available tasks:"

    for label in [t['taskLabel'] for t in experimentTasks]:
      print "\t", label

    return None

  # Construct the experiment instance
  if options.privateOptions['runCheckpointName']:

    assert model is None

    checkpointName = options.privateOptions['runCheckpointName']

    model = ModelFactory.loadFromCheckpoint(
          savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName),
          newSerialization=newSerialization)

  elif model is not None:
    print "Skipping creation of OPFExperiment instance: caller provided his own"
  else:
    modelDescription = expIface.getModelDescription()
    model = ModelFactory.create(modelDescription)

  # Handle "create model" request
  if options.privateOptions['createCheckpointName']:
    checkpointName = options.privateOptions['createCheckpointName']
    _saveModel(model=model,
               experimentDir=experimentDir,
               checkpointLabel=checkpointName,
               newSerialization=newSerialization)

    return model

  # Build the task list

  # Default task execution index list is in the natural list order of the tasks
  taskIndexList = range(len(experimentTasks))

  customTaskExecutionLabelsList = options.privateOptions['taskLabels']
  if customTaskExecutionLabelsList:
    taskLabelsList = [t['taskLabel'] for t in experimentTasks]
    taskLabelsSet = set(taskLabelsList)

    customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)

    assert customTaskExecutionLabelsSet.issubset(taskLabelsSet), \
           ("Some custom-provided task execution labels don't correspond "
            "to actual task labels: mismatched labels: %r; actual task "
            "labels: %r.") % (customTaskExecutionLabelsSet - taskLabelsSet,
                              customTaskExecutionLabelsList)

    taskIndexList = [taskLabelsList.index(label) for label in
                     customTaskExecutionLabelsList]

    print "#### Executing custom task list: %r" % [taskLabelsList[i] for
                                                   i in taskIndexList]

  # Run all experiment tasks
  for taskIndex in taskIndexList:

    task = experimentTasks[taskIndex]

    # Create a task runner and run it!
    taskRunner = _TaskRunner(model=model,
                             task=task,
                             cmdOptions=options)
    taskRunner.run()
    del taskRunner

    if options.privateOptions['checkpointModel']:
      _saveModel(model=model,
                 experimentDir=experimentDir,
                 checkpointLabel=task['taskLabel'],
                 newSerialization=newSerialization)

  return model
Code example #5
def _runExperimentImpl(options, model=None):
    """Creates and runs the experiment

  Args:
    options: namedtuple ParseCommandLineOptionsResult
    model: For testing: may pass in an existing OPF Model instance
        to use instead of creating a new one.

  Returns: reference to OPFExperiment instance that was constructed (this
      is provided to aid with debugging) or None, if none was
      created.
  """
    jsonhelpers.validate(options.privateOptions,
                         schemaDict=g_parsedPrivateCommandLineOptionsSchema)

    # Load the experiment's description.py module
    experimentDir = options.experimentDir
    descriptionPyModule = opf_helpers.loadExperimentDescriptionScriptFromDir(
        experimentDir)
    expIface = opf_helpers.getExperimentDescriptionInterfaceFromModule(
        descriptionPyModule)

    # Handle "list checkpoints" request
    if options.privateOptions['listAvailableCheckpoints']:
        _printAvailableCheckpoints(experimentDir)
        return None

    # Load experiment tasks
    experimentTasks = expIface.getModelControl().get('tasks', [])

    # If the tasks list is empty, and this is a nupic environment description
    # file being run from the OPF, convert it to a simple OPF description file.
    if (len(experimentTasks) == 0 and expIface.getModelControl()['environment']
            == OpfEnvironment.Nupic):
        expIface.convertNupicEnvToOPF()
        experimentTasks = expIface.getModelControl().get('tasks', [])

    # Ensures all the source locations are either absolute paths or relative to
    # the nupic.datafiles package_data location.
    expIface.normalizeStreamSources()

    # Extract option
    newSerialization = options.privateOptions['newSerialization']

    # Handle listTasks
    if options.privateOptions['listTasks']:
        print "Available tasks:"

        for label in [t['taskLabel'] for t in experimentTasks]:
            print "\t", label

        return None

    # Construct the experiment instance
    if options.privateOptions['runCheckpointName']:

        assert model is None

        checkpointName = options.privateOptions['runCheckpointName']

        model = ModelFactory.loadFromCheckpoint(
            savedModelDir=_getModelCheckpointDir(experimentDir,
                                                 checkpointName),
            newSerialization=newSerialization)

    elif model is not None:
        print "Skipping creation of OPFExperiment instance: caller provided his own"
    else:
        modelDescription = expIface.getModelDescription()
        model = ModelFactory.create(modelDescription)

    # Handle "create model" request
    if options.privateOptions['createCheckpointName']:
        checkpointName = options.privateOptions['createCheckpointName']
        _saveModel(model=model,
                   experimentDir=experimentDir,
                   checkpointLabel=checkpointName,
                   newSerialization=newSerialization)

        return model

    # Build the task list

    # Default task execution index list is in the natural list order of the tasks
    taskIndexList = range(len(experimentTasks))

    customTaskExecutionLabelsList = options.privateOptions['taskLabels']
    if customTaskExecutionLabelsList:
        taskLabelsList = [t['taskLabel'] for t in experimentTasks]
        taskLabelsSet = set(taskLabelsList)

        customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)

        assert customTaskExecutionLabelsSet.issubset(taskLabelsSet), \
               ("Some custom-provided task execution labels don't correspond "
                "to actual task labels: mismatched labels: %r; actual task "
                "labels: %r.") % (customTaskExecutionLabelsSet - taskLabelsSet,
                                  customTaskExecutionLabelsList)

        taskIndexList = [
            taskLabelsList.index(label)
            for label in customTaskExecutionLabelsList
        ]

        print "#### Executing custom task list: %r" % [
            taskLabelsList[i] for i in taskIndexList
        ]

    # Run all experiment tasks
    for taskIndex in taskIndexList:

        task = experimentTasks[taskIndex]

        # Create a task runner and run it!
        taskRunner = _TaskRunner(model=model, task=task, cmdOptions=options)
        taskRunner.run()
        del taskRunner

        if options.privateOptions['checkpointModel']:
            _saveModel(model=model,
                       experimentDir=experimentDir,
                       checkpointLabel=task['taskLabel'],
                       newSerialization=newSerialization)

    return model
Code example #6
File: run.py  Project: htm-community/nupic.critic
def resurrect_model(saved_model):
  return ModelFactory.loadFromCheckpoint(saved_model)
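None of these snippets show the matching save side; a plausible counterpart (the helper name save_model is hypothetical, not taken from the project) would simply wrap Model.save():

def save_model(model, saved_model):
  # Model.save() expects the directory path of the checkpoint bundle to write.
  model.save(saved_model)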
Code example #7
    def __init__(self, use_saved_model, checkpoint_path, likelihood_path):
        self.use_saved_model = use_saved_model

        if use_saved_model:
            self.salt_acc_model = ModelFactory.loadFromCheckpoint(
                checkpoint_path + '_SALT_ACC')
            self.salt_qua_model = ModelFactory.loadFromCheckpoint(
                checkpoint_path + '_SALT_QUA')
            self.pepa_acc_model = ModelFactory.loadFromCheckpoint(
                checkpoint_path + '_PEPA_ACC')
            self.pepa_qua_model = ModelFactory.loadFromCheckpoint(
                checkpoint_path + '_PEPA_QUA')

            self.salt_acc_model.enableInference({'predictedField': 'x'})
            self.salt_acc_model.enableInference({'predictedField': 'y'})
            self.salt_acc_model.enableInference({'predictedField': 'z'})

            self.salt_qua_model.enableInference({'predictedField': 'w'})
            self.salt_qua_model.enableInference({'predictedField': 'x'})
            self.salt_qua_model.enableInference({'predictedField': 'y'})
            self.salt_qua_model.enableInference({'predictedField': 'z'})

            self.pepa_acc_model.enableInference({'predictedField': 'x'})
            self.pepa_acc_model.enableInference({'predictedField': 'y'})
            self.pepa_acc_model.enableInference({'predictedField': 'z'})

            self.pepa_qua_model.enableInference({'predictedField': 'w'})
            self.pepa_qua_model.enableInference({'predictedField': 'x'})
            self.pepa_qua_model.enableInference({'predictedField': 'y'})
            self.pepa_qua_model.enableInference({'predictedField': 'z'})

            with open(likelihood_path, "rb") as f:
                self.anomalyLikelihood = anomaly_likelihood.AnomalyLikelihood(
                ).readFromFile(f)

        else:
            self.salt_acc_model = ModelFactory.create(
                model_params_salt_acc.MODEL_PARAMS)
            self.salt_qua_model = ModelFactory.create(
                model_params_salt_qua.MODEL_PARAMS)
            self.pepa_acc_model = ModelFactory.create(
                model_params_pepa_acc.MODEL_PARAMS)
            self.pepa_qua_model = ModelFactory.create(
                model_params_pepa_qua.MODEL_PARAMS)

            self.salt_acc_model.enableInference({'predictedField': 'x'})
            self.salt_acc_model.enableInference({'predictedField': 'y'})
            self.salt_acc_model.enableInference({'predictedField': 'z'})

            self.salt_qua_model.enableInference({'predictedField': 'w'})
            self.salt_qua_model.enableInference({'predictedField': 'x'})
            self.salt_qua_model.enableInference({'predictedField': 'y'})
            self.salt_qua_model.enableInference({'predictedField': 'z'})

            self.pepa_acc_model.enableInference({'predictedField': 'x'})
            self.pepa_acc_model.enableInference({'predictedField': 'y'})
            self.pepa_acc_model.enableInference({'predictedField': 'z'})

            self.pepa_qua_model.enableInference({'predictedField': 'w'})
            self.pepa_qua_model.enableInference({'predictedField': 'x'})
            self.pepa_qua_model.enableInference({'predictedField': 'y'})
            self.pepa_qua_model.enableInference({'predictedField': 'z'})

            self.anomalyLikelihood = anomaly_likelihood.AnomalyLikelihood()
Code example #8
File: models_factory.py  Project: iyuvalk/pensu
    def __get_model(self, metric, model_type,
                    models_number_below_configured_limit):
        model_prefix = "_" + model_type
        result_model = None
        model_fqdn = model_prefix + "." + metric["metric_name"]
        if not self.__loaded_models.model_exists(model_fqdn):
            self.__create_model_thread_lock.acquire()
            try:
                if not self.__loaded_models.model_exists(
                        model_fqdn) and os.path.isdir(
                            self.__model_storage_manager.get_save_path(
                                model_fqdn)):
                    if models_number_below_configured_limit:
                        try:
                            self.__loaded_models.add_model_for_metric(
                                model_fqdn,
                                NupicModelFactory.loadFromCheckpoint(
                                    self.__model_storage_manager.get_save_path(
                                        model_fqdn)))
                            self._logger.debug(
                                "__get_model", "LOADED " + model_type.upper() +
                                " MODEL FROM DISK")
                        except Exception as ex:
                            self.__loaded_models.add_model_for_metric(
                                model_fqdn,
                                self.__create_model(
                                    self.get_model_params_from_metric_name(
                                        metric["metric_family"], model_type),
                                    self._config_mgr.get("prediction_steps")))
                            self._logger.warn(
                                "__get_model",
                                "Failed to create a " + model_type +
                                " model from disk",
                                exception_message=str(ex.message),
                                exception_type=str(type(ex).__name__))

                if not self.__loaded_models.model_exists(
                        model_fqdn) and not os.path.isdir(
                            self.__model_storage_manager.get_save_path(
                                model_fqdn)):
                    if models_number_below_configured_limit:
                        model_params = self.get_model_params_from_metric_name(
                            metric["metric_family"], model_type)
                        prediction_steps = self._config_mgr.get(
                            "prediction_steps")
                        model_to_add = self.__create_model(
                            model_params, prediction_steps)
                        self.__loaded_models.add_model_for_metric(
                            model_fqdn, model_to_add)
                        self._logger.debug("__get_model",
                                           model_type.capitalize() +
                                           " model created from params",
                                           metric=str(metric["metric_name"]))
            finally:
                self.__create_model_thread_lock.release()
        if self.__loaded_models.model_exists(model_fqdn):
            result_model = self.__loaded_models.get_model(model_fqdn)
            self._logger.debug("__get_model",
                               model_type.capitalize() +
                               " model loaded from cache",
                               metric=str(metric["metric_name"]))
        return result_model
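Stripped of the caching, locking, and logging, the method above implements the common 'load the checkpoint if one exists on disk, otherwise create the model from its params' decision. A minimal standalone sketch, with save_path and model_params as assumed inputs in place of the project's own helpers (the import path follows the newer nupic layout; older releases use nupic.frameworks.opf.modelfactory):

import os

from nupic.frameworks.opf.model_factory import ModelFactory


def load_or_create(save_path, model_params):
    # Reuse a previously saved checkpoint bundle when one is on disk ...
    if os.path.isdir(save_path):
        return ModelFactory.loadFromCheckpoint(save_path)
    # ... otherwise build a fresh model from its parameter dictionary.
    return ModelFactory.create(model_params)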