Example #1
  def __init__(self, model, task, cmdOptions):
    """ Constructor

    Args:
      model: The OPF Model instance against which to run the task
      task: A dictionary conforming to opfTaskSchema.json
      cmdOptions: ParseCommandLineOptionsResult namedtuple
    """
    validateOpfJsonValue(task, "opfTaskSchema.json")

    # Set up our logger
    self.__logger = logging.getLogger(".".join(
      ['com.numenta', self.__class__.__module__, self.__class__.__name__]))
    #self.__logger.setLevel(logging.DEBUG)

    self.__logger.debug(("Instantiated %s(" + \
                      "model=%r, " + \
                      "task=%r, " + \
                      "cmdOptions=%r)") % \
                        (self.__class__.__name__,
                         model,
                         task,
                         cmdOptions))

    # Generate a new dataset from streamDef and create the dataset reader
    streamDef = task['dataset']
    datasetReader = opf_basic_environment.BasicDatasetReader(streamDef)

    self.__model = model
    self.__datasetReader = datasetReader
    self.__task = task
    self.__cmdOptions = cmdOptions


    self.__predictionLogger = opf_basic_environment.BasicPredictionLogger(
      fields=model.getFieldInfo(),
      experimentDir=cmdOptions.experimentDir,
      label=task['taskLabel'],
      inferenceType=self.__model.getInferenceType())

    taskControl = task['taskControl']

    # Create Task Driver
    self.__taskDriver = OPFTaskDriver(
      taskControl=taskControl,
      model=model)

    loggedMetricPatterns = taskControl.get('loggedMetrics', None)
    loggedMetricLabels = matchPatterns(loggedMetricPatterns,
                                       self.__taskDriver.getMetricLabels())

    self.__predictionLogger.setLoggedMetrics(loggedMetricLabels)

    # Create a prediction metrics logger
    self.__metricsLogger = opf_basic_environment.BasicPredictionMetricsLogger(
      experimentDir=cmdOptions.experimentDir,
      label=task['taskLabel'])
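
For context: the 'loggedMetrics' entry in taskControl is a list of patterns that selects which metric labels the prediction logger records. Below is a minimal sketch of that filtering step, assuming matchPatterns performs regex matching of each pattern against the available labels; the helper name and the sample labels are purely illustrative, not taken from the OPF code.

import re

def filterMetricLabelsSketch(patterns, allLabels):
  # Illustrative stand-in for matchPatterns: keep every label that matches
  # at least one of the supplied regex patterns; None means no filtering info.
  if patterns is None:
    return None
  return [label for label in allLabels
          if any(re.match(p, label) for p in patterns)]

# Hypothetical metric labels and pattern, for illustration only
allLabels = [
  "prediction:aae:window=1000:field=consumption",
  "prediction:altMAPE:window=1000:field=consumption",
]
filterMetricLabelsSketch([".*aae.*"], allLabels)
# -> ['prediction:aae:window=1000:field=consumption']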
Example #2
  def _createPredictionLogger(self):
    """
    Creates the model's PredictionLogger object, which is an interface to write
    model results to a permanent storage location
    """
    # Write results to a file
    self._predictionLogger = BasicPredictionLogger(
      fields=self._model.getFieldInfo(),
      experimentDir=self._experimentDir,
      label = "hypersearch-worker",
      inferenceType=self._model.getInferenceType())

    if self.__loggedMetricPatterns:
      metricLabels = self.__metricMgr.getMetricLabels()
      loggedMetrics = matchPatterns(self.__loggedMetricPatterns, metricLabels)
      self._predictionLogger.setLoggedMetrics(loggedMetrics)
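
For orientation, a logger created this way is later fed inference results one at a time. The sketch below is an assumption for illustration only: the writeRecord(modelResult) call and the surrounding loop are not shown in this snippet and may differ from the actual PredictionLogger interface.

def logResultsSketch(predictionLogger, modelResults):
  # Illustrative only: hand each inference result to the logger, which
  # persists it under the experiment directory configured above.
  for modelResult in modelResults:
    predictionLogger.writeRecord(modelResult)  # assumed interface, see lead-in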
Example #3
  def __getOptimizedMetricLabel(self):
    """ Get the label for the metric being optimized. This function also caches
    the label in the instance variable self._optimizedMetricLabel

    Parameters:
    -----------------------------------------------------------------------
    metricLabels:   A sequence of all the labels being computed for this model

    Returns:        The label for the metric being optmized over
    """
    matchingKeys = matchPatterns([self._optimizeKeyPattern],
                                  self._getMetricLabels())

    if len(matchingKeys) == 0:
      raise Exception("None of the generated metrics match the specified "
                      "optimization pattern: %s. Available metrics are %s" % \
                       (self._optimizeKeyPattern, self._getMetricLabels()))
    elif len(matchingKeys) > 1:
      raise Exception("The specified optimization pattern '%s' matches more "
              "than one metric: %s" % (self._optimizeKeyPattern, matchingKeys))

    return matchingKeys[0]
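
To make the three branches above concrete, here is a small self-contained illustration. The metric labels and patterns are made up for this example, and the simple regex filter only approximates what matchPatterns does.

import re

labels = [
  "prediction:aae:window=1000:field=consumption",      # hypothetical label
  "prediction:altMAPE:window=1000:field=consumption",   # hypothetical label
]

def pickOptimizedLabel(pattern):
  # Same zero/one/many logic as __getOptimizedMetricLabel above, with a
  # plain regex filter standing in for matchPatterns.
  matching = [label for label in labels if re.match(pattern, label)]
  if len(matching) == 0:
    raise Exception("No metric matches pattern %r" % pattern)
  elif len(matching) > 1:
    raise Exception("Pattern %r matches more than one metric: %s"
                    % (pattern, matching))
  return matching[0]

pickOptimizedLabel(".*altMAPE.*")        # unique match -> returned
# pickOptimizedLabel(".*prediction.*")   # would raise: matches both labels
# pickOptimizedLabel(".*bogus.*")        # would raise: matches nothing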
Example #4
    def run(self):
        """ Runs the OPF Model.

        Returns:  (completionReason, completionMsg)
                  where completionReason is one of the
                  ClientJobsDAO.CMPL_REASON_XXX equates.
        """
        # -----------------------------------------------------------------------
        # Load the experiment's description.py module
        descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
            self._experimentDir)
        expIface = helpers.getExperimentDescriptionInterfaceFromModule(
            descriptionPyModule)
        expIface.normalizeStreamSources()

        modelDescription = expIface.getModelDescription()
        self._modelControl = expIface.getModelControl()

        # -----------------------------------------------------------------------
        # Create the input data stream for this task
        streamDef = self._modelControl['dataset']

        from nupic.data.stream_reader import StreamReader
        readTimeout = 0

        self._inputSource = StreamReader(streamDef,
                                         isBlocking=False,
                                         maxTimeout=readTimeout)

        # -----------------------------------------------------------------------
        # Get field statistics from the input source
        fieldStats = self._getFieldStats()
        # -----------------------------------------------------------------------
        # Construct the model instance
        self._model = ModelFactory.create(modelDescription)
        self._model.setFieldStatistics(fieldStats)
        self._model.enableLearning()
        self._model.enableInference(
            self._modelControl.get("inferenceArgs", None))

        # -----------------------------------------------------------------------
        # Instantiate the metrics
        self.__metricMgr = MetricsManager(
            self._modelControl.get('metrics', None),
            self._model.getFieldInfo(), self._model.getInferenceType())

        self.__loggedMetricPatterns = self._modelControl.get(
            "loggedMetrics", [])

        self._optimizedMetricLabel = self.__getOptimizedMetricLabel()
        self._reportMetricLabels = matchPatterns(self._reportKeyPatterns,
                                                 self._getMetricLabels())

        # -----------------------------------------------------------------------
        # Initialize periodic activities (e.g., for model result updates)
        self._periodic = self._initPeriodicActivities()

        # -----------------------------------------------------------------------
        # Create our top-level loop-control iterator
        numIters = self._modelControl.get('iterationCount', -1)

        # Are we asked to turn off learning for a certain # of iterations near the
        #  end?
        learningOffAt = None
        iterationCountInferOnly = self._modelControl.get(
            'iterationCountInferOnly', 0)
        if iterationCountInferOnly == -1:
            self._model.disableLearning()
        elif iterationCountInferOnly > 0:
            assert numIters > iterationCountInferOnly, "when iterationCountInferOnly " \
              "is specified, iterationCount must be greater than " \
              "iterationCountInferOnly."
            learningOffAt = numIters - iterationCountInferOnly

        self.__runTaskMainLoop(numIters, learningOffAt=learningOffAt)

        # -----------------------------------------------------------------------
        # Perform final operations for model
        self._finalize()

        return (self._cmpReason, None)
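
The body of __runTaskMainLoop is not shown above. The sketch below is purely illustrative of the iteration/learning-cutoff behavior it is handed: only numIters, learningOffAt, and disableLearning come from the code above; the record source and the loop structure are assumptions.

def runTaskMainLoopSketch(model, records, numIters, learningOffAt=None):
    # Illustrative only: feed input records (field-value dicts) to the model,
    # switching learning off once the inference-only tail of the run begins.
    for i, record in enumerate(records):
        if numIters >= 0 and i >= numIters:
            break
        if learningOffAt is not None and i == learningOffAt:
            model.disableLearning()
        modelResult = model.run(record)  # inference results for this record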