Code example #1
  def __init__(self, metricSpecs, fieldInfo, inferenceType):
    """
    Constructs a Metrics Manager

    Parameters:
    -----------------------------------------------------------------------
    metricSpecs:    A sequence of MetricSpecs that specify which metrics should
                    be calculated

    fieldInfo:      A sequence of field metadata objects describing the input
                    fields, in order; used to map field names to field indices

    inferenceType:  An opfutils.InferenceType value that specifies the inference
                    type of the associated model. This affects how metrics are
                    calculated. For example, temporal models save the inference
                    from the previous timestep to match it to the ground truth
                    value in the current timestep.
    """

    self.__metricSpecs = []
    self.__metrics = []
    self.__metricLabels = []

    # Maps field names to indices. Useful for looking up input/predictions by
    # field name
    self.__fieldNameIndexMap = {info.name: i
                                for i, info in enumerate(fieldInfo)}

    self.__constructMetricsModules(metricSpecs)
    self.__currentGroundTruth = None
    self.__currentInference = None

    self.__isTemporal = InferenceType.isTemporal(inferenceType)
    if self.__isTemporal:
      self.__inferenceShifter = InferenceShifter()
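
A minimal usage sketch for this constructor is shown below. The import paths and the MetricSpec/FieldMetaInfo arguments follow typical NuPIC OPF usage and are assumptions about the surrounding project, not part of the example above.

from nupic.data.fieldmeta import (FieldMetaInfo, FieldMetaType,
                                  FieldMetaSpecial)
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import InferenceType
from nupic.frameworks.opf.predictionmetricsmanager import MetricsManager

# One FieldMetaInfo per input field; the constructor uses these to build its
# field-name-to-index map.
fieldInfo = [
    FieldMetaInfo("consumption", FieldMetaType.float, FieldMetaSpecial.none),
]

# Request a single averaged-absolute-error metric over a 1000-record window.
metricSpecs = [
    MetricSpec(metric="aae", inferenceElement="prediction",
               field="consumption", params={"window": 1000}),
]

# A temporal inference type makes the manager shift each inference forward one
# timestep (via InferenceShifter) before matching it to ground truth.
manager = MetricsManager(metricSpecs, fieldInfo, InferenceType.TemporalNextStep)

Per-record metric values are then obtained by passing each ModelResult to the manager's update() method.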
Code example #2
File: opfbasicenvironment.py  Project: runt18/nupic
  def writeRecords(self, modelResults, progressCB=None):
    """ Same as writeRecord above, but emits multiple rows in one shot.

    modelResults:  a list of opfutils.ModelResult objects; each object
                    represents one record.

    progressCB: an optional callback method that will be called after each
                  batch of records is written.

    """

    # Instantiate the logger if it doesn't exist yet
    if self.__logAdapter is None and modelResults:
      self.__writer = _BasicPredictionWriter(
                                      experimentDir=self.__experimentDir,
                                      label=self.__label,
                                      inferenceType=self.__inferenceType,
                                      fields=self.__inputFieldsMeta,
                                      metricNames=self.__loggedMetricNames,
                                      checkpointSource=self.__checkpointCache)

      # Dispose of our checkpoint cache now
      if self.__checkpointCache is not None:
        self.__checkpointCache.close()
        self.__checkpointCache = None

      if InferenceType.isTemporal(self.__inferenceType):
        logAdapterClass = TemporalPredictionLogAdapter
      else:
        logAdapterClass = NonTemporalPredictionLogAdapter

      self.__logAdapter = logAdapterClass(self.__writer)
      self.__writer.setLoggedMetrics(self.__loggedMetricNames)


    for modelResult in modelResults:
      if modelResult.inferences is not None:
        # -----------------------------------------------------------------------
        # Update the prediction log
        self.__logAdapter.update(modelResult)

      else:
        # Handle the learn-only scenario: pass input to existing logAdapters
        self.__logAdapter.update(modelResult)

    return
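
The method above builds its writer and log adapter lazily, on the first non-empty batch, and picks the adapter class based on whether the inference type is temporal. The standalone sketch below reduces that pattern to its essentials; all names in it (BatchLogger, TemporalAdapter, NonTemporalAdapter) are hypothetical stand-ins, not the NuPIC classes.

class TemporalAdapter(object):
    def update(self, result):
        print("temporal update: %s" % result)

class NonTemporalAdapter(object):
    def update(self, result):
        print("non-temporal update: %s" % result)

class BatchLogger(object):
    def __init__(self, isTemporal):
        self._isTemporal = isTemporal
        self._adapter = None  # created lazily, like __logAdapter above

    def writeRecords(self, results, progressCB=None):
        # Build the adapter only when the first non-empty batch arrives.
        if self._adapter is None and results:
            adapterClass = (TemporalAdapter if self._isTemporal
                            else NonTemporalAdapter)
            self._adapter = adapterClass()

        # Forward every record, whether or not it carries inferences.
        for result in results:
            self._adapter.update(result)

        if progressCB is not None:
            progressCB()

BatchLogger(isTemporal=True).writeRecords(["record-1", "record-2"])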
Code example #3
  def __init__(self,
      inferenceType=InferenceType.TemporalNextStep,
      predictedField=None,
      sensorParams={},
      spEnable=True,
      spParams={},
      
      # TODO: We can't figure out what this is. Remove?
      trainSPNetOnlyIfRequested=False,  
      tpEnable=True,
      tpParams={},
      clParams={},
      anomalyParams={},
      minLikelihoodThreshold=DEFAULT_LIKELIHOOD_THRESHOLD,
      maxPredictionsPerStep=DEFAULT_MAX_PREDICTIONS_PER_STEP):
    """CLAModel constructor.

    Args:
      inferenceType: A value from the InferenceType enum class.
      predictedField: The field to predict for multistep prediction.
      sensorParams: A dictionary specifying the sensor parameters.
      spEnable: Whether or not to use a spatial pooler.
      spParams: A dictionary specifying the spatial pooler parameters. These
          are passed to the spatial pooler.
      trainSPNetOnlyIfRequested: If set, don't create an SP network unless the
          user requests SP metrics.
      tpEnable: Whether to use a temporal pooler.
      tpParams: A dictionary specifying the temporal pooler parameters. These
          are passed to the temporal pooler.
      clParams: A dictionary specifying the classifier parameters. These are
          passed to the classifier.
      anomalyParams: Anomaly detection parameters.
      minLikelihoodThreshold: The minimum likelihood value to include in
          inferences.  Currently only applies to multistep inferences.
      maxPredictionsPerStep: Maximum number of predictions to include for
          each step in inferences. The predictions with highest likelihood are
          included.
    """
    if inferenceType not in self.__supportedInferenceKindSet:
      raise ValueError("{0} received incompatible inference type: {1}"\
                       .format(self.__class__, inferenceType))

    # Call super class constructor
    super(CLAModel, self).__init__(inferenceType)

    # self.__restoringFromState is set to True by our __setstate__ method
    # and back to False at completion of our _deSerializeExtraData() method.
    self.__restoringFromState = False
    self.__restoringFromV1 = False

    # Initialize logging
    self.__logger = initLogger(self)

    self.__logger.debug("Instantiating %s." % self.__class__.__name__)


    # TODO: VERBOSITY should be deprecated since we now have logging with levels
    self.__VERBOSITY = 0

    self._minLikelihoodThreshold = minLikelihoodThreshold
    self._maxPredictionsPerStep = maxPredictionsPerStep

    # Set up learning parameters (note: these may be replaced via the
    # enable/disable SP/TP learning methods)
    self.__spLearningEnabled = bool(spEnable)
    self.__tpLearningEnabled = bool(tpEnable)
    
    # Explicitly exclude the TP if this type of inference doesn't require it
    if not InferenceType.isTemporal(self.getInferenceType()) \
       or self.getInferenceType() == InferenceType.NontemporalMultiStep:
      tpEnable = False

    self._netInfo = None
    self._hasSP = spEnable
    self._hasTP = tpEnable
    
    self._classifierInputEncoder = None
    self._predictedFieldIdx = None
    self._predictedFieldName = None
    self._numFields = None

    # -----------------------------------------------------------------------
    # Create the network
    self._netInfo = self.__createCLANetwork(
        sensorParams, spEnable, spParams, tpEnable, tpParams, clParams,
        anomalyParams)


    # Initialize Spatial Anomaly detection parameters
    if self.getInferenceType() == InferenceType.NontemporalAnomaly:
      self._getSPRegion().setParameter('anomalyMode', True)

    # Initialize Temporal Anomaly detection parameters
    if self.getInferenceType() == InferenceType.TemporalAnomaly:
      self._getTPRegion().setParameter('anomalyMode', True)
      self._prevPredictedColumns = numpy.array([])

    # -----------------------------------------------------------------------
    # This flag, if set, tells us not to train the SP network unless
    # the user specifically asks for the SP inference metric
    self.__trainSPNetOnlyIfRequested = trainSPNetOnlyIfRequested

    self.__numRunCalls = 0

    # Tracks whether finishedLearning() has been called
    self.__finishedLearning = False

    self.__logger.info("Instantiated %s" % self.__class__.__name__)

    return
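
The constructor explicitly disables the temporal pooler when the inference type is non-temporal or is NontemporalMultiStep (the separate equality check implies that InferenceType.isTemporal() also treats NontemporalMultiStep as temporal). The sketch below mirrors that rule using the real InferenceType enum; the helper name _tpNeeded is hypothetical and only illustrates the check.

from nupic.frameworks.opf.opfutils import InferenceType

def _tpNeeded(inferenceType, tpEnable=True):
    # Mirrors the TP-exclusion rule in the constructor above (illustration only).
    if (not InferenceType.isTemporal(inferenceType)
            or inferenceType == InferenceType.NontemporalMultiStep):
        return False
    return tpEnable

print(_tpNeeded(InferenceType.TemporalNextStep))           # True
print(_tpNeeded(InferenceType.NontemporalMultiStep))       # False
print(_tpNeeded(InferenceType.NontemporalClassification))  # False

In practice CLAModel is rarely instantiated directly; NuPIC code typically builds it through ModelFactory.create() with a full model-parameter dictionary (encoders, SP/TP/classifier parameters) rather than calling this constructor by hand.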