Example #1
    def shift(self, modelResult):
        """Shift the model result and return the new instance.

    Queues up the T(i+1) prediction value and emits a T(i)
    input/prediction pair, if possible. E.g., if the previous T(i-1)
    iteration was learn-only, then we would not have a T(i) prediction in our
    FIFO and would not be able to emit a meaningful input/prediction pair.

    :param modelResult: A :class:`~.nupic.frameworks.opf.opfutils.ModelResult`
                        instance to shift.
    :return: A :class:`~.nupic.frameworks.opf.opfutils.ModelResult` instance that
             has been shifted
    """
        inferencesToWrite = {}

        if self._inferenceBuffer is None:
            # Size the FIFO to the largest temporal delay among the
            # inference elements, plus one slot for the current iteration.
            maxDelay = InferenceElement.getMaxDelay(modelResult.inferences)
            self._inferenceBuffer = collections.deque(maxlen=maxDelay + 1)

        # Newest inferences go on the left, so index i holds the inferences
        # from i iterations ago.
        self._inferenceBuffer.appendleft(copy.deepcopy(modelResult.inferences))

        for inferenceElement, inference in modelResult.inferences.iteritems():
            if isinstance(inference, dict):
                # Dict-valued inferences (e.g. multi-step predictions): each
                # key can carry its own temporal delay.
                inferencesToWrite[inferenceElement] = {}
                for key, _ in inference.iteritems():
                    delay = InferenceElement.getTemporalDelay(
                        inferenceElement, key)
                    if len(self._inferenceBuffer) > delay:
                        prevInference = self._inferenceBuffer[delay][
                            inferenceElement][key]
                        inferencesToWrite[inferenceElement][
                            key] = prevInference
                    else:
                        inferencesToWrite[inferenceElement][key] = None
            else:
                # Scalar or sequence inferences use a single delay for the
                # whole element.
                delay = InferenceElement.getTemporalDelay(inferenceElement)
                if len(self._inferenceBuffer) > delay:
                    inferencesToWrite[inferenceElement] = (
                        self._inferenceBuffer[delay][inferenceElement])
                else:
                    # Not enough history yet; emit None placeholders that
                    # preserve the inference's shape.
                    if type(inference) in (list, tuple):
                        inferencesToWrite[inferenceElement] = (
                            [None] * len(inference))
                    else:
                        inferencesToWrite[inferenceElement] = None

        shiftedResult = ModelResult(
            rawInput=modelResult.rawInput,
            sensorInput=modelResult.sensorInput,
            inferences=inferencesToWrite,
            metrics=modelResult.metrics,
            predictedFieldIdx=modelResult.predictedFieldIdx,
            predictedFieldName=modelResult.predictedFieldName)
        return shiftedResult
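
A minimal usage sketch of the shifter (assuming the NuPIC import paths nupic.data.inference_shifter and nupic.frameworks.opf.opf_utils; the data values here are illustrative, not from the source):

from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.opf_utils import InferenceElement, ModelResult

shifter = InferenceShifter()

# First record: nothing is buffered yet, so the shifted 1-step value is None.
r1 = shifter.shift(ModelResult(
    inferences={InferenceElement.multiStepPredictions: {1: 'a'}}))
assert r1.inferences[InferenceElement.multiStepPredictions][1] is None

# Second record: the prediction queued on the first call is emitted now,
# aligned with the input it was meant to predict.
r2 = shifter.shift(ModelResult(
    inferences={InferenceElement.multiStepPredictions: {1: 'b'}}))
assert r2.inferences[InferenceElement.multiStepPredictions][1] == 'a'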
def _testTemporalShift():
    """Test that the metrics manager correctly shifts records for multistep
    prediction cases.
    """
    print "*Testing Multistep temporal shift*..."
    from nupic.data.field_meta import (FieldMetaInfo, FieldMetaType,
                                       FieldMetaSpecial)
    from nupic.frameworks.opf.opf_utils import (InferenceElement,
                                                InferenceType, ModelResult,
                                                SensorInput)
    from nupic.frameworks.opf.prediction_metrics_manager import MetricsManager
    onlineMetrics = ()

    modelFieldMetaInfo = (FieldMetaInfo(name='consumption',
                                        type=FieldMetaType.float,
                                        special=FieldMetaSpecial.none), )

    mgr = MetricsManager(metricSpecs=onlineMetrics,
                         fieldInfo=modelFieldMetaInfo,
                         inferenceType=InferenceType.TemporalMultiStep)

    groundTruths = [{'consumption': i} for i in range(10)]
    # 1-step predictions run 9, 8, ..., 0; 3-step predictions run 5, 6, ..., 14.
    oneStepInfs = reversed(range(10))
    threeStepInfs = range(5, 15)

    for iterNum, gt, os, ts in zip(xrange(10), groundTruths, oneStepInfs,
                                   threeStepInfs):
        inferences = {InferenceElement.multiStepPredictions: {1: os, 3: ts}}
        sensorInput = SensorInput(dataDict=[gt])
        result = ModelResult(sensorInput=sensorInput, inferences=inferences)
        mgr.update(result)

        assert mgr._getGroundTruth(
            InferenceElement.multiStepPredictions)[0] == gt
        if iterNum < 1:
            # No shifted 1-step prediction exists until one record has passed.
            #assert mgr._getInference(InferenceElement.multiStepPredictions) is None
            assert mgr._getInference(
                InferenceElement.multiStepPredictions)[1] is None
        else:
            # 1-step values arrive as 9, 8, ... and are shifted one record,
            # so iteration i reports 9 - (i - 1) = 10 - i.
            prediction = mgr._getInference(
                InferenceElement.multiStepPredictions)[1]
            assert prediction == 10 - iterNum

        if iterNum < 3:
            # The 3-step prediction is shifted three records, so nothing is
            # available for the first three iterations.
            inference = mgr._getInference(
                InferenceElement.multiStepPredictions)
            assert inference is None or inference[3] is None
        else:
            # 3-step values arrive as i + 5 at iteration i and are shifted
            # three records, so iteration i reports (i - 3) + 5 = i + 2.
            prediction = mgr._getInference(
                InferenceElement.multiStepPredictions)[3]
            assert prediction == iterNum + 2
Example #3
    def _shiftAndCheck(self, inferences, expectedOutput):
        inferenceShifter = InferenceShifter()
        for inference, expected in zip(inferences, expectedOutput):
            inputResult = ModelResult(inferences=inference)
            outputResult = inferenceShifter.shift(inputResult)
            self.assertEqual(outputResult.inferences, expected)
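
A sketch of how this helper might be driven (a hypothetical test method with made-up data; a 1-step multi-step prediction should come back shifted by one record, with None emitted for the very first output):

    def testShiftOneStep(self):
        inferences = [
            {InferenceElement.multiStepPredictions: {1: 'a'}},
            {InferenceElement.multiStepPredictions: {1: 'b'}},
        ]
        expectedOutput = [
            {InferenceElement.multiStepPredictions: {1: None}},
            {InferenceElement.multiStepPredictions: {1: 'a'}},
        ]
        self._shiftAndCheck(inferences, expectedOutput)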
def _testMetricsMgr():
    print "*Testing Metrics Managers*..."
    from nupic.data.field_meta import (FieldMetaInfo, FieldMetaType,
                                       FieldMetaSpecial)

    from nupic.frameworks.opf.metrics import MetricSpec
    from nupic.frameworks.opf.opf_utils import ModelResult, SensorInput
    onlineMetrics = (MetricSpec(metric="aae", inferenceElement='', \
                                field="consumption", params={}),)

    print "TESTING METRICS MANAGER (BASIC PLUMBING TEST)..."

    modelFieldMetaInfo = (FieldMetaInfo(name='temperature',
                                        type=FieldMetaType.float,
                                        special=FieldMetaSpecial.none),
                          FieldMetaInfo(name='consumption',
                                        type=FieldMetaType.float,
                                        special=FieldMetaSpecial.none))

    # -----------------------------------------------------------------------
    # Test to make sure that invalid InferenceElements are caught
    try:
        MetricsManager(metricSpecs=onlineMetrics,
                       fieldInfo=modelFieldMetaInfo,
                       inferenceType=InferenceType.TemporalNextStep)
    except ValueError:
        print "Caught bad inference element: PASS"

    print
    onlineMetrics = (MetricSpec(metric="aae",
                                inferenceElement=InferenceElement.prediction,
                                field="consumption",
                                params={}), )

    temporalMetrics = MetricsManager(
        metricSpecs=onlineMetrics,
        fieldInfo=modelFieldMetaInfo,
        inferenceType=InferenceType.TemporalNextStep)

    inputs = [
        {
            'groundTruthRow': [9, 7],
            'predictionsDict': {
                InferenceType.TemporalNextStep: [12, 17]
            }
        },
        {
            'groundTruthRow': [12, 17],
            'predictionsDict': {
                InferenceType.TemporalNextStep: [14, 19]
            }
        },
        {
            'groundTruthRow': [14, 20],
            'predictionsDict': {
                InferenceType.TemporalNextStep: [16, 21]
            }
        },
        {
            'groundTruthRow': [9, 7],
            'predictionsDict': {
                InferenceType.TemporalNextStep: None
            }
        },
    ]

    for element in inputs:
        groundTruthRow = element['groundTruthRow']
        tPredictionRow = element['predictionsDict'][
            InferenceType.TemporalNextStep]

        result = ModelResult(sensorInput=SensorInput(dataRow=groundTruthRow,
                                                     dataEncodings=None,
                                                     sequenceReset=0,
                                                     category=None),
                             inferences={'prediction': tPredictionRow})

        temporalMetrics.update(result)

    assert temporalMetrics.getMetrics().values()[0] == 15.0 / 3.0, \
        "Expected %f, got %f" % (15.0 / 3.0,
                                 temporalMetrics.getMetrics().values()[0])
    print "ok"

    return
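
For reference, the 15.0 / 3.0 asserted above can be reproduced standalone; a worked sketch using the test's own data:

# Each prediction is scored against the *next* record's ground truth for
# the 'consumption' field (index 1 of each row).
preds = [17, 19, 21]    # consumption predictions from records 0-2
truths = [17, 20, 7]    # consumption ground truth from records 1-3
aae = sum(abs(t - p) for p, t in zip(preds, truths)) / float(len(preds))
assert aae == 15.0 / 3.0  # errors 0 + 1 + 14 over 3 records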
Example #5
    def run(self):
        """ Runs the given OPF task against the given Model instance """

        self._logger.debug("Starting Dummy Model: modelID=%s;" %
                           (self._modelID))

        # =========================================================================
        # Initialize periodic activities (e.g., for model result updates)
        # =========================================================================
        periodic = self._initPeriodicActivities()

        self._optimizedMetricLabel = self._optimizeKeyPattern
        self._reportMetricLabels = [self._optimizeKeyPattern]

        # =========================================================================
        # Create our top-level loop-control iterator
        # =========================================================================
        if self._iterations >= 0:
            iterTracker = iter(xrange(self._iterations))
        else:
            iterTracker = iter(itertools.count())

        # =========================================================================
        # This gets set in the unit tests. It tells the worker to call
        # sys.exit() for the first N models. This is how we generate
        # orphaned models.
        # =========================================================================
        doSysExit = False
        if self._sysExitModelRange is not None:
            modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(
                self._jobID)
            modelIDs = [x[0] for x in modelAndCounters]
            modelIDs.sort()
            (beg, end) = self._sysExitModelRange
            if self._modelID in modelIDs[int(beg):int(end)]:
                doSysExit = True

        if self._delayModelRange is not None:
            modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(
                self._jobID)
            modelIDs = [x[0] for x in modelAndCounters]
            modelIDs.sort()
            (beg, end) = self._delayModelRange
            if self._modelID in modelIDs[int(beg):int(end)]:
                time.sleep(10)

            # DEBUG!!!! infinite wait if we have 50 models
            #if len(modelIDs) >= 50:
            #  jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]
            #  while not jobCancel:
            #    time.sleep(1)
            #    jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]

        if self._errModelRange is not None:
            modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(
                self._jobID)
            modelIDs = [x[0] for x in modelAndCounters]
            modelIDs.sort()
            (beg, end) = self._errModelRange
            if self._modelID in modelIDs[int(beg):int(end)]:
                raise RuntimeError(
                    "Exiting with error due to errModelRange parameter")

        # =========================================================================
        # Delay, if necessary
        if self._delay is not None:
            time.sleep(self._delay)

        # =========================================================================
        # Run it!
        # =========================================================================
        self._currentRecordIndex = 0
        while True:

            # =========================================================================
            # Check if the model should be stopped
            # =========================================================================

            # If killed by a terminator, stop running
            if self._isKilled:
                break

            # If job stops or hypersearch ends, stop running
            if self._isCanceled:
                break

            # If the model is mature, stop running ONLY IF we are not the
            # best model for the job. Otherwise, keep running so we can keep
            # returning predictions to the user.
            if self._isMature:
                if not self._isBestModel:
                    self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
                    break
                else:
                    self._cmpReason = self._jobsDAO.CMPL_REASON_EOF

            # =========================================================================
            # Get the next record, and "write" it
            # =========================================================================
            try:
                self._currentRecordIndex = next(iterTracker)
            except StopIteration:
                break

            # "Write" a dummy output value. This is used to test that the batched
            # writing works properly

            self._writePrediction(ModelResult(None, None, None, None))

            periodic.tick()

            # =========================================================================
            # Compute wait times. See if model should exit
            # =========================================================================

            if self.__shouldSysExit(self._currentRecordIndex):
                sys.exit(1)

            # Simulate computation time
            if self._busyWaitTime is not None:
                time.sleep(self._busyWaitTime)
                self.__computeWaitTime()

            # Asked to abort after so many iterations?
            if doSysExit:
                sys.exit(1)

            # Asked to raise a jobFailException?
            if self._jobFailErr:
                raise utils.JobFailException(
                    "E10000", "dummyModel's jobFailErr was True.")

        # =========================================================================
        # Handle final operations
        # =========================================================================
        if self._doFinalize:
            if not self._makeCheckpoint:
                self._model = None

            # Delay finalization operation
            if self._finalDelay is not None:
                time.sleep(self._finalDelay)

            self._finalize()

        self._logger.info("Finished: modelID=%r " % (self._modelID))

        return (self._cmpReason, None)
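
The loop-control iterator above unifies bounded and unbounded runs: a finite xrange when self._iterations >= 0, otherwise itertools.count(). A standalone sketch of the same pattern (the helper name makeIterTracker is made up for illustration):

import itertools

def makeIterTracker(iterations):
    # Bounded run when iterations >= 0; otherwise run until stopped.
    if iterations >= 0:
        return iter(xrange(iterations))
    return iter(itertools.count())

assert list(makeIterTracker(3)) == [0, 1, 2]
unbounded = makeIterTracker(-1)
assert [next(unbounded) for _ in xrange(4)] == [0, 1, 2, 3]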