Example #1
 def testTrivialAAE(self):
     trivialaae = getModule(
         MetricSpec("trivial", None, None, {
             "verbosity": OPFMetricsTest.VERBOSITY,
             "errorMetric": "aae"
         }))
     gt = [i / 4 + 1 for i in range(100)]
     p = [i for i in range(100)]
     for i in xrange(len(gt)):
         trivialaae.addInstance(gt[i], p[i])
     target = .25
     self.assertTrue(
         abs(trivialaae.getMetric()["value"] - target) < OPFMetricsTest.DELTA)
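The target of .25 can be sanity-checked by hand, assuming the "trivial" metric ignores the supplied predictions and scores the previous ground-truth value instead (an assumption, but consistent with the numbers in this test):

     # gt goes 1,1,1,1,2,2,2,2,3,... so a shift-by-one prediction misses by
     # exactly 1 on roughly every 4th record and is exact otherwise:
     approx_aae = (100 / 4) * 1 / 100.0   # ~.25, the target checked above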
Example #2
 def testTrivialAccuracy(self):
     trivialaccuracy = getModule(
         MetricSpec("trivial", None, None, {
             "verbosity": OPFMetricsTest.VERBOSITY,
             "errorMetric": "acc"
         }))
     gt = [str(i / 4 + 1) for i in range(100)]
     p = [str(i) for i in range(100)]
     for i in xrange(len(gt)):
         trivialaccuracy.addInstance(gt[i], p[i])
     target = .75
     self.assertTrue(
         abs(trivialaccuracy.getMetric()["value"] - target) < OPFMetricsTest.DELTA)
Example #3
 def defineMetricSpecs(self):
     """
     Define the metric properties for nupic model
     :return:
     """
     metricSpecs = (MetricSpec(field=self.fieldToPredict,
                               metric='multiStep',
                               inferenceElement='multiStepBestPredictions',
                               params={
                                   'errorMetric': 'aae',
                                   'window': 1000,
                                   'steps': self.steps
                               }),
                    MetricSpec(field=self.fieldToPredict,
                               metric='trivial',
                               inferenceElement='prediction',
                               params={
                                   'errorMetric': 'aae',
                                   'window': 1000,
                                   'steps': self.steps
                               }),
                    MetricSpec(field=self.fieldToPredict,
                               metric='multiStep',
                               inferenceElement='multiStepBestPredictions',
                               params={
                                   'errorMetric': 'altMAPE',
                                   'window': 1000,
                                   'steps': self.steps
                               }),
                    MetricSpec(field=self.fieldToPredict,
                               metric='trivial',
                               inferenceElement='prediction',
                               params={
                                   'errorMetric': 'altMAPE',
                                   'window': 1000,
                                   'steps': self.steps
                               }))
     return metricSpecs
Example #4
 def testWindowedTrivialAAE(self):
     """Trivial Average Error metric test"""
     trivialAveErr = getModule(
         MetricSpec("trivial", None, None, {
             "verbosity": OPFMetricsTest.VERBOSITY,
             "errorMetric": "avg_err"
         }))
     gt = [str(i / 4 + 1) for i in range(100)]
     p = [str(i) for i in range(100)]
     for i in xrange(len(gt)):
         trivialAveErr.addInstance(gt[i], p[i])
     target = .25
     self.assertTrue(
         abs(trivialAveErr.getMetric()["value"] - target) < OPFMetricsTest.DELTA)
Example #5
    def testWindowedRMSE(self):
        wrmse = getModule(
            MetricSpec("rmse", None, None, {
                "verbosity": OPFMetricsTest.VERBOSITY,
                "window": 3
            }))
        gt = [9, 4, 4, 100, 44]
        p = [0, 13, 4, 6, 7]
        for gv, pv in zip(gt, p):
            wrmse.addInstance(gv, pv)
        target = 58.324

        self.assertTrue(
            abs(wrmse.getMetric()["value"] - target) < OPFMetricsTest.DELTA)
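For reference, the expected value follows from the window of 3: only the last three (gt, p) pairs should contribute, which is the only assumption behind this quick check:

    import math
    residuals = [4 - 4, 100 - 6, 44 - 7]                    # last three errors
    print math.sqrt(sum(e * e for e in residuals) / 3.0)    # ~58.324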
Example #6
  def testMultistepAAE(self):
    """Multistep AAE metric test"""
    msp = getModule(MetricSpec("multiStep", None, None,
     {"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
           "steps": 3}))
    
    # Make each ground truth 1 greater than the prediction
    gt = [i+1 for i in range(100)]
    p = [{3: {i: .7, 5: 0.3}} for i in range(100)]

    for i in xrange(len(gt)):
      msp.addInstance(gt[i], p[i])
    target = 1
    self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
Example #7
 def testWindowedAAE(self):
     """Windowed AAE"""
     waae = getModule(
         MetricSpec("aae", None, None, {
             "verbosity": OPFMetricsTest.VERBOSITY,
             "window": 1
         }))
     gt = [9, 4, 5, 6]
     p = [0, 13, 8, 3]
     for i in xrange(len(gt)):
         waae.addInstance(gt[i], p[i])
     target = 3.0
     self.assertTrue(
         abs(waae.getMetric()["value"] - target) < OPFMetricsTest.DELTA,
         "Got %s" % waae.getMetric())
Example #8
    def testLongWindowRMSE(self):
        """RMSE"""
        rmse = getModule(
            MetricSpec("rmse", None, None, {
                "verbosity": OPFMetricsTest.VERBOSITY,
                "window": 100
            }))
        gt = [9, 4, 5, 6]
        p = [0, 13, 8, 3]
        for i in xrange(len(gt)):
            rmse.addInstance(gt[i], p[i])
        target = 6.71

        self.assertTrue(
            abs(rmse.getMetric()["value"] - target) < OPFMetricsTest.DELTA)
Example #9
 def testMultistepProbabilityMultipleSteps(self):
   """Multistep with probabilities metric test, predicting 2 different step
   sizes"""
   msp = getModule(MetricSpec("multiStepProbability", None, None,
         {"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,
          "errorMetric":"aae", "steps": [1,3]}))
   gt = [5 for i in range(1000)]
   p = [{3: {i: .3, 5: .7},
         1: {5: 1.0}} for i in range(1000)]
   for i in xrange(len(gt)):
     msp.addInstance(gt[i], p[i])
   #(((999-5)(1000-5)/2-(899-5)(900-5)/2)*.3/100) / 2
   #  / 2 because the 1-step prediction is 100% accurate
   target = 283.35/2
   self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
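Unpacking the commented formula (my reading of the test, not text from the original source): with "window": 100 only the last 100 records (i = 900..999) count; each 3-step prediction puts probability .3 on the value i and .7 on the true value 5, so its expected absolute error is .3 * (i - 5), while the 1-step prediction is always exactly 5 and contributes zero error, halving the average over the two step sizes:

   three_step = sum(.3 * (i - 5) for i in range(900, 1000)) / 100.0   # 283.35
   target = (three_step + 0.0) / 2                                    # 141.675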
Example #10
  def testMultistepAAEMultipleSteps(self):
    """Multistep AAE metric test, predicting 2 different step sizes"""
    msp = getModule(MetricSpec("multiStep", None, None,
     {"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
           "steps": [3,6]}))
    
    # Make each 3 step prediction +1 over ground truth and each 6 step
    # prediction +0.5 over ground truth
    gt = [i for i in range(100)]
    p = [{3: {i+1: .7, 5: 0.3},
          6: {i+0.5: .7, 5: 0.3}} for i in range(100)]

    for i in xrange(len(gt)):
      msp.addInstance(gt[i], p[i])
    target = 0.75  # average of +1 error and 0.5 error
    self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
Example #11
def _testMetricLabels():
    print "\n*Testing Metric Label Generation*..."

    from nupic.frameworks.opf.metrics import MetricSpec

    testTuples = [
        (MetricSpec('rmse', InferenceElement.prediction,
                    'consumption'), "prediction:rmse:field=consumption"),
        (MetricSpec('rmse',
                    InferenceElement.classification), "classification:rmse"),
        (MetricSpec('rmse',
                    InferenceElement.encodings,
                    'pounds',
                    params=dict(window=100)),
         "encodings:rmse:window=100:field=pounds"),
        (MetricSpec('aae',
                    InferenceElement.prediction,
                    'pounds',
                    params=dict(window=100, paramA=10.2, paramB=20)),
         "prediction:aae:paramA=10.2:paramB=20:window=100:field=pounds"),
        (MetricSpec('aae',
                    InferenceElement.prediction,
                    'pounds',
                    params={
                        'window': 100,
                        'paramA': 10.2,
                        '1paramB': 20
                    }),
         "prediction:aae:1paramB=20:paramA=10.2:window=100:field=pounds"),
        (MetricSpec('aae',
                    InferenceElement.prediction,
                    'pounds',
                    params=dict(window=100, paramA=10.2, paramB=-20)),
         "prediction:aae:paramA=10.2:paramB=-20:window=100:field=pounds"),
        (MetricSpec('aae',
                    InferenceElement.prediction,
                    'pounds',
                    params=dict(window=100, paramA=10.2, paramB='square')),
         "prediction:aae:paramA=10.2:paramB='square':window=100:field=pounds"),
    ]

    for test in testTuples:
        try:
            assert test[0].getLabel() == test[1]
        except AssertionError:
            print "Failed Creating label"
            print "Expected %s \t Got %s" % (test[1], test[0].getLabel())
            return

    print "ok"
Example #12
  def testMovingMeanRMSE(self):
    """Moving mean RMSE metric test"""
    movingMeanRMSE = getModule(MetricSpec("moving_mean", None, None,
         {"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mean_window":3,
          "errorMetric":"rmse"}))
    gt = [i for i in range(890)]
    gt.extend([2*i for i in range(110)])
    p = [i for i in range(1000)]
    res = []
    for i in xrange(len(gt)):
      movingMeanRMSE.addInstance(gt[i], p[i])
      res.append(movingMeanRMSE.getMetric()["value"])
    self.assertTrue(max(res[1:890]) == 2.0)
    self.assertTrue(min(res[891:])>=4.0)
    target = 4.0
    self.assertTrue(
        abs(movingMeanRMSE.getMetric()["value"] - target) < OPFMetricsTest.DELTA)
Example #13
 def testMultistepProbability(self):
     """Multistep with probabilities metric test"""
     msp = getModule(
         MetricSpec(
             "multiStepProbability", None, None, {
                 "verbosity": OPFMetricsTest.VERBOSITY,
                 "window": 100,
                 "errorMetric": "aae",
                 "steps": 3
             }))
     gt = [5 for i in range(1000)]
     p = [{3: {i: .3, 5: .7}} for i in range(1000)]
     for i in xrange(len(gt)):
         msp.addInstance(gt[i], p[i])
     #((999-5)(1000-5)/2-(899-5)(900-5)/2)*.3/100
     target = 283.35
     self.assertTrue(
         abs(msp.getMetric()["value"] - target) < OPFMetricsTest.DELTA)
Example #14
 def _init_model(self, model):
     model.enableInference({"predictedField": self.PredictedField})
     metrics = ['aae', 'altMAPE', 'rmse']
     windows = [
         self.PredictedSteps * 100, self.PredictedSteps * 10,
         self.PredictedSteps
     ]
     metric_specs = list()
     for w in windows:
         for m in metrics:
             metric_specs.append(
                 MetricSpec(field=self.PredictedField,
                            metric='multiStep',
                            inferenceElement='multiStepBestPredictions',
                            params={
                                'errorMetric': m,
                                'window': w,
                                'steps': self.PredictedSteps
                            }))
     self.Metrics = MetricsManager(metric_specs, model.getFieldInfo(),
                                   model.getInferenceType())
     self.Model = model
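A hedged sketch of how these metrics are typically consumed afterwards, e.g. from another method of the same class; the input record is hypothetical, and only MetricsManager.update() returning a {label: value} dict is assumed from the OPF API:

     record = {self.PredictedField: 42.0}    # hypothetical input row
     result = self.Model.run(record)
     scores = self.Metrics.update(result)    # {metric label: current value, ...}
     for label, value in scores.iteritems():
         print label, value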
Example #15
    def testTwoGramStrings(self):
        """One gram string test"""
        oneGram = getModule(
            MetricSpec(
                "two_gram", None, None, {
                    "verbosity": OPFMetricsTest.VERBOSITY,
                    "window": 100,
                    "errorMetric": "acc",
                    "predictionField": "test"
                }))

        # Sequences of "0", "1", "2", "3", "4", "0", "1", ...
        gt = [str(i % 5) for i in range(1000)]
        encodings = [np.zeros(10) for i in range(5)]
        for i in range(len(encodings)):
            encoding = encodings[i]
            encoding[i] = 1

        # Make every 5th element random
        newElem = 100
        for i in range(5, 1000, 5):
            gt[i] = str(newElem)
            newElem += 20

        res = []
        for i in xrange(len(gt)):
            if i == 20:
                # Make sure we don't barf with missing values
                twoGram.addInstance(np.zeros(10),
                                    prediction=None,
                                    record={"test": None})
            else:
                twoGram.addInstance(encodings[i % 5],
                                    prediction=None,
                                    record={"test": gt[i]})
            res.append(twoGram.getMetric()["value"])
        target = .8
        self.assertTrue(
            abs(twoGram.getMetric()["value"] - target) < OPFMetricsTest.DELTA)
Example #16
  def testMovingModeAccuracy(self):
    """Moving mode Accuracy metric test"""
    movingModeACC = getModule(MetricSpec("moving_mode", None, None,
       {"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mode_window":3,
        "errorMetric":"acc"}))
    #Should initially asymptote to .5
    #Then after 900 should go to 0.0 as the predictions will always be offset
    gt = [i/4 for i in range(900)]
    gt.extend([2*i/4 for i in range(100)])
    p = [i for i in range(1000)]
    res = []
    for i in xrange(len(gt)):
      movingModeACC.addInstance(gt[i], p[i])
      res.append(movingModeACC.getMetric()["value"])
    #Make sure that there is no point where the average acc is <.5
    self.assertTrue(min(res[1:899]) == .5)
    #Make sure that after the statistics switch the acc goes to 0.0
    self.assertTrue(max(res[900:])<=.5)
    #Make sure that the statistics change is still noticeable while it
    #is in the window
    self.assertTrue(res[998]>0.0)
    target = 0.0
    self.assertTrue(
        abs(movingModeACC.getMetric()["value"] - target) < OPFMetricsTest.DELTA)
Example #17
  "Starts a NuPIC model from the model params returned by the swarm\n"
  "and pushes each line of input from the gym into the model. Results\n"
  "are written to an output file (default) or plotted dynamically if\n"
  "the --plot option is specified.\n"
  "NOTE: You must run ./swarm.py before this, because model parameters\n"
  "are required to run NuPIC.\n"
)
GYM_NAME = "rec-center-hourly"  # or use "rec-center-every-15m-large"
DATA_DIR = "."
MODEL_PARAMS_DIR = "./model_params"
# '7/2/10 0:00'
DATE_FORMAT = "%m/%d/%y %H:%M"

_METRIC_SPECS = (
    MetricSpec(field='kw_energy_consumption', metric='multiStep',
               inferenceElement='multiStepBestPredictions',
               params={'errorMetric': 'aae', 'window': 1000, 'steps': 1}),
    MetricSpec(field='kw_energy_consumption', metric='trivial',
               inferenceElement='prediction',
               params={'errorMetric': 'aae', 'window': 1000, 'steps': 1}),
    MetricSpec(field='kw_energy_consumption', metric='multiStep',
               inferenceElement='multiStepBestPredictions',
               params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': 1}),
    MetricSpec(field='kw_energy_consumption', metric='trivial',
               inferenceElement='prediction',
               params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': 1}),
)

def createModel(modelParams):
  model = ModelFactory.create(modelParams)
  model.enableInference({"predictedField": "kw_energy_consumption"})
  return model
Example #18
            'iterationCount': -1,

            # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
            'taskControl': {

                # Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX
                # instances.
                'iterationCycle': [
                    #IterationPhaseSpecLearnOnly(1000),
                    IterationPhaseSpecLearnAndInfer(
                        1000, dict(predictedField="consumption")),
                    #IterationPhaseSpecInferOnly(10),
                ],
                'metrics': [
                    MetricSpec(metric='rmse',
                               field="consumption",
                               inferenceElement=InferenceElement.prediction),
                ],

                # Callbacks for experimentation/research (optional)
                'callbacks': {
                    # Callbacks to be called at the beginning of a task, before model iterations.
                    # Signature: callback(<reference to OPFExperiment>); returns nothing
                    'setup': [],

                    # Callbacks to be called after every learning/inference iteration
                    # Signature: callback(<reference to OPFExperiment>); returns nothing
                    'postIter': [],

                    # Callbacks to be called when the experiment task is finished
                    # Signature: callback(<reference to OPFExperiment>); returns nothing
Example #19
  # to one record from the (possibly aggregated) dataset.  The task is
  # terminated when either number of iterations reaches iterationCount or
  # all records in the (possibly aggregated) database have been processed,
  # whichever occurs first.
  #
  # iterationCount of -1 = iterate over the entire dataset
  'iterationCount' : -1,


  # A dictionary containing all the supplementary parameters for inference
  "inferenceArgs":{u'predictedField': u'f', 'predictionSteps': [1]},

  # Metrics: A list of MetricSpecs that instantiate the metrics that are
  # computed for this experiment
  'metrics':[
    MetricSpec(field=u'f', metric='aae', inferenceElement='prediction', params={'window': 100}),
  ],

  # Logged Metrics: A sequence of regular expressions that specify which of
  # the metrics from the Inference Specifications section MUST be logged for
  # every prediction. The regexes correspond to the automatically generated
  # metric labels. This is similar to the way the optimization metric is
  # specified in permutations.py.
  'loggedMetrics': ['.*nupicScore.*'],
}



descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
Example #20
    # Logged Metrics: A sequence of regular expressions that specify which of
    # the metrics from the Inference Specifications section MUST be logged for
    # every prediction. The regexes correspond to the automatically generated
    # metric labels. This is similar to the way the optimization metric is
    # specified in permutations.py.
    'loggedMetrics': ['.*aae.*'],
}

# Add multi-step prediction metrics
for steps in config['predictionSteps']:
    control['metrics'].append(
        MetricSpec(field=config['predictedField'],
                   metric='multiStep',
                   inferenceElement='multiStepBestPredictions',
                   params={
                       'errorMetric': 'aae',
                       'window': 1000,
                       'steps': steps
                   }))
    control['metrics'].append(
        MetricSpec(field=config['predictedField'],
                   metric='trivial',
                   inferenceElement='prediction',
                   params={
                       'errorMetric': 'aae',
                       'window': 1000,
                       'steps': steps
                   }))
    control['metrics'].append(
        MetricSpec(field=config['predictedField'],
                   metric='multiStep',
Example #21
    #
    # iterationCount of -1 = iterate over the entire dataset
    'iterationCount':
    -1,

    # A dictionary containing all the supplementary parameters for inference
    "inferenceArgs": {
        'predictedField': config['predictedField'],
        'predictionSteps': config['predictionSteps']
    },

    # Metrics: A list of MetricSpecs that instantiate the metrics that are
    # computed for this experiment
    'metrics': [
        MetricSpec(field=config['predictedField'],
                   metric=metricName,
                   inferenceElement='prediction',
                   params={'window': config['windowSize']}),
        MetricSpec(field=config['predictedField'],
                   metric='trivial',
                   inferenceElement='prediction',
                   params={
                       'errorMetric': metricName,
                       'window': config['windowSize']
                   }),
    ],

    # Logged Metrics: A sequence of regular expressions that specify which of
    # the metrics from the Inference Specifications section MUST be logged for
    # every prediction. The regexes correspond to the automatically generated
    # metric labels. This is similar to the way the optimization metric is
    # specified in permutations.py.
Example #22
    # iterationCount of -1 = iterate over the entire dataset
    #'iterationCount' : ITERATION_COUNT,

    # A dictionary containing all the supplementary parameters for inference
    "inferenceArgs": {
        u'predictedField': u'consumption',
        u'predictionSteps': [0]
    },

    # Metrics: A list of MetricSpecs that instantiate the metrics that are
    # computed for this experiment
    'metrics': [
        MetricSpec(field=u'consumption',
                   metric='multiStep',
                   inferenceElement='multiStepBestPredictions',
                   params={
                       'window': 1000,
                       'steps': [0],
                       'errorMetric': 'avg_err'
                   })
    ],

    # Logged Metrics: A sequence of regular expressions that specify which of
    # the metrics from the Inference Specifications section MUST be logged for
    # every prediction. The regexes correspond to the automatically generated
    # metric labels. This is similar to the way the optimization metric is
    # specified in permutations.py.
    'loggedMetrics': ['.*'],
}

descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
Example #23
    -1,

    # A dictionary containing all the supplementary parameters for inference
    "inferenceArgs": {
        u'inputPredictedField': 'auto',
        u'predictedField': u'word',
        u'predictionSteps': [1, 2, 3, 4, 5, 6]
    },

    # Metrics: A list of MetricSpecs that instantiate the metrics that are
    # computed for this experiment
    'metrics': [
        MetricSpec(field=u'word',
                   metric='multiStep',
                   inferenceElement='multiStepBestPredictions',
                   params={
                       'window': 1000,
                       'steps': [1, 2, 3, 4, 5, 6],
                       'errorMetric': 'avg_err'
                   })
    ],

    # Logged Metrics: A sequence of regular expressions that specify which of
    # the metrics from the Inference Specifications section MUST be logged for
    # every prediction. The regexes correspond to the automatically generated
    # metric labels. This is similar to the way the optimization metric is
    # specified in permutations.py.
    'loggedMetrics': ['.*'],
}

################################################################################
################################################################################
Example #24
  # to one record from the (possibly aggregated) dataset.  The task is
  # terminated when either number of iterations reaches iterationCount or
  # all records in the (possibly aggregated) database have been processed,
  # whichever occurs first.
  #
  # iterationCount of -1 = iterate over the entire dataset
  'iterationCount' : 20,


  # A dictionary containing all the supplementary parameters for inference
  "inferenceArgs":{u'predictedField': u'value', u'predictionSteps': [1, 5]},

  # Metrics: A list of MetricSpecs that instantiate the metrics that are
  # computed for this experiment
  'metrics':[
    MetricSpec(field=u'value', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 10, 'steps': 1, 'errorMetric': 'aae'}),
    MetricSpec(field=u'value', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 10, 'steps': 5, 'errorMetric': 'aae'}),
  ],

  # Logged Metrics: A sequence of regular expressions that specify which of
  # the metrics from the Inference Specifications section MUST be logged for
  # every prediction. The regexes correspond to the automatically generated
  # metric labels. This is similar to the way the optimization metric is
  # specified in permutations.py.
  'loggedMetrics': ['.*nupicScore.*'],
}



descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
Example #25
        1
    },

    # Iteration count: maximum number of iterations.  Each iteration corresponds
    # to one record from the (possibly aggregated) dataset.  The task is
    # terminated when either number of iterations reaches iterationCount or
    # all records in the (possibly aggregated) database have been processed,
    # whichever occurs first.
    #
    # iterationCount of -1 = iterate over the entire dataset
    #'iterationCount' : ITERATION_COUNT,

    # Metrics: A list of MetricSpecs that instantiate the metrics that are
    # computed for this experiment
    'metrics': [
        MetricSpec(field=u'consumption',
                   inferenceElement=InferenceElement.prediction,
                   metric='rmse'),
    ],

    # Logged Metrics: A sequence of regular expressions that specify which of
    # the metrics from the Inference Specifications section MUST be logged for
    # every prediction. The regexes correspond to the automatically generated
    # metric labels. This is similar to the way the optimization metric is
    # specified in permutations.py.
    'loggedMetrics': ['.*nupicScore.*'],
}

descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
Example #26
    #
    # iterationCount of -1 = iterate over the entire dataset
    'iterationCount':
    -1,

    # A dictionary containing all the supplementary parameters for inference
    "inferenceArgs": {
        u'predictedField': u'f',
        u'predictionSteps': [1]
    },

    # Metrics: A list of MetricSpecs that instantiate the metrics that are
    # computed for this experiment
    'metrics': [
        MetricSpec(field=u'f',
                   metric='passThruPrediction',
                   inferenceElement='anomalyScore',
                   params={'window': 1000}),
    ],

    # Logged Metrics: A sequence of regular expressions that specify which of
    # the metrics from the Inference Specifications section MUST be logged for
    # every prediction. The regexes correspond to the automatically generated
    # metric labels. This is similar to the way the optimization metric is
    # specified in permutations.py.
    'loggedMetrics': ['.*'],
}

################################################################################
################################################################################
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
Example #27
            # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
            'taskControl': {

                # Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX
                # instances.
                'iterationCycle': [
                    #IterationPhaseSpecLearnOnly(1000),
                    IterationPhaseSpecLearnAndInfer(1000),
                    #IterationPhaseSpecInferOnly(10),
                ],

                # Metrics: A list of MetricSpecs that instantiate the metrics that are
                # computed for this experiment
                'metrics': [
                    MetricSpec(metric='avg_err',
                               inferenceElement='classification',
                               params={'window': 200}),
                    MetricSpec(metric='neg_auc',
                               inferenceElement='classConfidences',
                               params={
                                   'window': 200,
                                   'computeEvery': 10
                               }),
                ],

                # Logged Metrics: A sequence of regular expressions that specify which of
                # the metrics from the Inference Specifications section MUST be logged for
                # every prediction. The regexes correspond to the automatically generated
                # metric labels. This is similar to the way the optimization metric is
                # specified in permutations.py.
                'loggedMetrics': ['.*avg_err.*', '.*auc.*'],
Example #28
  # all records in the (possibly aggregated) database have been processed,
  # whichever occurs first.
  #
  # iterationCount of -1 = iterate over the entire dataset
  'iterationCount' : -1,


  # A dictionary containing all the supplementary parameters for inference
  "inferenceArgs":{u'inputPredictedField': 'auto',
 u'predictedField': u'rlg_price',
 u'predictionSteps': [1, 30, 180, 360]},

  # Metrics: A list of MetricSpecs that instantiate the metrics that are
  # computed for this experiment
  'metrics':[
    MetricSpec(field=u'rlg_price', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [1, 30, 180, 360], 'errorMetric': 'aae'}),
    MetricSpec(field=u'rlg_price', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [1, 30, 180, 360], 'errorMetric': 'altMAPE'})
  ],

  # Logged Metrics: A sequence of regular expressions that specify which of
  # the metrics from the Inference Specifications section MUST be logged for
  # every prediction. The regexes correspond to the automatically generated
  # metric labels. This is similar to the way the optimization metric is
  # specified in permutations.py.
  'loggedMetrics': ['.*'],
}

################################################################################
################################################################################
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
Example #29
  # to one record from the (possibly aggregated) dataset.  The task is
  # terminated when either number of iterations reaches iterationCount or
  # all records in the (possibly aggregated) database have been processed,
  # whichever occurs first.
  #
  # iterationCount of -1 = iterate over the entire dataset
  'iterationCount' : -1,


  # A dictionary containing all the supplementary parameters for inference
  "inferenceArgs":{u'predictedField': u'c1', u'predictionSteps': [24]},

  # Metrics: A list of MetricSpecs that instantiate the metrics that are
  # computed for this experiment
  'metrics':[
    MetricSpec(field=u'c1', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [24], 'errorMetric': 'altMAPE'}),
  ],

  # Logged Metrics: A sequence of regular expressions that specify which of
  # the metrics from the Inference Specifications section MUST be logged for
  # every prediction. The regexes correspond to the automatically generated
  # metric labels. This is similar to the way the optimization metric is
  # specified in permutations.py.
  'loggedMetrics': ['.*'],
}



descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
Example #30
    # all records in the (possibly aggregated) database have been processed,
    # whichever occurs first.
    #
    # iterationCount of -1 = iterate over the entire dataset
    'iterationCount':
    -1,

    # A dictionary containing all the supplementary parameters for inference
    "inferenceArgs":
    None,

    # Metrics: A list of MetricSpecs that instantiate the metrics that are
    # computed for this experiment
    'metrics': [
        MetricSpec(field=u'f',
                   metric='aae',
                   inferenceElement='prediction',
                   params={'window': 1000}),
        MetricSpec(field=u'f',
                   metric='grokScore_scalar',
                   inferenceElement='encodings',
                   params={
                       'frequencyWindow': 1000,
                       'movingAverageWindow': 1000
                   }),
        MetricSpec(field=u'f',
                   metric='grokScore_scalar',
                   inferenceElement='encodings',
                   params={'frequencyWindow': 1000})
    ],

    # Logged Metrics: A sequence of regular expressions that specify which of