Example #1

control = {
    # ... (other control entries not shown) ...
    'dataset': {
        # ... (stream specification fields not shown) ...
        u'version': 1
    },

    # Iteration count: maximum number of iterations.  Each iteration corresponds
    # to one record from the (possibly aggregated) dataset.  The task is
    # terminated either when the number of iterations reaches iterationCount or
    # when all records in the (possibly aggregated) dataset have been processed,
    # whichever occurs first.
    #
    # An iterationCount of -1 means iterate over the entire dataset.
    #'iterationCount': ITERATION_COUNT,

    # Metrics: A list of MetricSpecs that instantiate the metrics that are
    # computed for this experiment
    'metrics': [
        MetricSpec(field=u'consumption',
                   inferenceElement=InferenceElement.prediction,
                   metric='rmse'),
    ],
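    # Illustrative, not from the original example: MetricSpec also accepts a
    # `params` dict; a windowed average-absolute-error metric on the same
    # field could be written as:
    #   MetricSpec(field=u'consumption',
    #              inferenceElement=InferenceElement.prediction,
    #              metric='aae', params={'window': 1000}),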

    # Logged Metrics: A sequence of regular expressions that specify which of
    # the metrics from the Inference Specifications section MUST be logged for
    # every prediction. The regexes correspond to the automatically generated
    # metric labels. This is similar to the way the optimization metric is
    # specified in permutations.py.
    'loggedMetrics': ['.*nupicScore.*'],
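    # Illustrative, not from the original example: generated labels are
    # colon-separated strings such as "prediction:rmse:field=consumption"
    # (the exact label for a metric comes from MetricSpec.getLabel()).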
}

descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
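
A minimal sketch of running such a description, assuming a NuPIC install where
nupic.frameworks.opf.experiment_runner exposes runExperiment (the experiment
path below is a placeholder):

from nupic.frameworks.opf.experiment_runner import runExperiment

# Run the OPF experiment in the directory containing this description.py.
runExperiment(["path/to/experiment"])
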
Example #2

tasks = [
    {
        # ... (other task fields not shown) ...
        'taskControl': {
            # ... (other taskControl entries not shown) ...
            # Logged Metrics: A sequence of regular expressions that specify which of
            # the metrics from the Inference Specifications section MUST be logged for
            # every prediction. The regexes correspond to the automatically generated
            # metric labels. This is similar to the way the optimization metric is
            # specified in permutations.py.
            'loggedMetrics': [],

            # Callbacks for experimentation/research (optional)
            'callbacks': {
                # Callbacks to be called at the beginning of a task, before model iterations.
                # Signature: callback(<reference to OPFExperiment>); returns nothing
                'setup': [
                    htmPredictionModelControlEnableSPLearningCb,
                    htmPredictionModelControlEnableTPLearningCb
                ],

                # Callbacks to be called after every learning/inference iteration
                # Signature: callback(<reference to OPFExperiment>); returns nothing
                'postIter': [],

                # Callbacks to be called when the experiment task is finished
                # Signature: callback(<reference to OPFExperiment>); returns nothing
                'finish': []
            }
        }  # End of taskControl
    },  # End of task
]

descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                taskList=tasks)
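
Per the callback signature documented above (each callback receives a
reference to the OPFExperiment and returns nothing), a custom hook is a
one-argument function; a minimal sketch with a hypothetical name:

def logIterationCb(experiment):
    # Called once per learning/inference iteration; the OPFExperiment
    # instance is available for inspection here.
    print("iteration finished")

It would be registered in the task control as 'postIter': [logIterationCb].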