コード例 #1
0
    def convertNupicEnvToOPF(self):
        """Convert the Nupic-style control dict in ``self.__control`` into
        the OPF control format, in place.

        The Nupic control dict is reshaped into a single OPF task labeled
        'DefaultTask': the 'environment' and 'inferenceArgs' entries are
        removed, an iterationCycle is derived from 'iterationCount' /
        'iterationCountInferOnly', and 'metrics' / 'loggedMetrics' are moved
        into a 'taskControl' sub-dict.  Finally ``self.__control`` is
        replaced with ``dict(environment=OpfEnvironment.Nupic, tasks=[task])``.

        Raises:
          AssertionError: if 'iterationCountInferOnly' is a positive count
            but 'iterationCount' does not exceed it (which would leave a
            non-positive learn-and-infer phase).
        """

        # Most of the task structure is taken verbatim from the Nupic
        # control dict; keys that belong elsewhere are popped off below.
        task = dict(self.__control)

        task.pop('environment')
        inferenceArgs = task.pop('inferenceArgs')
        task['taskLabel'] = 'DefaultTask'

        # Create the iterationCycle element that will be placed inside the
        # taskControl.  iterationCount of -1 means "iterate over the entire
        # dataset"; it is read with get() (not popped) because the task
        # structure keeps it.
        iterationCount = task.get('iterationCount', -1)
        iterationCountInferOnly = task.pop('iterationCountInferOnly', 0)
        if iterationCountInferOnly == -1:
            # Infer-only over the entire dataset, in 1000-record phases.
            iterationCycle = [
                IterationPhaseSpecInferOnly(1000, inferenceArgs=inferenceArgs)
            ]
        elif iterationCountInferOnly > 0:
            # Learn-and-infer for the leading iterations, then a trailing
            # infer-only phase.  iterationCount must be specified (not -1)
            # and must exceed iterationCountInferOnly so the learn-and-infer
            # phase gets a positive iteration count.
            assert iterationCount > iterationCountInferOnly, \
              "When iterationCountInferOnly is specified, iterationCount "\
              "must also be specified, must not be -1, and must exceed "\
              "iterationCountInferOnly"
            iterationCycle = [
                IterationPhaseSpecLearnAndInfer(iterationCount -
                                                iterationCountInferOnly,
                                                inferenceArgs=inferenceArgs),
                IterationPhaseSpecInferOnly(iterationCountInferOnly,
                                            inferenceArgs=inferenceArgs)
            ]
        else:
            # Default: learn-and-infer in 1000-record phases.
            iterationCycle = [
                IterationPhaseSpecLearnAndInfer(1000,
                                                inferenceArgs=inferenceArgs)
            ]

        # Move the metric specifications under taskControl, alongside the
        # iterationCycle built above.
        taskControl = dict(metrics=task.pop('metrics'),
                           loggedMetrics=task.pop('loggedMetrics'),
                           iterationCycle=iterationCycle)
        task['taskControl'] = taskControl

        # Create the new control
        self.__control = dict(environment=OpfEnvironment.Nupic, tasks=[task])
コード例 #2
0
ファイル: description.py プロジェクト: swadhwa1/nupic
      # terminated when either number of iterations reaches iterationCount or
      # all records in the (possibly aggregated) database have been processed,
      # whichever occurs first.
      #
      # iterationCount of -1 = iterate over the entire dataset
      'iterationCount' : -1,


      # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
      'taskControl' : {

        # Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
        # instances.
        'iterationCycle' : [
          #IterationPhaseSpecLearnOnly(1000),
          IterationPhaseSpecLearnAndInfer(1000),
          #IterationPhaseSpecInferOnly(10),
        ],

        # Metrics: A list of MetricSpecs that instantiate the metrics that are
        # computed for this experiment
        'metrics':[
          MetricSpec(metric='avg_err', inferenceElement='classification',
                     params={'window': 200}),
          MetricSpec(metric='neg_auc', inferenceElement='classConfidences',
                     params={'window': 200, 'computeEvery': 10}),
        ],

        # Logged Metrics: A sequence of regular expressions that specify which of
        # the metrics from the Inference Specifications section MUST be logged for
        # every prediction. The regexes correspond to the automatically generated
コード例 #3
0
ファイル: description.py プロジェクト: mrcslws/nupic
    # terminated when either number of iterations reaches iterationCount or
    # all records in the (possibly aggregated) database have been processed,
    # whichever occurs first.
    #
    # iterationCount of -1 = iterate over the entire dataset
    'iterationCount' : -1,


    # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
    'taskControl' : {

      # Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
      # instances.
      'iterationCycle' : [
        #IterationPhaseSpecLearnOnly(1000),
        IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
        #IterationPhaseSpecInferOnly(10, inferenceArgs=None),
      ],

      'metrics' : [
      ],

      # Logged Metrics: A sequence of regular expressions that specify which of
      # the metrics from the Inference Specifications section MUST be logged for
      # every prediction. The regexes correspond to the automatically generated
      # metric labels. This is similar to the way the optimization metric is
      # specified in permutations.py.
      'loggedMetrics': ['.*nupicScore.*'],


      # Callbacks for experimentation/research (optional)
コード例 #4
0
      # terminated when either number of iterations reaches iterationCount or
      # all records in the (possibly aggregated) database have been processed,
      # whichever occurs first.
      #
      # iterationCount of -1 = iterate over the entire dataset
      'iterationCount' : -1,


      # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
      'taskControl' : {

        # Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
        # instances.
        'iterationCycle' : [
          #IterationPhaseSpecLearnOnly(1000),
          IterationPhaseSpecLearnAndInfer(1000, dict(predictedField="consumption")),
          #IterationPhaseSpecInferOnly(10),
        ],

        'metrics' :[
          MetricSpec(metric='rmse',
                     field="consumption",
                     inferenceElement=InferenceElement.prediction),
        ],

        # Callbacks for experimentation/research (optional)
        'callbacks' : {
          # Callbacks to be called at the beginning of a task, before model iterations.
          # Signature: callback(<reference to OPFExperiment>); returns nothing
          'setup' : [],