Example #1
def main():
  """Run according to options in sys.argv"""
  # Init the NuPic logging configuration from the nupic-logging.conf configuration
  # file. This is found either in the NTA_CONF_DIR directory (if defined) or
  # in the 'conf' subdirectory of the NuPic install location.
  nupic.support.initLogging(verbose=True)

  # Initialize PRNGs
  initExperimentPrng()

  # Run it!
  runExperiment(sys.argv[1:])
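The comment above describes looking for nupic-logging.conf first in the NTA_CONF_DIR directory and then in the 'conf' subdirectory of the install location. A minimal sketch of that lookup pattern, assuming a hypothetical helper name and installDir argument (this is not nupic's actual implementation):

import os

def _findLoggingConf(installDir, filename="nupic-logging.conf"):
  # Prefer NTA_CONF_DIR if it is defined, otherwise fall back to <install>/conf.
  confDir = os.environ.get("NTA_CONF_DIR") or os.path.join(installDir, "conf")
  return os.path.join(confDir, filename)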
Example #2
def main():
    """Run according to options in sys.argv"""
    nupic.support.initLogging(verbose=True)

    # Initialize pseudo-random number generators (PRNGs)
    #
    # This will fix the seed that is used by numpy when generating 'random'
    # numbers. This allows for repeatability across experiments.
    initExperimentPrng()

    # Run it!
    runExperiment(sys.argv[1:])
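The comment above notes that initExperimentPrng fixes the seed used by numpy so that experiment runs are repeatable. A minimal sketch of that idea, assuming both the stdlib and numpy generators are seeded and a seed of 42 (the real implementation may differ):

import random
import numpy

def _seedPrngs(seed=42):
  # Fixing both generators makes the 'random' numbers repeatable across runs.
  random.seed(seed)
  numpy.random.seed(seed)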
Example #3
def main():
  """Run according to options in sys.argv"""
  nupic.support.initLogging(verbose=True)

  # Initialize pseudo-random number generators (PRNGs)
  #
  # This will fix the seed that is used by numpy when generating 'random'
  # numbers. This allows for repeatability across experiments.
  initExperimentPrng()

  # Run it!
  runExperiment(sys.argv[1:])
Example #4
def runReducedExperiment(path, reduced=True):
  """
  Run the experiment in <path>, with a reduced iteration count when reduced is True
  """

  initExperimentPrng()
  
  # Build the experiment arguments
  if reduced:
    args = [path, '--testMode']
  else:
    args = [path]
    
  runExperiment(args)
Example #5
def runReducedExperiment(path, reduced=True):
  """
  Run the experiment in <path>, with a reduced iteration count when reduced is True
  """

  initExperimentPrng()
  
  # Build the experiment arguments
  if reduced:
    args = [path, '--testMode']
  else:
    args = [path]
    
  runExperiment(args)
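For illustration, runReducedExperiment might be called like this; the experiment path is a placeholder:

runReducedExperiment('experiments/hotgym')                 # quick --testMode run
runReducedExperiment('experiments/hotgym', reduced=False)  # full-length run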
Example #6
def main():
  """Run according to options in sys.argv"""
  # Init the NuPic logging configuration from the nupic-logging.conf configuration
  # file. This is found either in the NTA_CONF_DIR directory (if defined) or
  # in the 'conf' subdirectory of the NuPic install location.
  nupic.support.initLogging(verbose=True)

  # Initialize pseudo-random number generators (PRNGs)
  #
  # This will fix the seed that is used by numpy when generating 'random'
  # numbers. This allows for repeatability across experiments.
  initExperimentPrng()

  # Run it!
  runExperiment(sys.argv[1:])
Example #7
def main():
    """Run according to options in sys.argv"""
    # Init the NuPic logging configuration from the nupic-logging.conf configuration
    # file. This is found either in the NTA_CONF_DIR directory (if defined) or
    # in the 'conf' subdirectory of the NuPic install location.
    nupic.support.initLogging(verbose=True)

    # Initialize pseudo-random number generators (PRNGs)
    #
    # This will fix the seed that is used by numpy when generating 'random'
    # numbers. This allows for repeatability across experiments.
    initExperimentPrng()

    # Run it!
    runExperiment(sys.argv[1:])
Example #8
def main():
  """Run according to options in sys.argv and diff classifiers."""
  initLogging(verbose=True)

  # Initialize PRNGs
  initExperimentPrng()

  # Mock out the creation of the CLAClassifier.
  @staticmethod
  def _mockCreate(*args, **kwargs):
    kwargs.pop('implementation', None)
    return CLAClassifierDiff(*args, **kwargs)
  CLAClassifierFactory.create = _mockCreate

  # Run it!
  runExperiment(sys.argv[1:])
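The pattern above swaps the factory's create method for a staticmethod that drops the 'implementation' kwarg and returns the diff classifier instead. A self-contained sketch of the same monkey-patching shape, using placeholder classes rather than the real nupic factories:

class _FakeDiffClassifier(object):
  def __init__(self, *args, **kwargs):
    self.args, self.kwargs = args, kwargs

class _FakeFactory(object):
  @staticmethod
  def create(*args, **kwargs):
    return object()

@staticmethod
def _mockCreate(*args, **kwargs):
  kwargs.pop('implementation', None)  # the diff classifier takes no such kwarg
  return _FakeDiffClassifier(*args, **kwargs)
_FakeFactory.create = _mockCreate     # patch in place, as in the example above

clf = _FakeFactory.create(steps=[1], implementation='cpp')
assert isinstance(clf, _FakeDiffClassifier) and 'implementation' not in clf.kwargs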
Example #9
def main():
  """Run according to options in sys.argv and diff classifiers."""
  initLogging(verbose=True)

  # Initialize PRNGs
  initExperimentPrng()

  # Mock out the creation of the SDRClassifier.
  @staticmethod
  def _mockCreate(*args, **kwargs):
    kwargs.pop('implementation', None)
    return SDRClassifierDiff(*args, **kwargs)
  SDRClassifierFactory.create = _mockCreate

  # Run it!
  runExperiment(sys.argv[1:])
Example #10
  def testHotgymRegression(self):
    experimentDir = os.path.join(
      os.path.dirname(__file__).partition(
        os.path.normpath("tests/integration/nupic/opf"))[0],
        "examples", "opf", "experiments", "multistep", "hotgym")

    resultsDir = os.path.join(experimentDir, "inference")
    savedModelsDir = os.path.join(experimentDir, "savedmodels")
    try:
      _model = experiment_runner.runExperiment([experimentDir])

      resultsPath = os.path.join(
          resultsDir, "DefaultTask.TemporalMultiStep.predictionLog.csv")
      with open(resultsPath) as f:
        reader = csv.reader(f)
        headers = reader.next()
        self.assertEqual(headers[14],
                         "multiStepBestPredictions:multiStep:errorMetric='aae':"
                         "steps=1:window=1000:field=consumption")
        lastRow = collections.deque(reader, 1)[0]

      # Changes that affect prediction results will cause this test to fail. If
      # the change is understood and reviewers agree that there has not been a
      # regression then this value can be updated to reflect the new result.
      self.assertAlmostEqual(float(lastRow[14]), 5.89191585339)

    finally:
      shutil.rmtree(resultsDir, ignore_errors=True)
      shutil.rmtree(savedModelsDir, ignore_errors=True)
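collections.deque(reader, 1)[0] above grabs only the final CSV row without holding the whole file in memory. The same trick in isolation, with a placeholder file name:

import collections
import csv

with open('predictionLog.csv') as f:                 # placeholder file name
  reader = csv.reader(f)
  headers = next(reader)                             # first row: column names
  lastRow = collections.deque(reader, maxlen=1)[0]   # keeps only the last row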
Example #11
    def testHotgymRegression(self):
        experimentDir = os.path.join(
            os.path.dirname(__file__).partition(
                os.path.normpath("tests/integration/nupic/opf"))[0],
            "examples", "opf", "experiments", "multistep", "hotgym")

        resultsDir = os.path.join(experimentDir, "inference")
        savedModelsDir = os.path.join(experimentDir, "savedmodels")
        try:
            _model = experiment_runner.runExperiment([experimentDir])

            resultsPath = os.path.join(
                resultsDir, "DefaultTask.TemporalMultiStep.predictionLog.csv")
            with open(resultsPath) as f:
                reader = csv.reader(f)
                headers = reader.next()
                self.assertEqual(
                    headers[14],
                    "multiStepBestPredictions:multiStep:errorMetric='aae':"
                    "steps=1:window=1000:field=consumption")
                lastRow = collections.deque(reader, 1)[0]

            # Changes that affect prediction results will cause this test to fail. If
            # the change is understood and reviewers agree that there has not been a
            # regression then this value can be updated to reflect the new result.
            self.assertAlmostEqual(float(lastRow[14]), 5.84456654247)

        finally:
            shutil.rmtree(resultsDir, ignore_errors=True)
            shutil.rmtree(savedModelsDir, ignore_errors=True)
Example #12
def main():
  """Run according to options in sys.argv and diff classifiers."""
  # Init the NuPic logging configuration from the nupic-logging.conf
  # configuration file. This is found either in the NTA_CONF_DIR directory
  # (if defined) or in the 'conf' subdirectory of the NuPic install location.
  initLogging(verbose=True)

  # Initialize PRNGs
  initExperimentPrng()

  # Mock out the creation of the CLAClassifier.
  @staticmethod
  def _mockCreate(*args, **kwargs):
    kwargs.pop('implementation', None)
    return CLAClassifierDiff(*args, **kwargs)
  CLAClassifierFactory.create = _mockCreate

  # Run it!
  runExperiment(sys.argv[1:])
Example #13
def main():
    """Run according to options in sys.argv and diff classifiers."""
    # Init the NuPic logging configuration from the nupic-logging.conf
    # configuration file. This is found either in the NTA_CONF_DIR directory
    # (if defined) or in the 'conf' subdirectory of the NuPic install location.
    initLogging(verbose=True)

    # Initialize PRNGs
    initExperimentPrng()

    # Mock out the creation of the CLAClassifier.
    @staticmethod
    def _mockCreate(*args, **kwargs):
        kwargs.pop('implementation', None)
        return CLAClassifierDiff(*args, **kwargs)

    CLAClassifierFactory.create = _mockCreate

    # Run it!
    runExperiment(sys.argv[1:])
Example #14
  def _testBackwardsCompatibility(experiment, checkpointName):
    """ Test that we can load in a checkpoint saved by an earlier version of
    the OPF.

    Parameters:
    -----------------------------------------------------------------------
    experiment:       Directory of the experiment.
    checkpointName:   which checkpoint to verify
    """

    # Get the experiment directories
    expDir = os.path.join(_EXPERIMENT_BASE, experiment)

    # Copy the pertinent checkpoint
    if os.path.exists(os.path.join(expDir, 'savedmodels')):
      shutil.rmtree(os.path.join(expDir, 'savedmodels'))
    shutil.copytree(src=os.path.join(expDir, checkpointName),
                    dst=os.path.join(expDir, 'savedmodels'))

    # Run it from the checkpoint
    _aPlusBExp = runExperiment(args=[expDir, '--load=DefaultTask',
                                     '--noCheckpoint'])
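The remove-then-copy step above can be read as a small reusable helper; the name _replaceDir is hypothetical:

import os
import shutil

def _replaceDir(src, dst):
  # Drop any stale destination directory, then copy the checkpoint into its
  # place (mirrors the savedmodels handling above).
  if os.path.exists(dst):
    shutil.rmtree(dst)
  shutil.copytree(src=src, dst=dst)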
Example #15
    def _testBackwardsCompatibility(experiment, checkpointName):
        """ Test that we can load in a checkpoint saved by an earlier version of
    the OPF.

    Parameters:
    -----------------------------------------------------------------------
    experiment:       Directory of the experiment.
    checkpointName:   which checkpoint to verify
    """

        # Get the experiment directories
        expDir = os.path.join(_EXPERIMENT_BASE, experiment)

        # Copy the pertinent checkpoint
        if os.path.exists(os.path.join(expDir, 'savedmodels')):
            shutil.rmtree(os.path.join(expDir, 'savedmodels'))
        shutil.copytree(src=os.path.join(expDir, checkpointName),
                        dst=os.path.join(expDir, 'savedmodels'))

        # Run it from the checkpoint
        _aPlusBExp = runExperiment(
            args=[expDir, '--load=DefaultTask', '--noCheckpoint'])
Example #16
  def _testSamePredictions(self, experiment, predSteps, checkpointAt,
                           predictionsFilename, additionalFields=None,
                           newSerialization=False):
    """ Test that we get the same predictions out from the following two
    scenarios:

    a_plus_b: Run the network for 'a' iterations followed by 'b' iterations
    a, followed by b: Run the network for 'a' iterations, save it, load it
                      back in, then run for 'b' iterations.

    Parameters:
    -----------------------------------------------------------------------
    experiment:   base directory of the experiment. This directory should
                    contain the following:
                        base.py
                        a_plus_b/description.py
                        a/description.py
                        b/description.py
                    The sub-directory description files should import the
                    base.py and only change the first and last record used
                    from the data file.
    predSteps:   Number of steps ahead predictions are for
    checkpointAt: Number of iterations that 'a' runs for.
                 IMPORTANT: This must match the number of records that
                 a/description.py runs for - it is NOT dynamically stuffed into
                 the a/description.py.
    predictionsFilename: The name of the predictions file that the OPF
                  generates for this experiment (for example
                  'DefaultTask.NontemporalMultiStep.predictionLog.csv')
    newSerialization: Whether to use new capnproto serialization.
    """

    # Get the 3 sub-experiment directories
    aPlusBExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a_plus_b")
    aExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a")
    bExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "b")

    # Run a+b
    args = self._createExperimentArgs(aPlusBExpDir,
                                      newSerialization=newSerialization)
    _aPlusBExp = runExperiment(args)

    # Run a, then copy the saved checkpoint into the b directory
    args = self._createExperimentArgs(aExpDir,
                                      newSerialization=newSerialization)
    _aExp = runExperiment(args)
    if os.path.exists(os.path.join(bExpDir, 'savedmodels')):
      shutil.rmtree(os.path.join(bExpDir, 'savedmodels'))
    shutil.copytree(src=os.path.join(aExpDir, 'savedmodels'),
                    dst=os.path.join(bExpDir, 'savedmodels'))

    args = self._createExperimentArgs(bExpDir,
                                      newSerialization=newSerialization,
                                      additionalArgs=['--load=DefaultTask'])
    _bExp = runExperiment(args)

    # Now, compare the predictions at the end of a+b to those in b.
    aPlusBPred = FileRecordStream(os.path.join(aPlusBExpDir, 'inference',
                                   predictionsFilename))
    bPred = FileRecordStream(os.path.join(bExpDir, 'inference',
                                   predictionsFilename))

    colNames = [x[0] for x in aPlusBPred.getFields()]
    actValueColIdx = colNames.index('multiStepPredictions.actual')
    predValueColIdx = colNames.index('multiStepPredictions.%d' % (predSteps))

    # Skip past the 'a' records in aPlusB
    for i in range(checkpointAt):
      aPlusBPred.next()

    # Now, read through the records that don't have predictions yet
    for i in range(predSteps):
      aPlusBPred.next()
      bPred.next()

    # Now, compare predictions in the two files
    rowIdx = checkpointAt + predSteps + 4 - 1
    epsilon = 0.0001
    while True:
      rowIdx += 1
      try:
        rowAPB = aPlusBPred.next()
        rowB = bPred.next()

        # Compare actuals
        self.assertEqual(rowAPB[actValueColIdx], rowB[actValueColIdx],
              "Mismatch in actual values: row %d of a+b has %s and row %d of "
              "b has %s" % (rowIdx, rowAPB[actValueColIdx], rowIdx-checkpointAt,
                            rowB[actValueColIdx]))

        # Compare predictions, within nearest epsilon
        predAPB = eval(rowAPB[predValueColIdx])
        predB = eval(rowB[predValueColIdx])

        # Sort with highest probabilities first
        predAPB = [(a, b) for b, a in predAPB.items()]
        predB = [(a, b) for b, a in predB.items()]
        predAPB.sort(reverse=True)
        predB.sort(reverse=True)

        if additionalFields is not None:
          for additionalField in additionalFields:
            fieldIdx = colNames.index(additionalField)
            self.assertEqual(rowAPB[fieldIdx], rowB[fieldIdx],
              "Mismatch in field \'%s\' values: row %d of a+b has value: (%s)\n"
              " and row %d of b has value: %s" % \
              (additionalField, rowIdx, rowAPB[fieldIdx],
                rowIdx-checkpointAt, rowB[fieldIdx]))

        self.assertEqual(len(predAPB), len(predB),
              "Mismatch in predicted values: row %d of a+b has %d predictions: "
              "\n  (%s) and row %d of b has %d predictions:\n  (%s)" % \
              (rowIdx, len(predAPB), predAPB, rowIdx-checkpointAt, len(predB),
               predB))

        for i in range(len(predAPB)):
          (aProb, aValue) = predAPB[i]
          (bProb, bValue) = predB[i]
          self.assertLess(abs(aValue-bValue), epsilon,
              "Mismatch in predicted values: row %d of a+b predicts value %s "
              "and row %d of b predicts %s" % (rowIdx, aValue,
                                               rowIdx-checkpointAt, bValue))
          self.assertLess(abs(aProb-bProb), epsilon,
              "Mismatch in probabilities: row %d of a+b predicts %s with "
              "probability %s and row %d of b predicts %s with probability %s" \
               % (rowIdx, aValue, aProb, rowIdx-checkpointAt, bValue, bProb))

      except StopIteration:
        break

    # clean up model checkpoint directories
    shutil.rmtree(getCheckpointParentDir(aExpDir))
    shutil.rmtree(getCheckpointParentDir(bExpDir))
    shutil.rmtree(getCheckpointParentDir(aPlusBExpDir))

    print "Predictions match!"
Example #17
    def _testSamePredictions(self,
                             experiment,
                             predSteps,
                             checkpointAt,
                             predictionsFilename,
                             additionalFields=None):
        """ Test that we get the same predictions out from the following two
    scenarios:

    a_plus_b: Run the network for 'a' iterations followed by 'b' iterations
    a, followed by b: Run the network for 'a' iterations, save it, load it
                      back in, then run for 'b' iterations.

    Parameters:
    -----------------------------------------------------------------------
    experiment:   base directory of the experiment. This directory should
                    contain the following:
                        base.py
                        a_plus_b/description.py
                        a/description.py
                        b/description.py
                    The sub-directory description files should import the
                    base.py and only change the first and last record used
                    from the data file.
    predSteps:   Number of steps ahead predictions are for
    checkpointAt: Number of iterations that 'a' runs for.
                 IMPORTANT: This must match the number of records that
                 a/description.py runs for - it is NOT dynamically stuffed into
                 the a/description.py.
    predictionsFilename: The name of the predictions file that the OPF
                  generates for this experiment (for example
                  'DefaulTask.NontemporalMultiStep.predictionLog.csv')
    """

        # Get the 3 sub-experiment directories
        aPlusBExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a_plus_b")
        aExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a")
        bExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "b")

        # Run a+b
        _aPlusBExp = runExperiment(args=[aPlusBExpDir])

        # Run a, then copy the saved checkpoint into the b directory
        _aExp = runExperiment(args=[aExpDir])
        if os.path.exists(os.path.join(bExpDir, 'savedmodels')):
            shutil.rmtree(os.path.join(bExpDir, 'savedmodels'))
        shutil.copytree(src=os.path.join(aExpDir, 'savedmodels'),
                        dst=os.path.join(bExpDir, 'savedmodels'))

        _bExp = runExperiment(args=[bExpDir, '--load=DefaultTask'])

        # Now, compare the predictions at the end of a+b to those in b.
        aPlusBPred = FileRecordStream(
            os.path.join(aPlusBExpDir, 'inference', predictionsFilename))
        bPred = FileRecordStream(
            os.path.join(bExpDir, 'inference', predictionsFilename))

        colNames = [x[0] for x in aPlusBPred.getFields()]
        actValueColIdx = colNames.index('multiStepPredictions.actual')
        predValueColIdx = colNames.index('multiStepPredictions.%d' %
                                         (predSteps))

        # Skip past the 'a' records in aPlusB
        for i in range(checkpointAt):
            aPlusBPred.next()

        # Now, read through the records that don't have predictions yet
        for i in range(predSteps):
            aPlusBPred.next()
            bPred.next()

        # Now, compare predictions in the two files
        rowIdx = checkpointAt + predSteps + 4 - 1
        epsilon = 0.0001
        while True:
            rowIdx += 1
            try:
                rowAPB = aPlusBPred.next()
                rowB = bPred.next()

                # Compare actuals
                self.assertEqual(
                    rowAPB[actValueColIdx], rowB[actValueColIdx],
                    "Mismatch in actual values: row %d of a+b has %s and row %d of "
                    "b has %s" % (rowIdx, rowAPB[actValueColIdx],
                                  rowIdx - checkpointAt, rowB[actValueColIdx]))

                # Compare predictions, within nearest epsilon
                predAPB = eval(rowAPB[predValueColIdx])
                predB = eval(rowB[predValueColIdx])

                # Sort with highest probabilities first
                predAPB = [(a, b) for b, a in predAPB.items()]
                predB = [(a, b) for b, a in predB.items()]
                predAPB.sort(reverse=True)
                predB.sort(reverse=True)

                if additionalFields is not None:
                    for additionalField in additionalFields:
                        fieldIdx = colNames.index(additionalField)
                        self.assertEqual(rowAPB[fieldIdx], rowB[fieldIdx],
                          "Mismatch in field \'%s\' values: row %d of a+b has value: (%s)\n"
                          " and row %d of b has value: %s" % \
                          (additionalField, rowIdx, rowAPB[fieldIdx],
                            rowIdx-checkpointAt, rowB[fieldIdx]))

                self.assertEqual(len(predAPB), len(predB),
                      "Mismatch in predicted values: row %d of a+b has %d predictions: "
                      "\n  (%s) and row %d of b has %d predictions:\n  (%s)" % \
                      (rowIdx, len(predAPB), predAPB, rowIdx-checkpointAt, len(predB),
                       predB))

                for i in range(len(predAPB)):
                    (aProb, aValue) = predAPB[i]
                    (bProb, bValue) = predB[i]
                    self.assertLess(
                        abs(aValue - bValue), epsilon,
                        "Mismatch in predicted values: row %d of a+b predicts value %s "
                        "and row %d of b predicts %s" %
                        (rowIdx, aValue, rowIdx - checkpointAt, bValue))
                    self.assertLess(abs(aProb-bProb), epsilon,
                        "Mismatch in probabilities: row %d of a+b predicts %s with "
                        "probability %s and row %d of b predicts %s with probability %s" \
                         % (rowIdx, aValue, aProb, rowIdx-checkpointAt, bValue, bProb))

            except StopIteration:
                break

        # clean up model checkpoint directories
        shutil.rmtree(getCheckpointParentDir(aExpDir))
        shutil.rmtree(getCheckpointParentDir(bExpDir))
        shutil.rmtree(getCheckpointParentDir(aPlusBExpDir))

        print "Predictions match!"