Exemplo n.º 1
0
def main():
    random.seed(42)
    numpy.random.seed(42)
    experiments, experimentOptions, otherOptions = Experiment.parseOptions(
        sys.argv[1:])

    print experimentOptions
    print otherOptions

    if len(experiments) != 1:
        raise RuntimeError("You must specify exactly one experiment")

    # We want to list all the available checkpoints
    if otherOptions['listAvailableCheckpoints']:
        utils.printAvailableCheckpoints(experiments[0])
        sys.exit()

    if len(experiments) != 1:
        usage(parser, "Exactly one experiment may be specified")

    if otherOptions['profilePython']:
        _runExperiment = profileRunExperiment
    else:
        _runExperiment = runExperiment

    _runExperiment(experiments[0], experimentOptions)

    if otherOptions['profilePython']:
        p = pstats.Stats("re.profile")
        p.strip_dirs().sort_stats("cumulative").print_stats(30)
Exemplo n.º 2
0
def main():
  random.seed(42)
  numpy.random.seed(42)
  experiments, experimentOptions, otherOptions = Experiment.parseOptions(sys.argv[1:])
  
  print experimentOptions
  print otherOptions
  
  if len(experiments) != 1:
    raise RuntimeError("You must specify exactly one experiment")

  # We want to list all the available checkpoints
  if otherOptions['listAvailableCheckpoints']:
    utils.printAvailableCheckpoints(experiments[0])
    sys.exit()
    
  if len(experiments) != 1:
    usage(parser, "Exactly one experiment may be specified")

  if otherOptions['profilePython']:
    _runExperiment = profileRunExperiment
  else:
    _runExperiment = runExperiment

  _runExperiment(experiments[0], experimentOptions)

  if otherOptions['profilePython']:
    p = pstats.Stats("re.profile")
    p.strip_dirs().sort_stats("cumulative").print_stats(30)
Exemplo n.º 3
0
def runExperiment(experimentDirectory, experimentOptions):
  o = experimentOptions
  # Clean aggregation datasets if needed
  #if o['clearAggregationDatasets']:
  #helpers.cleanAggregationDatasets()
      
  global experiment

  experiment = Experiment(path=experimentDirectory, runtimeOptions=experimentOptions)
  
  
  
  # Create GUIs as needed
  if not experimentOptions['postProcessOnly']:
    if o['runGUI']:
      # Only import these if the gui is requested, because
      # they import further modules that require a console
      # and exit if there is no console (as in the autobuild)
      from nupic.frameworks.prediction.GUIs import TrainingGUI, InferenceGUI
      if not (o['createNetworkOnly'] or o['runInferenceOnly']):
        gui.append(TrainingGUI(experiment))
      gui.append(InferenceGUI(experiment))
      for i, g in enumerate(gui):
        g.start()
    else:
      experiment.run()

  if experimentOptions['postProcess']:
    from nupic.frameworks.prediction.postprocess import postProcess
    try:
      postProcess(experiment)
      experiment.writeResults()
      if os.environ.has_key('NTA_AMAZON_SYNC_DATA'):
        print "Deleting log files"
        inferenceDir = os.path.join(experimentDirectory,"inference")
        logFiles = glob.glob(os.path.join(inferenceDir,"*.txt"))
        for f in logFiles:
          try:
            os.remove(f)
          except:
            print "Couldn't remove log file:",f
    except Exception, e:
      message = "Post processing has failed, %s" % str(e.args)
      e.args = (message,) +e.args[1:]
      #traceback.print_exc(file=sys.stdout)
      raise 
Exemplo n.º 4
0
def runExperiment(experimentDirectory, experimentOptions):
    o = experimentOptions
    # Clean aggregation datasets if needed
    #if o['clearAggregationDatasets']:
    #helpers.cleanAggregationDatasets()

    global experiment

    experiment = Experiment(path=experimentDirectory,
                            runtimeOptions=experimentOptions)

    # Create GUIs as needed
    if not experimentOptions['postProcessOnly']:
        if o['runGUI']:
            # Only import these if the gui is requested, because
            # they import further modules that require a console
            # and exit if there is no console (as in the autobuild)
            from nupic.frameworks.prediction.GUIs import TrainingGUI, InferenceGUI
            if not (o['createNetworkOnly'] or o['runInferenceOnly']):
                gui.append(TrainingGUI(experiment))
            gui.append(InferenceGUI(experiment))
            for i, g in enumerate(gui):
                g.start()
        else:
            experiment.run()

    if experimentOptions['postProcess']:
        from nupic.frameworks.prediction.postprocess import postProcess
        try:
            postProcess(experiment)
            experiment.writeResults()
            if os.environ.has_key('NTA_AMAZON_SYNC_DATA'):
                print "Deleting log files"
                inferenceDir = os.path.join(experimentDirectory, "inference")
                logFiles = glob.glob(os.path.join(inferenceDir, "*.txt"))
                for f in logFiles:
                    try:
                        os.remove(f)
                    except:
                        print "Couldn't remove log file:", f
        except Exception, e:
            message = "Post processing has failed, %s" % str(e.args)
            e.args = (message, ) + e.args[1:]
            #traceback.print_exc(file=sys.stdout)
            raise
Exemplo n.º 5
0
def postProcess(experiment):
  """Run post-processing analysis for every inference test of an experiment.

  Accepts either an Experiment instance or a value the Experiment
  constructor understands. Tests named '<dataset>_baseline' are analyzed
  first; every other variant of the same dataset is then analyzed against
  that baseline, and any remaining tests are analyzed standalone.
  """
  if not isinstance(experiment, Experiment):
    experiment = Experiment(experiment)

  inferenceDir = experiment.getInferenceDirectory()

  # Use level1 rather than level1_sp because the latter is still in
  # learning mode; use "all" rather than level1 because level1_sp is not
  # created by default.
  experiment.loadNetwork("all")
  net = experiment.network

  # Analyze the trained network and extract the auxiliary data structures.
  netInfo = _analyzeTrainedNet(net=net, options=dict())

  # Collect every test name together with its post-processing options.
  infer = experiment.description['infer']
  testNames = [t['name'] for t in infer]
  options = dict(zip(testNames, [t.get('ppOptions', dict()) for t in infer]))
  standaloneTests = list(testNames)

  # A test named '<dataset>_baseline' is the baseline for that dataset.
  baselineTestNames = set()
  for name in testNames:
    (dataset, variant) = name.split('_')
    if variant == 'baseline':
      baselineTestNames.add(name)

  # For each baseline: analyze it, then analyze every non-baseline variant
  # of the same dataset against it. Tests handled here are removed from
  # the standalone list.
  for baselineName in baselineTestNames:
    (baseDataset, baseVariant) = baselineName.split('_')
    assert(baseVariant == 'baseline')
    standaloneTests.remove(baselineName)
    baselineResult = _analyzeTestRun(
        experiment, netInfo=netInfo, options=options[baselineName],
        datasetName=baseDataset, testVariantName=baseVariant,
        baseline=None)

    for name in testNames:
      (dataset, variant) = name.split('_')
      if dataset != baseDataset or variant == 'baseline':
        continue
      standaloneTests.remove(name)
      _analyzeTestRun(
          experiment, netInfo=netInfo, options=options[name],
          datasetName=dataset, testVariantName=variant,
          baseline=baselineResult)

  # Whatever remains has no associated baseline; analyze it on its own.
  for name in standaloneTests:
    (dataset, variant) = name.split('_')
    _analyzeTestRun(
        experiment, netInfo=netInfo, options=options[name],
        datasetName=dataset, testVariantName=variant,
        baseline=None)
Exemplo n.º 6
0
def postProcess(experiment):
    """Run post-processing analysis for every inference test of an experiment.

    Accepts either an Experiment instance or a value the Experiment
    constructor understands. Tests named '<dataset>_baseline' are analyzed
    first; every other variant of the same dataset is then analyzed against
    that baseline, and any remaining tests are analyzed standalone.
    """
    if not isinstance(experiment, Experiment):
        experiment = Experiment(experiment)

    inferenceDir = experiment.getInferenceDirectory()

    # Use level1 rather than level1_sp because the latter is still in
    # learning mode; use "all" rather than level1 because level1_sp is not
    # created by default.
    experiment.loadNetwork("all")
    net = experiment.network

    # Analyze the trained network and extract the auxiliary data structures.
    netInfo = _analyzeTrainedNet(net=net, options=dict())

    # Collect every test name together with its post-processing options.
    inferTests = experiment.description['infer']
    testNames = [t['name'] for t in inferTests]
    ppOptionsList = [t.get('ppOptions', dict()) for t in inferTests]
    options = dict(zip(testNames, ppOptionsList))
    standaloneTests = list(testNames)

    # A test named '<dataset>_baseline' is the baseline for that dataset.
    baselineTestNames = set()
    for name in testNames:
        (dataset, variant) = name.split('_')
        if variant == 'baseline':
            baselineTestNames.add(name)

    # For each baseline: analyze it, then analyze every non-baseline
    # variant of the same dataset against it. Tests handled here are
    # removed from the standalone list.
    for baselineName in baselineTestNames:
        (baseDataset, baseVariant) = baselineName.split('_')
        assert (baseVariant == 'baseline')
        standaloneTests.remove(baselineName)
        baselineResult = _analyzeTestRun(experiment,
                                         netInfo=netInfo,
                                         options=options[baselineName],
                                         datasetName=baseDataset,
                                         testVariantName=baseVariant,
                                         baseline=None)

        for name in testNames:
            (dataset, variant) = name.split('_')
            if dataset != baseDataset or variant == 'baseline':
                continue
            standaloneTests.remove(name)
            _analyzeTestRun(experiment,
                            netInfo=netInfo,
                            options=options[name],
                            datasetName=dataset,
                            testVariantName=variant,
                            baseline=baselineResult)

    # Whatever remains has no associated baseline; analyze it on its own.
    for name in standaloneTests:
        (dataset, variant) = name.split('_')
        _analyzeTestRun(experiment,
                        netInfo=netInfo,
                        options=options[name],
                        datasetName=dataset,
                        testVariantName=variant,
                        baseline=None)