Example 1
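This example runs a prediction experiment end to end: it builds an Experiment from a directory of experiment files, drives training and inference (through GUIs when requested), and optionally post-processes the results, deleting inference log files when NTA_AMAZON_SYNC_DATA is set.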
import glob
import os

# The exact import path for Experiment is assumed here; in the originating
# framework it lives alongside the GUIs imported further below.
from nupic.frameworks.prediction import Experiment


def runExperiment(experimentDirectory, experimentOptions):
    o = experimentOptions
    # Clean aggregation datasets if needed
    #if o['clearAggregationDatasets']:
    #helpers.cleanAggregationDatasets()

    global experiment

    experiment = Experiment(path=experimentDirectory,
                            runtimeOptions=experimentOptions)

    # Create GUIs as needed
    if not o['postProcessOnly']:
        if o['runGUI']:
            # Only import these if the gui is requested, because
            # they import further modules that require a console
            # and exit if there is no console (as in the autobuild)
            from nupic.frameworks.prediction.GUIs import TrainingGUI, InferenceGUI
            gui = []
            if not (o['createNetworkOnly'] or o['runInferenceOnly']):
                gui.append(TrainingGUI(experiment))
            gui.append(InferenceGUI(experiment))
            for g in gui:
                g.start()
        else:
            experiment.run()

    if o['postProcess']:
        from nupic.frameworks.prediction.postprocess import postProcess
        try:
            postProcess(experiment)
            experiment.writeResults()
            if 'NTA_AMAZON_SYNC_DATA' in os.environ:
                print("Deleting log files")
                inferenceDir = os.path.join(experimentDirectory, "inference")
                logFiles = glob.glob(os.path.join(inferenceDir, "*.txt"))
                for f in logFiles:
                    try:
                        os.remove(f)
                    except OSError:
                        print("Couldn't remove log file: %s" % f)
        except Exception as e:
            message = "Post processing has failed, %s" % str(e.args)
            e.args = (message, ) + e.args[1:]
            #traceback.print_exc(file=sys.stdout)
            raise
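
A minimal invocation sketch (hypothetical: the directory path is a placeholder, and the option keys simply mirror the ones runExperiment reads above):

options = {
    'postProcessOnly': False,   # run the experiment, not just post-processing
    'runGUI': False,            # headless run: skip TrainingGUI/InferenceGUI
    'createNetworkOnly': False,
    'runInferenceOnly': False,
    'postProcess': True,        # analyze results after the run
}
runExperiment('experiments/myExperiment', options)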
Example 2
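This example post-processes a finished experiment: it loads the trained network, extracts its auxiliary data structures, and analyzes every inference test named in the experiment description, running each dataset's baseline variant first so the remaining variants on that dataset can reuse its results.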
def postProcess(experiment):
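    # Experiment, _analyzeTrainedNet, and _analyzeTestRun are helpers defined
    # elsewhere in the module this example was taken from.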

    if not isinstance(experiment, Experiment):
        experiment = Experiment(experiment)

    inferenceDir = experiment.getInferenceDirectory()

    # Use level1 rather than level1_sp because the latter is still in
    # learning mode; use "all" rather than level1 because level1_sp is
    # not created by default.
    experiment.loadNetwork("all")
    net = experiment.network

    # Analyze the trained network and extract the auxiliary data structures
    netInfo = _analyzeTrainedNet(net=net, options=dict())

    # ------------------------------------------------------------------------
    # Get the list of tests, and the list of baselines required
    testNames = [test['name'] for test in experiment.description['infer']]
    testOptions = [
        test.get('ppOptions', dict())
        for test in experiment.description['infer']
    ]
    standaloneTests = list(testNames)
    options = dict(zip(testNames, testOptions))

    baselineTestNames = set()
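    # Test names follow a "<dataset>_<variant>" convention; a variant named
    # "baseline" is the run used to train the classifier for its dataset.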
    for testName in testNames:
        (datasetName, testVariant) = testName.split('_')
        if testVariant == 'baseline':
            baselineTestNames.add(testName)

    # ------------------------------------------------------------------------
    # For each baseline, train the classifier, and then run each variant that uses
    #  that baseline
    for baselineTestName in baselineTestNames:
        (baseDatasetName, baseVariantName) = baselineTestName.split('_')
        assert (baseVariantName == 'baseline')
        standaloneTests.remove(baselineTestName)
        baseline = _analyzeTestRun(experiment,
                                   netInfo=netInfo,
                                   options=options[baselineTestName],
                                   datasetName=baseDatasetName,
                                   testVariantName=baseVariantName,
                                   baseline=None)

        # Now, run each variant that uses this baseline
        for testName in testNames:
            (datasetName, variantName) = testName.split('_')
            if datasetName != baseDatasetName or variantName == 'baseline':
                continue
            standaloneTests.remove(testName)
            _analyzeTestRun(experiment,
                            netInfo=netInfo,
                            options=options[testName],
                            datasetName=datasetName,
                            testVariantName=variantName,
                            baseline=baseline)

    # Run each standalone test
    for testName in standaloneTests:
        (datasetName, variantName) = testName.split('_')
        _analyzeTestRun(experiment,
                        netInfo=netInfo,
                        options=options[testName],
                        datasetName=datasetName,
                        testVariantName=variantName,
                        baseline=None)
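
A minimal usage sketch (the path is a placeholder; as the isinstance check at the top of the function shows, an Experiment instance may be passed instead of a path):

postProcess('experiments/myExperiment')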