def runExperimentPool(numObjects,
                      numLocations,
                      numFeatures,
                      numColumns,
                      networkType=["MultipleL4L2Columns"],
                      longDistanceConnectionsRange = [0.0],
                      numWorkers=7,
                      nTrials=1,
                      pointRange=1,
                      numPoints=10,
                      numInferenceRpts=1,
                      l2Params=None,
                      l4Params=None,
                      resultsName="convergence_results.pkl"):
  """
  Allows you to run a number of experiments using multiple processes.
  For each of the swept parameters (numObjects, numLocations, numFeatures,
  numColumns, networkType and longDistanceConnectionsRange), pass in a list
  of valid values. The cross product of all of these lists is run, and each
  combination is run nTrials times.

  Returns a list of dicts containing detailed results from each experiment.
  Also pickles and saves the results in resultsName for later analysis.

  Example:
    results = runExperimentPool(
                          numObjects=[10],
                          numLocations=[5],
                          numFeatures=[5],
                          numColumns=[2,3,4,5,6],
                          numWorkers=8,
                          nTrials=5)
  """
  # Create function arguments for every possibility
  args = []

  for c in reversed(numColumns):
    for o in reversed(numObjects):
      for l in numLocations:
        for f in numFeatures:
          for n in networkType:
            for p in longDistanceConnectionsRange:
              for t in range(nTrials):
                args.append(
                  {"numObjects": o,
                   "numLocations": l,
                   "numFeatures": f,
                   "numColumns": c,
                   "trialNum": t,
                   "pointRange": pointRange,
                   "numPoints": numPoints,
                   "networkType" : n,
                   "longDistanceConnections" : p,
                   "plotInferenceStats": False,
                   "settlingTime": 3,
                   "numInferenceRpts": numInferenceRpts,
                   "l2Params": l2Params,
                   "l4Params": l4Params
                   }
                )
  print "{} experiments to run, {} workers".format(len(args), numWorkers)
  # Run the pool
  if numWorkers > 1:
    pool = Pool(processes=numWorkers)
    result = pool.map(runExperiment, args)
  else:
    result = []
    for arg in args:
      result.append(runExperiment(arg))

  # print "Full results:"
  # pprint.pprint(result, width=150)

  # Pickle results for later use
  with open(resultsName,"wb") as f:
    cPickle.dump(result,f)

  return result


if __name__ == "__main__":

  # This is how you run a specific experiment in single process mode. Useful
  # for debugging, profiling, etc.
  if True:
    results = runExperiment(
                  {
                    "numObjects": 100,
                    "numPoints": 10,
                    "numLocations": 10,
                    "numFeatures": 10,
                    "numColumns": 1,
                    "trialNum": 4,
                    "pointRange": 1,
                    "featureNoise": 0.40,
                    "plotInferenceStats": True,  # Outputs detailed graphs
                    "settlingTime": 3,
                    "includeRandomLocation": False
                  }
    )
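
  # A minimal profiling sketch (hypothetical, not part of the original file):
  # the comment above mentions profiling, and the standard library's cProfile
  # can wrap essentially the same call (with plotting disabled).
  if False:
    import cProfile
    cProfile.run('runExperiment({"numObjects": 100, "numPoints": 10, '
                 '"numLocations": 10, "numFeatures": 10, "numColumns": 1, '
                 '"trialNum": 4, "pointRange": 1, "featureNoise": 0.40, '
                 '"plotInferenceStats": False, "settlingTime": 3, '
                 '"includeRandomLocation": False})')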


  ################
  # These experiments look at the effect of topology

  # Here we want to see how the number of columns affects convergence.
  # This experiment is run using a process pool
  if False:
    # The body of this block is truncated in this snippet; see Example 3
    # below for the corresponding pool run and analysis.
    pass

################
# Example 3
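
# (Reconstruction note: the head of this example was truncated. The wrapper
#  function and the parameter values below are assumed placeholders; only the
#  trailing arguments and the analysis code come from the original snippet.)
def runMultiColumnConvergence():
    columnRange = [1, 2, 3, 4, 5, 6]
    featureRange = [5]
    numTrials = 10

    runExperimentPool(
        numObjects=[100],
        numLocations=[10],
        numFeatures=featureRange,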
        numColumns=columnRange,
        numPoints=10,
        numWorkers=cpu_count() - 1,
        nTrials=numTrials,
        resultsName="object_convergence_multi_column_results.pkl")

    # Analyze results
    with open("object_convergence_multi_column_results.pkl", "rb") as f:
        results = cPickle.load(f)

    plotConvergenceByColumn(results, columnRange, featureRange, numTrials)


if __name__ == "__main__":

    # This is how you run a specific experiment in single process mode. Useful
    # for debugging, profiling, etc.
    if True:
        results = runExperiment({
            "numObjects": 100,
            "numPoints": 10,
            "numLocations": 10,
            "numFeatures": 10,
            "numColumns": 1,
            "trialNum": 4,
            "featureNoise": 0.0,
            "plotInferenceStats": False,  # Outputs detailed graphs
            "settlingTime": 2,
            "includeRandomLocation": False
        })

################
# Example 4
  plt.savefig(plotPath)
  plt.close()

if __name__ == "__main__":

  # This is how you run a specific experiment in single process mode. Useful
  # for debugging, profiling, etc.
  if False:
    results = runExperiment(
                  {
                    "numObjects": 100,
                    "numPoints": 10,
                    "numLocations": 10,
                    "numFeatures": 10,
                    "numColumns": 1,
                    "trialNum": 4,
                    "featureNoise": 0.6,
                    "plotInferenceStats": False,  # Outputs detailed graphs
                    "settlingTime": 3,
                    "includeRandomLocation": False,
                    "l2Params": {"cellCount": 4096*4, "sdrSize": 40*2, "activationThresholdDistal": 14}
                  }
    )

  # This specifically tests how the distal activation threshold affects
  # the classification results
  if True:
    activationThresholdDistalRange = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    results_by_thresholds = []
    for i in range(len(activationThresholdDistalRange)):
        results = runExperiment(
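            # (Hypothetical completion: the argument dict was cut off in this
            #  snippet; it mirrors the single run above, with the swept
            #  distal threshold substituted in.)
            {"numObjects": 100, "numPoints": 10, "numLocations": 10,
             "numFeatures": 10, "numColumns": 1, "trialNum": 4,
             "featureNoise": 0.6, "plotInferenceStats": False,
             "settlingTime": 3, "includeRandomLocation": False,
             "l2Params": {"cellCount": 4096 * 4, "sdrSize": 40 * 2,
                          "activationThresholdDistal":
                              activationThresholdDistalRange[i]}})
        results_by_thresholds.append(results)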

################
# Example 5

if __name__ == "__main__":

    # This is how you run a specific experiment in single process mode. Useful
    # for debugging, profiling, etc.
    if False:
        results = runExperiment({
            "numObjects": 100,
            "numPoints": 10,
            "numLocations": 10,
            "numFeatures": 10,
            "numColumns": 1,
            "trialNum": 4,
            "featureNoise": 0.6,
            "plotInferenceStats": False,  # Outputs detailed graphs
            "settlingTime": 3,
            "includeRandomLocation": False,
            "l2Params": {
                "cellCount": 4096 * 4,
                "sdrSize": 40 * 2,
                "activationThresholdDistal": 14
            }
        })

    # This specifically tests how the distal activation threshold affects
    # the classification results
    if True:
        activationThresholdDistalRange = [
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
        ]
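
        # A sketch of the sweep this block would run (hypothetical: the loop
        # body is not part of the snippet; it reuses the argument dict from
        # the example above, substituting in each threshold).
        results_by_threshold = []
        for threshold in activationThresholdDistalRange:
            results = runExperiment({
                "numObjects": 100,
                "numPoints": 10,
                "numLocations": 10,
                "numFeatures": 10,
                "numColumns": 1,
                "trialNum": 4,
                "featureNoise": 0.6,
                "plotInferenceStats": False,
                "settlingTime": 3,
                "includeRandomLocation": False,
                "l2Params": {
                    "cellCount": 4096 * 4,
                    "sdrSize": 40 * 2,
                    "activationThresholdDistal": threshold
                }
            })
            results_by_threshold.append(results)
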
def runExperimentPool(numObjects,
                      numLocations,
                      numFeatures,
                      numColumns,
                      longDistanceConnectionsRange = [0.0],
                      numWorkers=7,
                      nTrials=1,
                      numPoints=10,
                      locationNoiseRange=[0.0],
                      featureNoiseRange=[0.0],
                      enableFeedback=[True],
                      ambiguousLocationsRange=[0],
                      numInferenceRpts=1,
                      l2Params=None,
                      l4Params=None,
                      resultsName="convergence_results.pkl"):
  """
  Allows you to run a number of experiments using multiple processes.
  For each of the swept parameters (numObjects, numLocations, numFeatures,
  numColumns, longDistanceConnectionsRange, locationNoiseRange,
  featureNoiseRange, enableFeedback and ambiguousLocationsRange), pass in a
  list of valid values. The cross product of all of these lists is run, and
  each combination is run nTrials times.

  Returns a list of dicts containing detailed results from each experiment.
  Also pickles and saves the results in resultsName for later analysis.

  Example:
    results = runExperimentPool(
                          numObjects=[10],
                          numLocations=[5],
                          numFeatures=[5],
                          numColumns=[2,3,4,5,6],
                          numWorkers=8,
                          nTrials=5)
  """
  # Create function arguments for every possibility
  args = []

  for c in reversed(numColumns):
    for o in reversed(numObjects):
      for l in numLocations:
        for f in numFeatures:
          for p in longDistanceConnectionsRange:
            for t in range(nTrials):
              for locationNoise in locationNoiseRange:
                for featureNoise in featureNoiseRange:
                  for ambiguousLocations in ambiguousLocationsRange:
                    for feedback in enableFeedback:
                      args.append(
                        {"numObjects": o,
                         "numLocations": l,
                         "numFeatures": f,
                         "numColumns": c,
                         "trialNum": t,
                         "numPoints": numPoints,
                         "longDistanceConnections" : p,
                         "plotInferenceStats": False,
                         "settlingTime": 3,
                         "locationNoise": locationNoise,
                         "featureNoise": featureNoise,
                         "enableFeedback": feedback,
                         "numAmbiguousLocations": ambiguousLocations,
                         "numInferenceRpts": numInferenceRpts,
                         "l2Params": l2Params,
                         "l4Params": l4Params
                         }
                      )
  print "{} experiments to run, {} workers".format(len(args), numWorkers)
  # Run the pool
  if numWorkers > 1:
    pool = Pool(processes=numWorkers)
    result = pool.map(runExperiment, args)
  else:
    result = []
    for arg in args:
      result.append(runExperiment(arg))

  # print "Full results:"
  # pprint.pprint(result, width=150)

  # Pickle results for later use
  with open(resultsName,"wb") as f:
    cPickle.dump(result,f)

  return result
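

# A minimal usage sketch for the extended pool above (hypothetical: the
# variable name, parameter values and results filename are placeholders, not
# from the original file), sweeping feature noise, ambiguous locations and
# feedback in addition to the number of columns:
if False:
  noiseResults = runExperimentPool(
                   numObjects=[50],
                   numLocations=[10],
                   numFeatures=[10],
                   numColumns=[1, 3, 5],
                   featureNoiseRange=[0.0, 0.2, 0.4],
                   ambiguousLocationsRange=[0, 2],
                   enableFeedback=[True, False],
                   numWorkers=cpu_count() - 1,
                   nTrials=3,
                   resultsName="noise_convergence_results.pkl")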