def doGaussianExperiment(inverseReadoutResolution): """ Learn a set of objects. Then try to recognize each object. Output an interactive visualization. @param cellDimensions (pair) The cell dimensions of each module """ if not os.path.exists("traces"): os.makedirs("traces") locationConfigs = [] for i in xrange(5): scale = 10.0 * (math.sqrt(2)**i) for _ in xrange(4): orientation = np.radians(random.gauss(7.5, 7.5)) orientation = random.choice([orientation, -orientation]) locationConfigs.append({ "scale": scale, "inverseReadoutResolution": inverseReadoutResolution, "orientation": orientation, "activationThreshold": 8, "initialPermanence": 1.0, "connectedPermanence": 0.5, "learningThreshold": 8, "sampleSize": 10, "permanenceIncrement": 0.1, "permanenceDecrement": 0.0, }) L4Overrides = { "activationThreshold": 15, "minThreshold": 15, "initialPermanence": 1.0, } column = PIUNCorticalColumn(locationConfigs, L4Overrides, useGaussian=True) exp = PIUNExperiment(column, featureNames=("A", "B")) for objectDescription in OBJECTS: exp.learnObject(objectDescription) filename = "traces/gaussian-{}-resolution.html".format( np.prod(inverseReadoutResolution)) with io.open(filename, "w", encoding="utf8") as fileOut: with trace(fileOut, exp, includeSynapses=True): print "Logging to", filename for objectDescription in OBJECTS: succeeded = exp.inferObjectWithRandomMovements( objectDescription) if not succeeded: print 'Failed to infer object "{}"'.format( objectDescription["name"])
def doExperiment(cellDimensions, cellCoordinateOffsets): """ Learn a set of objects. Then try to recognize each object. Output an interactive visualization. @param cellDimensions (pair) The cell dimensions of each module @param cellCoordinateOffsets (sequence) The "cellCoordinateOffsets" parameter for each module """ if not os.path.exists("traces"): os.makedirs("traces") locationConfigs = [] for i in xrange(5): scale = 10.0 * (math.sqrt(2)**i) for _ in xrange(4): orientation = np.radians(random.gauss(7.5, 7.5)) orientation = random.choice([orientation, -orientation]) locationConfigs.append({ "cellDimensions": cellDimensions, "moduleMapDimensions": (scale, scale), "orientation": orientation, "cellCoordinateOffsets": cellCoordinateOffsets, }) column = PIUNCorticalColumn(locationConfigs) exp = PIUNExperiment(column, featureNames=("A", "B")) for objectDescription in OBJECTS: exp.learnObject(objectDescription) filename = "traces/{}-points-{}-cells.html".format( len(cellCoordinateOffsets)**2, np.prod(cellDimensions)) with io.open(filename, "w", encoding="utf8") as fileOut: with trace(fileOut, exp, includeSynapses=True): print "Logging to", filename for objectDescription in OBJECTS: succeeded = exp.inferObjectWithRandomMovements( objectDescription) if not succeeded: print 'Failed to infer object "{}"'.format( objectDescription["name"])
def doExperiment(cellDimensions, cellCoordinateOffsets, numObjects, featuresPerObject, objectWidth, numFeatures, useTrace, noiseFactor, moduleNoiseFactor, anchoringMethod="narrowing", randomLocation=False, threshold=16): """ Learn a set of objects. Then try to recognize each object. Output an interactive visualization. @param cellDimensions (pair) The cell dimensions of each module @param cellCoordinateOffsets (sequence) The "cellCoordinateOffsets" parameter for each module """ if not os.path.exists("traces"): os.makedirs("traces") features = generateFeatures(numFeatures) objects = generateObjects(numObjects, featuresPerObject, objectWidth, features) locationConfigs = [] scale = 5 * cellDimensions[0] # One cell is about a quarter of a feature numModules = 20 perModRange = float(90.0 / float(numModules)) if anchoringMethod == "corners": cellCoordinateOffsets = (.0001, .5, .9999) if anchoringMethod == "discrete": cellCoordinateOffsets = (.5, ) for i in xrange(numModules): orientation = float(i) * perModRange locationConfigs.append({ "cellDimensions": cellDimensions, "moduleMapDimensions": (scale, scale), "orientation": np.radians(orientation), "cellCoordinateOffsets": cellCoordinateOffsets, "activationThreshold": 8, "initialPermanence": 1.0, "connectedPermanence": 0.5, "learningThreshold": 8, "sampleSize": 20, "permanenceIncrement": 0.1, "permanenceDecrement": 0.0, "anchoringMethod": anchoringMethod, }) l4Overrides = { "initialPermanence": 1.0, "activationThreshold": threshold, "reducedBasalThreshold": threshold, "minThreshold": threshold, "sampleSize": numModules, "cellsPerColumn": 16, } column = PIUNCorticalColumn(locationConfigs, L4Overrides=l4Overrides) exp = PIUNExperiment(column, featureNames=features, numActiveMinicolumns=10, noiseFactor=noiseFactor, moduleNoiseFactor=moduleNoiseFactor) for objectDescription in objects: exp.learnObject(objectDescription, randomLocation=randomLocation, useNoise=False) print 'Learned object {}'.format(objectDescription["name"]) 
filename = "traces/{}-points-{}-cells-{}-objects-{}-feats-{}-random.html".format( len(cellCoordinateOffsets)**2, np.prod(cellDimensions), numObjects, numFeatures, randomLocation) convergence = collections.defaultdict(int) if useTrace: with io.open(filename, "w", encoding="utf8") as fileOut: with trace(fileOut, exp, includeSynapses=False): print "Logging to", filename for objectDescription in objects: steps = exp.inferObjectWithRandomMovements( objectDescription, randomLocation=randomLocation) convergence[steps] += 1 if steps is None: print 'Failed to infer object "{}"'.format( objectDescription["name"]) else: print 'Inferred object {} after {} steps'.format( objectDescription["name"], steps) else: for objectDescription in objects: steps = exp.inferObjectWithRandomMovements( objectDescription, randomLocation=randomLocation) convergence[steps] += 1 if steps is None: print 'Failed to infer object "{}"'.format( objectDescription["name"]) else: print 'Inferred object {} after {} steps'.format( objectDescription["name"], steps) for step, num in sorted(convergence.iteritems()): print "{}: {}".format(step, num) return (convergence)
def doExperiment(locationModuleWidth,
                 cellCoordinateOffsets,
                 numObjects,
                 featuresPerObject,
                 objectWidth,
                 numFeatures,
                 useTrace,
                 useRawTrace,
                 logCellActivity,
                 noiseFactor,
                 moduleNoiseFactor,
                 numModules,
                 numSensations,
                 thresholds,
                 seed1,
                 seed2,
                 anchoringMethod="narrowing"):
  """
  Learn a set of objects. Then try to recognize each object. Output an
  interactive visualization.

  @param locationModuleWidth (int)
  The cell dimensions of each module

  @param cellCoordinateOffsets (sequence)
  The "cellCoordinateOffsets" parameter for each module
  """
  if not os.path.exists("traces"):
    os.makedirs("traces")

  # Optionally seed both RNGs for reproducibility (-1 means "don't seed").
  if seed1 != -1:
    np.random.seed(seed1)

  if seed2 != -1:
    random.seed(seed2)

  features = [str(i) for i in xrange(numFeatures)]
  objects = generateObjects(numObjects, featuresPerObject, objectWidth,
                            numFeatures)

  locationConfigs = []
  scale = 40.0

  # thresholds == -1 selects a default of 80% of the module count;
  # thresholds == 0 requires agreement from every module.
  if thresholds == -1:
    thresholds = int(math.ceil(numModules * 0.8))
  elif thresholds == 0:
    thresholds = numModules

  # Spread module orientations evenly over 90 degrees, offset by half a bin
  # so orientations sit at bin centers.
  perModRange = float(90.0 / float(numModules))
  for i in xrange(numModules):
    orientation = (float(i) * perModRange) + (perModRange / 2.0)

    locationConfigs.append({
      "cellsPerAxis": locationModuleWidth,
      "scale": scale,
      "orientation": np.radians(orientation),
      "cellCoordinateOffsets": cellCoordinateOffsets,
      "activationThreshold": 8,
      "initialPermanence": 1.0,
      "connectedPermanence": 0.5,
      "learningThreshold": 8,
      "sampleSize": 10,
      "permanenceIncrement": 0.1,
      "permanenceDecrement": 0.0,
      "anchoringMethod": anchoringMethod,
    })

  l4Overrides = {
    "initialPermanence": 1.0,
    "activationThreshold": thresholds,
    "reducedBasalThreshold": thresholds,
    "minThreshold": numModules,
    "sampleSize": numModules,
    "cellsPerColumn": 16,
  }

  column = PIUNCorticalColumn(locationConfigs, L4Overrides=l4Overrides)
  exp = PIUNExperiment(column, featureNames=features,
                       numActiveMinicolumns=10,
                       noiseFactor=noiseFactor,
                       moduleNoiseFactor=moduleNoiseFactor)

  for objectDescription in objects:
    exp.learnObject(objectDescription)

  # Histogram: number of sensations to inference -> object count.
  # A key of None counts objects that were never inferred.
  convergence = collections.defaultdict(int)

  try:
    if useTrace:
      filename = os.path.join(
        SCRIPT_DIR,
        "traces/{}-points-{}-cells-{}-objects-{}-feats.html".format(
          len(cellCoordinateOffsets)**2,
          exp.column.L6aModules[0].numberOfCells(),
          numObjects, numFeatures))
      traceFileOut = io.open(filename, "w", encoding="utf8")
      traceHandle = trace(traceFileOut, exp, includeSynapses=True)
      print "Logging to", filename

    if useRawTrace:
      rawFilename = os.path.join(
        SCRIPT_DIR,
        "traces/{}-points-{}-cells-{}-objects-{}-feats.trace".format(
          len(cellCoordinateOffsets)**2,
          exp.column.L6aModules[0].numberOfCells(),
          numObjects, numFeatures))
      rawTraceFileOut = open(rawFilename, "w")
      rawTraceHandle = rawTrace(rawTraceFileOut, exp, includeSynapses=False)
      print "Logging to", rawFilename

    if logCellActivity:
      # Attach a monitor that records location-layer activity per object.
      cellActivityTracer = PIUNCellActivityTracer(exp)
      exp.addMonitor(cellActivityTracer)

    for objectDescription in objects:
      steps = exp.inferObjectWithRandomMovements(objectDescription,
                                                 numSensations)
      convergence[steps] += 1
      if steps is None:
        print 'Failed to infer object "{}"'.format(
          objectDescription["name"])
  finally:
    # NOTE(review): __exit__ is called with no arguments here; a standard
    # context manager's __exit__ takes (type, value, traceback) — confirm
    # that the handles returned by trace()/rawTrace() tolerate this.
    if useTrace:
      traceHandle.__exit__()
      traceFileOut.close()
    if useRawTrace:
      rawTraceHandle.__exit__()
      rawTraceFileOut.close()

  for step, num in sorted(convergence.iteritems()):
    print "{}: {}".format(step, num)

  if logCellActivity:
    return {
      "convergence": convergence,
      "locationLayerTimelineByObject":
        cellActivityTracer.locationLayerTimelineByObject,
      "inferredStepByObject": cellActivityTracer.inferredStepByObject,
    }
  else:
    return convergence
def doExperiment(cellDimensions, cellCoordinateOffsets, numObjects, featuresPerObject, objectWidth, numFeatures, useTrace, noiseFactor, moduleNoiseFactor, anchoringMethod="narrowing", randomLocation=False, threshold=16): """ Learn a set of objects. Then try to recognize each object. Output an interactive visualization. @param cellDimensions (pair) The cell dimensions of each module @param cellCoordinateOffsets (sequence) The "cellCoordinateOffsets" parameter for each module """ if not os.path.exists("traces"): os.makedirs("traces") features = generateFeatures(numFeatures) objects = generateObjects(numObjects, featuresPerObject, objectWidth, features) locationConfigs = [] scale = 5*cellDimensions[0] # One cell is about a quarter of a feature numModules = 20 perModRange = float(90.0 / float(numModules)) if anchoringMethod == "corners": cellCoordinateOffsets = (.0001, .5, .9999) if anchoringMethod == "discrete": cellCoordinateOffsets = (.5,) for i in xrange(numModules): orientation = float(i) * perModRange locationConfigs.append({ "cellDimensions": cellDimensions, "moduleMapDimensions": (scale, scale), "orientation": np.radians(orientation), "cellCoordinateOffsets": cellCoordinateOffsets, "activationThreshold": 8, "initialPermanence": 1.0, "connectedPermanence": 0.5, "learningThreshold": 8, "sampleSize": 20, "permanenceIncrement": 0.1, "permanenceDecrement": 0.0, "anchoringMethod": anchoringMethod, }) l4Overrides = { "initialPermanence": 1.0, "activationThreshold": threshold, "reducedBasalThreshold": threshold, "minThreshold": threshold, "sampleSize": numModules, "cellsPerColumn": 16, } column = PIUNCorticalColumn(locationConfigs, L4Overrides=l4Overrides) exp = PIUNExperiment(column, featureNames=features, numActiveMinicolumns=10, noiseFactor=noiseFactor, moduleNoiseFactor=moduleNoiseFactor) for objectDescription in objects: exp.learnObject(objectDescription, randomLocation=randomLocation, useNoise = False) print 'Learned object {}'.format(objectDescription["name"]) 
filename = "traces/{}-points-{}-cells-{}-objects-{}-feats-{}-random.html".format( len(cellCoordinateOffsets)**2, np.prod(cellDimensions), numObjects, numFeatures, randomLocation) convergence = collections.defaultdict(int) if useTrace: with io.open(filename, "w", encoding="utf8") as fileOut: with trace(fileOut, exp, includeSynapses=False): print "Logging to", filename for objectDescription in objects: steps = exp.inferObjectWithRandomMovements(objectDescription, randomLocation=randomLocation) convergence[steps] += 1 if steps is None: print 'Failed to infer object "{}"'.format(objectDescription["name"]) else: print 'Inferred object {} after {} steps'.format(objectDescription["name"], steps) else: for objectDescription in objects: steps = exp.inferObjectWithRandomMovements(objectDescription, randomLocation=randomLocation) convergence[steps] += 1 if steps is None: print 'Failed to infer object "{}"'.format(objectDescription["name"]) else: print 'Inferred object {} after {} steps'.format(objectDescription["name"], steps) for step, num in sorted(convergence.iteritems()): print "{}: {}".format(step, num) return(convergence)
def doExperiment(numObjects, featuresPerObject, objectWidth, numFeatures, useTrace, useRawTrace, noiseFactor, moduleNoiseFactor, numModules, thresholds, inverseReadoutResolution, enlargeModuleFactor, bumpOverlapMethod): """ Learn a set of objects. Then try to recognize each object. Output an interactive visualization. """ if not os.path.exists("traces"): os.makedirs("traces") features = [str(i) for i in xrange(numFeatures)] objects = generateObjects(numObjects, featuresPerObject, objectWidth, numFeatures) locationConfigs = [] scale = 40.0 if thresholds is None: thresholds = int(((numModules + 1) * 0.8)) elif thresholds == 0: thresholds = numModules perModRange = float(90.0 / float(numModules)) for i in xrange(numModules): orientation = (float(i) * perModRange) + (perModRange / 2.0) locationConfigs.append({ "scale": scale, "orientation": np.radians(orientation), "activationThreshold": 8, "initialPermanence": 1.0, "connectedPermanence": 0.5, "learningThreshold": 8, "sampleSize": 10, "permanenceIncrement": 0.1, "permanenceDecrement": 0.0, "inverseReadoutResolution": inverseReadoutResolution, "enlargeModuleFactor": enlargeModuleFactor, "bumpOverlapMethod": bumpOverlapMethod, }) l4Overrides = { "initialPermanence": 1.0, "activationThreshold": thresholds, "reducedBasalThreshold": thresholds, "minThreshold": numModules, "sampleSize": numModules, "cellsPerColumn": 16, } column = PIUNCorticalColumn(locationConfigs, L4Overrides=l4Overrides, useGaussian=True) exp = PIUNExperiment(column, featureNames=features, numActiveMinicolumns=10, noiseFactor=noiseFactor, moduleNoiseFactor=moduleNoiseFactor) for objectDescription in objects: exp.learnObject(objectDescription) filename = os.path.join( SCRIPT_DIR, "traces/{}-resolution-{}-modules-{}-objects-{}-feats.html".format( inverseReadoutResolution, numModules, numObjects, numFeatures)) rawFilename = os.path.join( SCRIPT_DIR, "traces/{}-resolution-{}-modules-{}-objects-{}-feats.trace".format( inverseReadoutResolution, numModules, 
numObjects, numFeatures)) assert not (useTrace and useRawTrace), "Cannot use both --trace and --rawTrace" convergence = collections.defaultdict(int) if useTrace: with io.open(filename, "w", encoding="utf8") as fileOut: with trace(fileOut, exp, includeSynapses=True): print "Logging to", filename for objectDescription in objects: steps = exp.inferObjectWithRandomMovements( objectDescription) convergence[steps] += 1 if steps is None: print 'Failed to infer object "{}"'.format( objectDescription["name"]) elif useRawTrace: with io.open(rawFilename, "w", encoding="utf8") as fileOut: strOut = StringIO.StringIO() with rawTrace(strOut, exp, includeSynapses=False): print "Logging to", filename for objectDescription in objects: steps = exp.inferObjectWithRandomMovements( objectDescription) convergence[steps] += 1 if steps is None: print 'Failed to infer object "{}"'.format( objectDescription["name"]) fileOut.write(unicode(strOut.getvalue())) else: for objectDescription in objects: steps = exp.inferObjectWithRandomMovements(objectDescription) convergence[steps] += 1 if steps is None: print 'Failed to infer object "{}"'.format( objectDescription["name"]) for step, num in sorted(convergence.iteritems()): print "{}: {}".format(step, num) return (convergence)
def doExperiment(locationModuleWidth,
                 cellCoordinateOffsets,
                 initialIncrement,
                 minAccuracy,
                 capacityResolution,
                 featuresPerObject,
                 objectWidth,
                 numFeatures,
                 useTrace,
                 noiseFactor,
                 moduleNoiseFactor,
                 numModules,
                 thresholds,
                 seed1,
                 seed2,
                 anchoringMethod="narrowing"):
  """
  Finds the capacity of the specified model and object configuration.

  The algorithm has two stages. First it finds an upper bound for the
  capacity by repeatedly incrementing the number of objects by
  initialIncrement. After it finds a number of objects that is above
  capacity, it begins the second stage: performing a binary search over the
  number of objects to find an exact capacity.

  @param initialIncrement (int)
  For example, if this number is 128, this method will test 128 objects,
  then 256, and so on, until it finds an upper bound, then it will narrow in
  on the breaking point. Any value works, but the simulation will be
  inefficient if it's too low or too high.

  @param capacityResolution (int)
  The resolution of the capacity. If capacityResolution=1, this method will
  find the exact capacity. If the capacityResolution is higher, the method
  will return a capacity that is potentially less than the actual capacity.

  @param minAccuracy (float)
  The recognition success rate that the model must achieve.
  """
  if not os.path.exists("traces"):
    os.makedirs("traces")

  # Optionally seed both RNGs for reproducibility (-1 means "don't seed").
  if seed1 != -1:
    np.random.seed(seed1)

  if seed2 != -1:
    random.seed(seed2)

  features = [str(i) for i in xrange(numFeatures)]

  locationConfigs = []
  scale = 40.0

  # thresholds == -1 selects a default of 80% of the module count;
  # thresholds == 0 requires agreement from every module.
  if thresholds == -1:
    thresholds = int(math.ceil(numModules * 0.8))
  elif thresholds == 0:
    thresholds = numModules

  # Spread module orientations evenly over 90 degrees, offset by half a bin.
  perModRange = float(90.0 / float(numModules))
  for i in xrange(numModules):
    orientation = (float(i) * perModRange) + (perModRange / 2.0)

    locationConfigs.append({
      "cellsPerAxis": locationModuleWidth,
      "scale": scale,
      "orientation": np.radians(orientation),
      "cellCoordinateOffsets": cellCoordinateOffsets,
      "activationThreshold": 8,
      "initialPermanence": 1.0,
      "connectedPermanence": 0.5,
      "learningThreshold": 8,
      "sampleSize": 10,
      "permanenceIncrement": 0.1,
      "permanenceDecrement": 0.0,
      "anchoringMethod": anchoringMethod,
    })

  l4Overrides = {
    "initialPermanence": 1.0,
    "activationThreshold": thresholds,
    "reducedBasalThreshold": thresholds,
    "minThreshold": numModules,
    "sampleSize": numModules,
    "cellsPerColumn": 16,
  }

  increment = initialIncrement
  # numObjects tracks the largest object count known to be within capacity;
  # accuracy is the recognition rate achieved at that count.
  numObjects = 0
  accuracy = None
  # Once True, each pass halves `increment` (the binary-search stage).
  foundUpperBound = False

  while True:
    currentNumObjects = numObjects + increment
    numFailuresAllowed = currentNumObjects * (1 - minAccuracy)

    print "Testing", currentNumObjects

    objects = generateObjects(currentNumObjects, featuresPerObject,
                              objectWidth, numFeatures)

    # Build a fresh model for each trial so learning doesn't accumulate
    # across object counts.
    column = PIUNCorticalColumn(locationConfigs, L4Overrides=l4Overrides)
    exp = PIUNExperiment(column, featureNames=features,
                         numActiveMinicolumns=10,
                         noiseFactor=noiseFactor,
                         moduleNoiseFactor=moduleNoiseFactor)

    for objectDescription in objects:
      exp.learnObject(objectDescription)

    numFailures = 0

    try:
      if useTrace:
        # NOTE(review): the filename embeds `numObjects` (the previous
        # successful count), not `currentNumObjects` — confirm intended.
        filename = os.path.join(
          SCRIPT_DIR,
          "traces/capacity-{}-points-{}-cells-{}-objects-{}-feats.html"
          .format(
            len(cellCoordinateOffsets)**2,
            exp.column.L6aModules[0].numberOfCells(),
            numObjects, numFeatures))
        traceFileOut = io.open(filename, "w", encoding="utf8")
        traceHandle = trace(traceFileOut, exp, includeSynapses=True)
        print "Logging to", filename

      for objectDescription in objects:
        numSensationsToInference = exp.inferObjectWithRandomMovements(
          objectDescription)
        if numSensationsToInference is None:
          numFailures += 1
          # Stop early once this trial can no longer meet minAccuracy.
          if numFailures > numFailuresAllowed:
            break
    finally:
      if useTrace:
        # NOTE(review): __exit__ is called with no arguments; a standard
        # context manager's __exit__ takes (type, value, traceback) —
        # confirm trace() returns a handle that tolerates this.
        traceHandle.__exit__()
        traceFileOut.close()

    if numFailures < numFailuresAllowed:
      # Trial succeeded: record the new lower bound and its accuracy.
      # (Hitting numFailuresAllowed exactly counts as a failed trial.)
      numObjects = currentNumObjects
      accuracy = float(currentNumObjects - numFailures) / currentNumObjects
    else:
      foundUpperBound = True

    if foundUpperBound:
      # Binary-search stage: halve the step (integer floor division in
      # Python 2) until it drops below the requested resolution.
      increment /= 2

      if increment < capacityResolution:
        break

  result = {
    "numObjects": numObjects,
    "accuracy": accuracy,
  }

  print result

  return result