def runStretchExperiment(numObjects=25):
  """
  Stretch/profiling run: learn many random objects, then infer one of them,
  printing the network profile after both phases.

  Parameters:
  ----------------------------
  @param numObjects (int)
         Number of objects to create and learn.
  """
  exp = L4L2Experiment("profiling_experiment", )

  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024
  )
  objects.createRandomObjects(numObjects=numObjects, numPoints=10)

  exp.learnObjects(objects.provideObjectsToLearn())
  exp.printProfile()

  # Infer on object 0, presenting each of its sensations once.
  firstObject = objects[0]
  inferConfig = {
    "numSteps": len(firstObject),
    "pairs": {0: firstObject}
  }
  exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
  exp.printProfile()

  exp.plotInferenceStats(
    fields=["L2 Representation",
            "Overlap L2 with object",
            "L4 Representation"]
  )
def testGetDistinctPairs(self):
  """Ensures we can compute unique pairs."""
  pairObjects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=150,
    externalInputSize=2400,
    numCorticalColumns=3,
    numFeatures=5,
    numLocations=10,
    seed=42
  )

  # Three objects with three distinct pairs; note that (3, 1) and (1, 3)
  # are counted as different (location, feature) pairs.
  pairObjects.addObject([(1, 3)], 0)
  pairObjects.addObject([(3, 1), (1, 3)], 1)
  pairObjects.addObject([(2, 4)], 2)
  self.assertEqual(len(pairObjects.getDistinctPairs()), 3)

  # An object built only from already-seen pairs adds nothing new.
  pairObjects.addObject([(2, 4), (1, 3)], 3)
  self.assertEqual(len(pairObjects.getDistinctPairs()), 3)

  # One genuinely new pair, (1, 1), bumps the count to four.
  pairObjects.addObject([(2, 4), (1, 3), (1, 1)], 4)
  self.assertEqual(len(pairObjects.getDistinctPairs()), 4)
def testGetDistinctPairs(self):
  """Ensures we can compute unique pairs."""
  machine = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=150,
    externalInputSize=2400,
    numCorticalColumns=3,
    numFeatures=5,
    numLocations=10,
    seed=42
  )

  machine.addObject([(1, 3)], 0)
  machine.addObject([(3, 1), (1, 3)], 1)
  machine.addObject([(2, 4)], 2)
  distinctPairs = machine.getDistinctPairs()
  # (3, 1) and (1, 3) count separately, so three pairs so far.
  self.assertEqual(len(distinctPairs), 3)

  machine.addObject([(2, 4), (1, 3)], 3)
  distinctPairs = machine.getDistinctPairs()
  # Object 3 repeats known pairs only; the count is unchanged.
  self.assertEqual(len(distinctPairs), 3)

  machine.addObject([(2, 4), (1, 3), (1, 1)], 4)
  distinctPairs = machine.getDistinctPairs()
  # (1, 1) is new, so the count grows by one.
  self.assertEqual(len(distinctPairs), 4)
def runCapacityTest(numObjects,
                    numPointsPerObject,
                    sampleSize,
                    activationThreshold,
                    numCorticalColumns):
  """
  Measure capacity of an L4-L2 network.

  Generate [numObjects] objects with [numPointsPerObject] points per object,
  train the L4-L2 network on all the objects with single-pass learning, then
  test on (feature, location) pairs and compute the retrieval result.

  :param numObjects: number of objects to generate and learn
  :param numPointsPerObject: number of (feature, location) points per object
  :param sampleSize: value for the L2 "sampleSizeProximal" override
  :param activationThreshold: value for the L2 "minThresholdProximal" override
  :param numCorticalColumns: number of cortical columns in the network
  :return: result of testOnSingleRandomSDR on the trained network
  """
  l4Params = getL4Params()
  l2Params = getL2Params()
  l2Params["sampleSizeProximal"] = sampleSize  # TODO
  l2Params["minThresholdProximal"] = activationThreshold

  l4ColumnCount = l4Params["columnCount"]
  # Input sparsity is fixed at 2% of the L4 column count.
  numInputBits = int(l4Params["columnCount"]*0.02)

  objects = createObjectMachine(
    machineType="simple",
    numInputBits=numInputBits,
    sensorInputSize=l4ColumnCount,
    externalInputSize=l4ColumnCount,
    numCorticalColumns=numCorticalColumns,
    numLocations=NUM_LOCATIONS,
    numFeatures=NUM_FEATURES
  )

  exp = L4L2Experiment("capacity_two_objects",
                       numInputBits=numInputBits,
                       L2Overrides=l2Params,
                       L4Overrides=l4Params,
                       inputSize=l4ColumnCount,
                       externalInputSize=l4ColumnCount,
                       numLearningPoints=4,
                       numCorticalColumns=numCorticalColumns)

  pairs = createRandomObjects(
    numObjects,
    numPointsPerObject,
    NUM_LOCATIONS,
    NUM_FEATURES
  )

  for object in pairs:
    objects.addObject(object)

  exp.learnObjects(objects.provideObjectsToLearn())

  testResult = testOnSingleRandomSDR(objects, exp)
  return testResult
def runUncertainLocations(missingLoc=None, profile=False):
  """
  Runs the same experiment as above, with missing locations at some
  timesteps during inference (if it was not successfully computed by the
  rest of the network for example).

  @param missingLoc (dict) A dictionary mapping indices in the object to
         location index to replace with during inference (-1 means no
         location, a tuple means an union of locations).

  @param profile (bool) If True, the network will be profiled after
         learning and inference.
  """
  if missingLoc is None:
    missingLoc = {}

  exp = L4L2Experiment(
    "uncertain_location",
    enableLateralSP = True,
    enableFeedForwardSP=True
  )

  pairs = createThreeObjects()
  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024
  )
  for newObject in pairs:
    objects.addObject(newObject)

  exp.learnObjects(objects.provideObjectsToLearn())

  # Substitute the requested locations into object 0's sensations.
  # NOTE(review): this aliases the object machine's stored object and
  # mutates it in place -- presumably intentional here; confirm.
  objectA = objects[0]
  for key, val in missingLoc.iteritems():
    objectA[key] = (val, key)

  inferConfig = {
    "numSteps": 10,
    "pairs": {0: objectA}
  }
  exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
  if profile:
    exp.printProfile()

  exp.plotInferenceStats(
    fields=["L2 Representation",
            "Overlap L2 with object",
            "L4 Representation",
            "L4 Predictive"],
  )
def runDisambiguationByUnions(noiseLevel=None, profile=False):
  """
  Runs a simple experiment where an object is disambiguated as each column
  recognizes a union of two objects, and the real object is the only common
  one.

  Parameters:
  ----------------------------
  @param noiseLevel (float)
         Noise level to add to the locations and features during inference

  @param profile (bool)
         If True, the network will be profiled after learning and inference
  """
  exp = L4L2Experiment(
    "disambiguation_unions",
    numCorticalColumns=2,
  )

  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=2,
  )
  objects.addObject([(1, 1), (2, 2)])
  objects.addObject([(2, 2), (3, 3)])
  objects.addObject([(3, 3), (4, 4)])

  exp.learnObjects(objects.provideObjectsToLearn())
  if profile:
    exp.printProfile()

  # Column 0 only ever senses (2, 2): ambiguous between objects 0 and 1.
  # Column 1 only ever senses (3, 3): ambiguous between objects 1 and 2.
  # Object 1 is the single object consistent with both columns.
  inferConfig = {
    "numSteps": 6,
    "noiseLevel": noiseLevel,
    "pairs": {
      # this should activate 1 and 2
      0: [(2, 2)] * 6,
      # this should activate 2 and 3
      1: [(3, 3)] * 6
    }
  }
  exp.infer(objects.provideObjectToInfer(inferConfig), objectName=1)
  if profile:
    exp.printProfile()

  exp.plotInferenceStats(
    fields=["L2 Representation",
            "Overlap L2 with object",
            "L4 Representation"],
    onePlot=False,
  )
def runLateralDisambiguation(noiseLevel=None, profile=False):
  """
  Runs a simple experiment where two objects share a (location, feature)
  pair. At inference, one column sees that ambiguous pair, and the other
  sees a unique one. We should see the first column rapidly converge to a
  unique representation.

  Parameters:
  ----------------------------
  @param noiseLevel (float)
         Noise level to add to the locations and features during inference

  @param profile (bool)
         If True, the network will be profiled after learning and inference
  """
  exp = L4L2Experiment(
    "lateral_disambiguation",
    numCorticalColumns=2,
  )

  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=2,
  )
  # The two objects share the (1, 1) pair; only (2, 2) / (3, 2) differ.
  objects.addObject([(1, 1), (2, 2)])
  objects.addObject([(1, 1), (3, 2)])

  exp.learnObjects(objects.provideObjectsToLearn())
  if profile:
    exp.printProfile()

  inferConfig = {
    "noiseLevel": noiseLevel,
    "numSteps": 6,
    "pairs": {
      # this should activate 0 and 1
      0: [(1, 1)] * 6,
      # this should activate 1
      1: [(3, 2)] * 6
    }
  }
  exp.infer(objects.provideObjectToInfer(inferConfig), objectName=1)
  if profile:
    exp.printProfile()

  exp.plotInferenceStats(
    fields=["L2 Representation",
            "Overlap L2 with object",
            "L4 Representation"],
    onePlot=False,
  )
def runLateralDisambiguation(noiseLevel=None, profile=False):
  """
  Two objects share the (1, 1) pair. During inference, column 0 senses only
  that ambiguous pair while column 1 senses (3, 2), which is unique to
  object 1; lateral input should make column 0 converge quickly.

  Parameters:
  ----------------------------
  @param noiseLevel (float)
         Noise level to add to the locations and features during inference

  @param profile (bool)
         If True, the network will be profiled after learning and inference
  """
  exp = L4L2Experiment(
    "lateral_disambiguation",
    numCorticalColumns=2,
  )

  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=2,
  )
  objects.addObject([(1, 1), (2, 2)])
  objects.addObject([(1, 1), (3, 2)])

  exp.learnObjects(objects.provideObjectsToLearn())
  if profile:
    exp.printProfile()

  sharedPairSequence = [(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)]
  uniquePairSequence = [(3, 2), (3, 2), (3, 2), (3, 2), (3, 2), (3, 2)]
  inferConfig = {
    "noiseLevel": noiseLevel,
    "numSteps": 6,
    "pairs": {
      # this should activate 0 and 1
      0: sharedPairSequence,
      # this should activate 1
      1: uniquePairSequence
    }
  }
  exp.infer(objects.provideObjectToInfer(inferConfig), objectName=1)
  if profile:
    exp.printProfile()

  exp.plotInferenceStats(
    fields=["L2 Representation", "Overlap L2 with object", "L4 Representation"],
    onePlot=False,
  )
def runDisambiguationByUnions(noiseLevel=None, profile=False):
  """
  Each column senses a single ambiguous pair and so represents a union of
  two learned objects; the real object (object 1) is the only one common to
  both unions, so the network should disambiguate to it.

  Parameters:
  ----------------------------
  @param noiseLevel (float)
         Noise level to add to the locations and features during inference

  @param profile (bool)
         If True, the network will be profiled after learning and inference
  """
  exp = L4L2Experiment(
    "disambiguation_unions",
    numCorticalColumns=2,
  )

  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=2,
  )
  # Objects form a chain: 0 and 1 share (2, 2); 1 and 2 share (3, 3).
  objects.addObject([(1, 1), (2, 2)])
  objects.addObject([(2, 2), (3, 3)])
  objects.addObject([(3, 3), (4, 4)])

  exp.learnObjects(objects.provideObjectsToLearn())
  if profile:
    exp.printProfile()

  inferConfig = {
    "numSteps": 6,
    "noiseLevel": noiseLevel,
    "pairs": {
      # this should activate 1 and 2
      0: [(2, 2), (2, 2), (2, 2), (2, 2), (2, 2), (2, 2)],
      # this should activate 2 and 3
      1: [(3, 3), (3, 3), (3, 3), (3, 3), (3, 3), (3, 3)]
    }
  }
  exp.infer(objects.provideObjectToInfer(inferConfig), objectName=1)
  if profile:
    exp.printProfile()

  exp.plotInferenceStats(
    fields=["L2 Representation", "Overlap L2 with object", "L4 Representation"],
    onePlot=False,
  )
def runCapacityTest(numObjects,
                    numPointsPerObject,
                    numCorticalColumns,
                    l2Params,
                    l4Params,
                    objectParams,
                    repeat=0):
  """
  Measure capacity of an L4-L2 network.

  Generate [numObjects] objects with [numPointsPerObject] points per object,
  train the L4-L2 network on all the objects with single-pass learning, then
  test on (feature, location) pairs and compute the retrieval result.

  :param numObjects: number of objects to generate and learn
  :param numPointsPerObject: number of (feature, location) points per object
  :param numCorticalColumns: number of cortical columns in the network
  :param l2Params: dict of L2 parameter overrides (must include "columnCount"
         indirectly via l4Params usage below)
  :param l4Params: dict of L4 parameter overrides; "columnCount" is read here
  :param objectParams: dict providing 'numInputBits' (None means use 2% of
         the L4 column count) and 'externalInputSize'
  :param repeat: repetition index, recorded in the result under 'repeatID'
  :return: result dict from testOnSingleRandomSDR, augmented with 'repeatID'
  """
  l4ColumnCount = l4Params["columnCount"]
  numInputBits = objectParams['numInputBits']
  externalInputSize = objectParams['externalInputSize']
  if numInputBits is None:
    # Default sparsity: 2% of the L4 column count.
    numInputBits = int(l4ColumnCount * 0.02)

  objects = createObjectMachine(machineType="simple",
                                numInputBits=numInputBits,
                                sensorInputSize=l4ColumnCount,
                                externalInputSize=externalInputSize,
                                numCorticalColumns=numCorticalColumns,
                                numLocations=NUM_LOCATIONS,
                                numFeatures=NUM_FEATURES)

  exp = L4L2Experiment("capacity_two_objects",
                       numInputBits=numInputBits,
                       L2Overrides=l2Params,
                       L4Overrides=l4Params,
                       inputSize=l4ColumnCount,
                       externalInputSize=externalInputSize,
                       numLearningPoints=3,
                       numCorticalColumns=numCorticalColumns)

  pairs = createRandomObjects(numObjects,
                              numPointsPerObject,
                              NUM_LOCATIONS,
                              NUM_FEATURES)
  for object in pairs:
    objects.addObject(object)

  exp.learnObjects(objects.provideObjectsToLearn())

  testResult = testOnSingleRandomSDR(objects, exp)
  testResult['repeatID'] = repeat
  return testResult
def runAmbiguities(noiseLevel=None, profile=False):
  """
  Runs an experiment where three objects are being learnt, but share many
  patterns. At inference, only one object is being moved over, and we
  should see quick convergence.

  Parameters:
  ----------------------------
  @param noiseLevel (float)
         Noise level to add to the locations and features during inference

  @param profile (bool)
         If True, the network will be profiled after learning and inference
  """
  exp = L4L2Experiment(
    "ambiguities",
    numCorticalColumns=2,
  )

  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=2,
  )
  # Heavily overlapping objects: (2, 1) and (3, 3) recur across them.
  objects.addObject([(1, 1), (2, 1), (3, 3)])
  objects.addObject([(2, 2), (3, 3), (2, 1)])
  objects.addObject([(3, 1), (2, 1), (1, 2)])

  exp.learnObjects(objects.provideObjectsToLearn())
  if profile:
    exp.printProfile()

  columnZeroSensations = [(2, 1), (2, 1), (3, 3), (2, 2), (2, 2), (2, 2)]
  columnOneSensations = [(3, 3), (3, 3), (3, 3), (2, 2), (2, 1), (2, 1)]
  inferConfig = {
    "numSteps": 6,
    "noiseLevel": noiseLevel,
    "pairs": {
      0: columnZeroSensations,
      1: columnOneSensations
    }
  }
  exp.infer(objects.provideObjectToInfer(inferConfig), objectName=1)
  if profile:
    exp.printProfile()

  exp.plotInferenceStats(
    fields=["L2 Representation",
            "Overlap L2 with object",
            "L4 Representation"],
    onePlot=False,
  )
def runAmbiguities(noiseLevel=None, profile=False):
  """
  Three heavily overlapping objects are learned; inference moves over
  object 1 only, and convergence should be quick despite the shared
  patterns.

  Parameters:
  ----------------------------
  @param noiseLevel (float)
         Noise level to add to the locations and features during inference

  @param profile (bool)
         If True, the network will be profiled after learning and inference
  """
  exp = L4L2Experiment(
    "ambiguities",
    numCorticalColumns=2,
  )

  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=2,
  )
  for pattern in ([(1, 1), (2, 1), (3, 3)],
                  [(2, 2), (3, 3), (2, 1)],
                  [(3, 1), (2, 1), (1, 2)]):
    objects.addObject(pattern)

  exp.learnObjects(objects.provideObjectsToLearn())
  if profile:
    exp.printProfile()

  inferConfig = {
    "numSteps": 6,
    "noiseLevel": noiseLevel,
    "pairs": {
      0: [(2, 1), (2, 1), (3, 3), (2, 2), (2, 2), (2, 2)],
      1: [(3, 3), (3, 3), (3, 3), (2, 2), (2, 1), (2, 1)]
    }
  }
  exp.infer(objects.provideObjectToInfer(inferConfig), objectName=1)
  if profile:
    exp.printProfile()

  exp.plotInferenceStats(
    fields=["L2 Representation", "Overlap L2 with object", "L4 Representation"],
    onePlot=False,
  )
def loadThingObjects(numCorticalColumns=1):
  """
  Load simulated sensation data on a number of different objects.

  There is one file per object; each row contains one feature, location pair.
  The format is as follows:
    [(-33.6705, 75.5003, 2.4207)/10] => [[list of active bits of location],
                                         [list of active bits of feature]]
  The content before "=>" is the true 3D location / sensation; the active
  bits of the location and feature are listed after "=>".

  @param numCorticalColumns (int) Number of columns; the location/feature
         pools are replicated once per column.
  @return A simple object machine populated from the files under data/.
  """
  # create empty simple object machine
  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=numCorticalColumns,
    numFeatures=0,
    numLocations=0,
  )
  for _ in range(numCorticalColumns):
    objects.locations.append([])
    objects.features.append([])

  objDataPath = 'data/'
  objFiles = [f for f in os.listdir(objDataPath)
              if os.path.isfile(os.path.join(objDataPath, f))]

  idx = 0
  for f in objFiles:
    objName = f.split('.')[0]
    # Use a context manager so each data file is closed deterministically
    # (the previous version opened the file and never closed it).
    with open('{}/{}'.format(objDataPath, f)) as objFile:
      sensationList = []
      for line in objFile.readlines():
        # parse thing data file and extract feature/location vectors
        sense = line.split('=>')[1].strip(' ').strip('\n')
        location = sense.split('],[')[0].strip('[')
        feature = sense.split('],[')[1].strip(']')
        location = np.fromstring(location, sep=',', dtype=np.uint8)
        feature = np.fromstring(feature, sep=',', dtype=np.uint8)

        # add the current sensation to object Machine
        sensationList.append((idx, idx))
        for c in range(numCorticalColumns):
          objects.locations[c].append(set(location.tolist()))
          objects.features[c].append(set(feature.tolist()))
        idx += 1
    objects.addObject(sensationList, objName)
  return objects
def runBasic(noiseLevel=None, profile=False):
  """
  Runs a basic experiment on continuous locations, learning a few locations
  on four basic objects, and inferring one of them.

  This experiment is mostly used for testing the pipeline, as the learned
  locations are too random and sparse to actually perform inference.

  Parameters:
  ----------------------------
  @param noiseLevel (float)
         Noise level to add to the locations and features during inference

  @param profile (bool)
         If True, the network will be profiled after learning and inference
  """
  exp = L4L2Experiment("basic_continuous", numCorticalColumns=2)

  objects = createObjectMachine(
    machineType="continuous",
    numInputBits=21,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=2
  )
  objects.addObject(Sphere(radius=20), name="sphere")
  objects.addObject(Cylinder(height=50, radius=20), name="cylinder")
  objects.addObject(Box(dimensions=[10, 20, 30]), name="box")
  objects.addObject(Cube(width=20), name="cube")

  learnConfig = {
    "sphere": [("surface", 10)],
    # the two learning config below will be exactly the same
    "box": [("face", 5), ("edge", 5), ("vertex", 5)],
    "cube": [(feature, 5) for feature in objects["cube"].getFeatures()],
    "cylinder": [(feature, 5)
                 for feature in objects["cylinder"].getFeatures()],
  }
  exp.learnObjects(objects.provideObjectsToLearn(learnConfig, plot=True),
                   reset=True)
  if profile:
    exp.printProfile()

  inferConfig = {
    "numSteps": 4,
    "noiseLevel": noiseLevel,
    "objectName": "cube",
    "pairs": {
      0: ["face", "face", "edge", "edge"],
      1: ["edge", "face", "face", "edge"]
    },
  }
  exp.infer(objects.provideObjectToInfer(inferConfig, plot=True),
            objectName="cube", reset=True)
  if profile:
    exp.printProfile()

  exp.plotInferenceStats(
    fields=["L2 Representation",
            "Overlap L2 with object",
            "L4 Representation"]
  )
def setUp(self):
  """
  Build a single-column Bayesian L4-L2 experiment and a random object
  machine for the tests to use (stored on self.exp1 / self.objectMachine).
  """
  # params
  maxNumSegments = 2
  L2Overrides = {
    "learningRate": 0.1,
    "noise": 1e-8,
    "cellCount": 256,  # new: 256 # original: 4096
    "inputWidth": 8192,  # new: 8192 # original: 16384 (?)
    "sdrSize": 5,
    "activationThreshold": 0.01,
    "useSupport": True
  }

  L4Overrides = {
    "learningRate": 0.1,
    "noise": 1e-8,
    "cellsPerColumn": 4,  # new: 4 # original 32
    "columnCount": 2048,  # new: 2048 # original: 2048
    "minThreshold": 0.35,
  }

  self.exp1 = L4L2Experiment(
    'single_column',
    implementation='Bayesian',
    L2RegionType="py.BayesianColumnPoolerRegion",
    L4RegionType="py.BayesianApicalTMPairRegion",
    L2Overrides=L2Overrides,
    L4Overrides=L4Overrides,
    numCorticalColumns=1,
    maxSegmentsPerCell=maxNumSegments,
    numLearningPoints=3,  # number repetitions for learning
    seed=1
  )

  numFeatures = 3  # new: 3 # original: 3
  numPoints = 5  # new: 5 # original: 10
  numLocations = 5  # new: 5 # original: 10
  # NOTE(review): a previous comment here said "new: 2", but the value
  # in use is 5 (original: 10) -- confirm which was intended.
  numObjects = 5
  # NOTE(review): numRptsPerSensation is not used in this method;
  # presumably read by the test bodies -- confirm.
  numRptsPerSensation = 2

  self.objectMachine = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=3,
    seed=40,
  )
  self.objectMachine.createRandomObjects(numObjects,
                                         numPoints=numPoints,
                                         numLocations=numLocations,
                                         numFeatures=numFeatures)
def runSharedFeatures(noiseLevel=None, profile=False):
  """
  Runs a simple experiment where three objects share a number of
  (location, feature) pairs.

  Parameters:
  ----------------------------
  @param noiseLevel (float)
         Noise level to add to the locations and features during inference

  @param profile (bool)
         If True, the network will be profiled after learning and inference
  """
  exp = L4L2Experiment(
    "shared_features",
    enableLateralSP=True,
    enableFeedForwardSP=True
  )

  pairs = createThreeObjects()
  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024
  )
  for newObject in pairs:
    objects.addObject(newObject)

  exp.learnObjects(objects.provideObjectsToLearn())
  if profile:
    exp.printProfile()

  # Walk over object 0 using its first ten (location, feature) indices.
  inferConfig = {
    "numSteps": 10,
    "noiseLevel": noiseLevel,
    "pairs": {
      0: zip(range(10), range(10))
    }
  }
  exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
  if profile:
    exp.printProfile()

  exp.plotInferenceStats(
    fields=["L2 Representation",
            "Overlap L2 with object",
            "L4 Representation"],
  )
def runCapacityTest(numObjects,
                    numPointsPerObject,
                    maxNewSynapseCount,
                    activationThreshold,
                    numCorticalColumns):
  """
  Capacity test: generate [numObjects] objects with [numPointsPerObject]
  points each, train the L4-L2 network with single-pass learning, then test
  retrieval from single (feature, location) SDR sensations.

  :param numObjects: number of objects to generate and learn
  :param numPointsPerObject: number of points per object
  :param maxNewSynapseCount: L2 "maxNewProximalSynapseCount" override
  :param activationThreshold: L2 "minThresholdProximal" override
  :param numCorticalColumns: number of cortical columns in the network
  :return: result of testOnSingleRandomSDR on the trained network
  """
  l4Params = getL4Params()
  l2Params = getL2Params()
  l2Params["maxNewProximalSynapseCount"] = maxNewSynapseCount
  l2Params["minThresholdProximal"] = activationThreshold

  l4ColumnCount = l4Params["columnCount"]
  # Input sparsity fixed at 2% of the L4 column count.
  numInputBits = int(l4Params["columnCount"] * 0.02)

  objects = createObjectMachine(machineType="simple",
                                numInputBits=numInputBits,
                                sensorInputSize=l4ColumnCount,
                                externalInputSize=l4ColumnCount,
                                numCorticalColumns=numCorticalColumns,
                                numLocations=NUM_LOCATIONS,
                                numFeatures=NUM_FEATURES)
  exp = L4L2Experiment("capacity_two_objects",
                       numInputBits=numInputBits,
                       L2Overrides=l2Params,
                       L4Overrides=l4Params,
                       inputSize=l4ColumnCount,
                       externalInputSize=l4ColumnCount,
                       numLearningPoints=4,
                       numCorticalColumns=numCorticalColumns)

  # "obj" rather than "object" to avoid shadowing the builtin.
  for obj in createRandomObjects(numObjects,
                                 numPointsPerObject,
                                 NUM_LOCATIONS,
                                 NUM_FEATURES):
    objects.addObject(obj)

  exp.learnObjects(objects.provideObjectsToLearn())
  return testOnSingleRandomSDR(objects, exp)
def testCreateRandom(self):
  """Simple construction test."""
  objects = createObjectMachine(machineType="simple", seed=42)
  objects.createRandomObjects(numObjects=10, numPoints=10,
                              numLocations=10, numFeatures=10)
  self.assertEqual(len(objects), 10)

  # Num locations must be >= num points
  with self.assertRaises(AssertionError):
    objects.createRandomObjects(numObjects=10, numPoints=10,
                                numLocations=9, numFeatures=10)
def testCreateRandom(self):
  """Simple construction test."""
  machine = createObjectMachine(
    machineType="simple",
    seed=42
  )
  machine.createRandomObjects(numObjects=10,
                              numPoints=10,
                              numLocations=10,
                              numFeatures=10)
  self.assertEqual(len(machine), 10)

  # Requesting more points than available locations must fail:
  # num locations must be >= num points.
  with self.assertRaises(AssertionError):
    machine.createRandomObjects(numObjects=10,
                                numPoints=10,
                                numLocations=9,
                                numFeatures=10)
def runUncertainLocations(missingLoc=None, profile=False):
  """
  Runs the same experiment as above, with missing locations at some
  timesteps during inference (if it was not successfully computed by the
  rest of the network for example).

  @param missingLoc (dict) A dictionary mapping indices in the object to
         location index to replace with during inference (-1 means no
         location, a tuple means an union of locations).

  @param profile (bool) If True, the network will be profiled after
         learning and inference.
  """
  if missingLoc is None:
    missingLoc = {}

  exp = L4L2Experiment("uncertain_location",
                       enableLateralSP=True,
                       enableFeedForwardSP=True)

  pairs = createThreeObjects()
  objects = createObjectMachine(machineType="simple",
                                numInputBits=20,
                                sensorInputSize=1024,
                                externalInputSize=1024)
  for pattern in pairs:
    objects.addObject(pattern)

  exp.learnObjects(objects.provideObjectsToLearn())

  # create pairs with missing locations
  # NOTE(review): objects[0] is mutated in place via this alias -- confirm
  # that modifying the stored object is intended.
  objectA = objects[0]
  for key, val in missingLoc.iteritems():
    objectA[key] = (val, key)

  inferConfig = {"numSteps": 10, "pairs": {0: objectA}}
  exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
  if profile:
    exp.printProfile()

  exp.plotInferenceStats(fields=["L2 Representation",
                                 "Overlap L2 with object",
                                 "L4 Representation",
                                 "L4 Predictive"], )
def runSharedFeatures(noiseLevel=None, profile=False):
  """
  Three objects sharing several (location, feature) pairs are learned;
  inference then walks over object 0.

  Parameters:
  ----------------------------
  @param noiseLevel (float)
         Noise level to add to the locations and features during inference

  @param profile (bool)
         If True, the network will be profiled after learning and inference
  """
  exp = L4L2Experiment("shared_features",
                       enableLateralSP=True,
                       enableFeedForwardSP=True)

  pairs = createThreeObjects()
  objects = createObjectMachine(machineType="simple",
                                numInputBits=20,
                                sensorInputSize=1024,
                                externalInputSize=1024)
  for pattern in pairs:
    objects.addObject(pattern)

  exp.learnObjects(objects.provideObjectsToLearn())
  if profile:
    exp.printProfile()

  inferConfig = {
    "numSteps": 10,
    "noiseLevel": noiseLevel,
    "pairs": {0: zip(range(10), range(10))}
  }
  exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
  if profile:
    exp.printProfile()

  exp.plotInferenceStats(fields=["L2 Representation",
                                 "Overlap L2 with object",
                                 "L4 Representation"], )
def runStretchExperiment(numObjects=25):
  """
  Generates a lot of random objects to profile the network.

  Parameters:
  ----------------------------
  @param numObjects (int)
         Number of objects to create and learn.
  """
  exp = L4L2Experiment(
    "profiling_experiment",
    enableLateralSP = True,
    enableFeedForwardSP=True
  )

  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024
  )
  objects.createRandomObjects(numObjects=numObjects, numPoints=10)

  exp.learnObjects(objects.provideObjectsToLearn())
  exp.printProfile()

  # Present every sensation of object 0 once during inference.
  targetObject = objects[0]
  inferConfig = {
    "numSteps": len(targetObject),
    "pairs": {
      0: targetObject
    }
  }
  exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
  exp.printProfile()

  exp.plotInferenceStats(
    fields=["L2 Representation", "Overlap L2 with object", "L4 Representation"]
  )
def runStretch(noiseLevel=None, profile=False): """ Stretch test that learns a lot of objects. Parameters: ---------------------------- @param noiseLevel (float) Noise level to add to the locations and features during inference @param profile (bool) If True, the network will be profiled after learning and inference """ exp = L4L2Experiment( "stretch_L10_F10_C2", numCorticalColumns=2, ) objects = createObjectMachine( machineType="simple", numInputBits=20, sensorInputSize=1024, externalInputSize=1024, numCorticalColumns=2, ) objects.createRandomObjects(10, 10, numLocations=10, numFeatures=10) print "Objects are:" for object, pairs in objects.objects.iteritems(): print str(object) + ": " + str(pairs) exp.learnObjects(objects.provideObjectsToLearn()) if profile: exp.printProfile(reset=True) # For inference, we will check and plot convergence for object 0. We create a # sequence of random sensations for each column. We will present each # sensation for 4 time steps to let it settle and ensure it converges. 
objectCopy1 = [pair for pair in objects[0]] objectCopy2 = [pair for pair in objects[0]] objectCopy3 = [pair for pair in objects[0]] random.shuffle(objectCopy1) random.shuffle(objectCopy2) random.shuffle(objectCopy3) # stay multiple steps on each sensation objectSensations1 = [] for pair in objectCopy1: for _ in xrange(4): objectSensations1.append(pair) # stay multiple steps on each sensation objectSensations2 = [] for pair in objectCopy2: for _ in xrange(4): objectSensations2.append(pair) # stay multiple steps on each sensation objectSensations3 = [] for pair in objectCopy3: for _ in xrange(4): objectSensations3.append(pair) inferConfig = { "numSteps": len(objectSensations1), "noiseLevel": noiseLevel, "pairs": { 0: objectSensations1, 1: objectSensations2, # 2: objectSensations3, # Uncomment for 3 columns } } exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0) if profile: exp.printProfile() exp.plotInferenceStats( fields=["L2 Representation", "Overlap L2 with object", "L4 Representation"], onePlot=False, )
def runStretch(noiseLevel=None, profile=False): """ Stretch test that learns a lot of objects. Parameters: ---------------------------- @param noiseLevel (float) Noise level to add to the locations and features during inference @param profile (bool) If True, the network will be profiled after learning and inference """ exp = L4L2Experiment( "stretch_L10_F10_C2", numCorticalColumns=2, ) objects = createObjectMachine( machineType="simple", numInputBits=20, sensorInputSize=1024, externalInputSize=1024, numCorticalColumns=2, ) objects.createRandomObjects(10, 10, numLocations=10, numFeatures=10) print "Objects are:" for object, pairs in objects.objects.iteritems(): print str(object) + ": " + str(pairs) exp.learnObjects(objects.provideObjectsToLearn()) if profile: exp.printProfile(reset=True) # For inference, we will check and plot convergence for object 0. We create a # sequence of random sensations for each column. We will present each # sensation for 4 time steps to let it settle and ensure it converges. 
objectCopy1 = [pair for pair in objects[0]] objectCopy2 = [pair for pair in objects[0]] objectCopy3 = [pair for pair in objects[0]] random.shuffle(objectCopy1) random.shuffle(objectCopy2) random.shuffle(objectCopy3) # stay multiple steps on each sensation objectSensations1 = [] for pair in objectCopy1: for _ in xrange(4): objectSensations1.append(pair) # stay multiple steps on each sensation objectSensations2 = [] for pair in objectCopy2: for _ in xrange(4): objectSensations2.append(pair) # stay multiple steps on each sensation objectSensations3 = [] for pair in objectCopy3: for _ in xrange(4): objectSensations3.append(pair) inferConfig = { "numSteps": len(objectSensations1), "noiseLevel": noiseLevel, "pairs": { 0: objectSensations1, 1: objectSensations2, # 2: objectSensations3, # Uncomment for 3 columns } } exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0) if profile: exp.printProfile() exp.plotInferenceStats( fields=[ "L2 Representation", "Overlap L2 with object", "L4 Representation" ], onePlot=False, )
def run_ideal_classifier(args={}): """ Create and train classifier using given parameters. """ numObjects = args.get("numObjects", 10) numLocations = args.get("numLocations", 10) numFeatures = args.get("numFeatures", 10) numPoints = args.get("numPoints", 10) trialNum = args.get("trialNum", 42) useLocation = args.get("useLocation", 1) numColumns = args.get("numColumns", 1) objects = createObjectMachine( machineType="simple", numInputBits=20, sensorInputSize=150, externalInputSize=2400, numCorticalColumns=numColumns, numFeatures=numFeatures, numLocations=numLocations, seed=trialNum ) random.seed(trialNum) objects.createRandomObjects(numObjects, numPoints=numPoints, numLocations=numLocations, numFeatures=numFeatures) objectSDRs = objects.provideObjectsToLearn() objectNames = objectSDRs.keys() featureWidth = objects.sensorInputSize locationWidth = objects.externalInputSize # compute the number of sensations across all objects numInputVectors = numPoints * numObjects if useLocation: inputWidth = featureWidth + locationWidth else: inputWidth = featureWidth # create "training" dataset data = np.zeros((numInputVectors, inputWidth)) label = np.zeros((numInputVectors, numObjects)) k = 0 for i in range(numObjects): numSensations = len(objectSDRs[objectNames[i]]) for j in range(numSensations): activeBitsFeature = np.array(list(objectSDRs[objectNames[i]][j][0][1])) data[k, activeBitsFeature] = 1 if useLocation: activeBitsLocation = np.array(list(objectSDRs[objectNames[i]][j][0][0])) data[k, featureWidth + activeBitsLocation] = 1 label[k, i] = 1 k += 1 # enumerate number of distinct "words". # Note: this could be done much more easily if we simply use the # location/feature pairs that are stored in the object machine. 
wordList = np.zeros((0, inputWidth), dtype='int32') featureList = np.zeros((numInputVectors,)) for i in range(numInputVectors): index = findWordInVocabulary(data[i, :], wordList) if index is not None: featureList[i] = index else: newWord = np.zeros((1, inputWidth), dtype='int32') newWord[0, :] = data[i, :] wordList = np.concatenate((wordList, newWord)) featureList[i] = wordList.shape[0] - 1 numWords = wordList.shape[0] # convert objects to vectorized word representations storedObjectRepresentations = np.zeros((numObjects, numWords), dtype=np.int32) k = 0 for i in range(numObjects): numSensations = len(objectSDRs[objectNames[i]]) for j in range(numSensations): index = findWordInVocabulary(data[k, :], wordList) storedObjectRepresentations[i, index] += 1 k += 1 # Cool plot of feature vectors # plt.figure() # plt.imshow(np.transpose(storedObjectRepresentations)) # plt.xlabel('Object #') # plt.ylabel('Word #') # plt.title("Object representations") # Create random order of sensations for each object objectSensations = [] for i in range(numObjects): senseList = [] wordIndices = np.where(storedObjectRepresentations[i, :])[0] # An object can have multiple instances of a word, in which case we # add all of them for w in wordIndices: senseList.extend(storedObjectRepresentations[i, w]*[w]) random.shuffle(senseList) objectSensations.append(senseList) # plot accuracy as a function of number of sensations accuracyList = [] classificationOutcome = np.zeros((numObjects, numPoints+1)) for sensationNumber in range(1, numPoints+1): bowVectorsTest = np.zeros((numObjects, numWords), dtype=np.int32) for objectId in range(numObjects): # No. sensations for object objectId sensations = objectSensations[objectId] numPointsToInclude = computeUniquePointsSensed(numColumns, len(sensations), sensationNumber) for j in range(numPointsToInclude): index = sensations[j] bowVectorsTest[objectId, index] += 1 # Count the number of correct classifications. 
# A correct classification is where object i is unambiguously recognized. numCorrect = 0 for i in range(numObjects): overlaps = classifierPredict(bowVectorsTest[i, :], storedObjectRepresentations) bestOverlap = max(overlaps) outcome = ( (overlaps[i] == bestOverlap) and len(np.where(overlaps==bestOverlap)[0]) == 1) numCorrect += outcome classificationOutcome[i, sensationNumber] = outcome accuracy = float(numCorrect) / numObjects accuracyList.append(accuracy) convergencePoint = np.zeros((numObjects, )) for i in range(numObjects): if np.max(classificationOutcome[i, :])>0: convergencePoint[i] = locateConvergencePoint(classificationOutcome[i, :]) else: convergencePoint[i] = 11 args.update({"accuracy": accuracyList}) args.update({"numTouches": range(1, 11)}) args.update({"convergencePoint": np.mean(convergencePoint)}) args.update({"classificationOutcome": classificationOutcome}) print "objects={}, features={}, locations={}, distinct words={}, numColumns={}".format( numObjects, numFeatures, numLocations, numWords, numColumns), print "==> convergencePoint:", args["convergencePoint"] return args
def runExperiment(args):
  """
  Run a multi-column convergence experiment. args is a dict representing the
  parameters. We do it this way to support multiprocessing. args contains one
  or more of the following keys:

  @param featureNoise (float) Noise level to add to the features
                             during inference. Default: None
  @param locationNoise (float) Noise level to add to the locations
                             during inference. Default: None
  @param numObjects  (int)   The number of objects we will train.
                             Default: 10
  @param numPoints   (int)   The number of points on each object.
                             Default: 10
  @param numLocations (int)  For each point, the number of locations to choose
                             from.  Default: 10
  @param numFeatures (int)   For each point, the number of features to choose
                             from.  Default: 10
  @param numColumns  (int)   The total number of cortical columns in network.
                             Default: 2
  @param networkType (string)The type of network to use.  Options are:
                             "MultipleL4L2Columns",
                             "MultipleL4L2ColumnsWithTopology" and
                             "MultipleL4L2ColumnsWithRandomTopology".
                             Default: "MultipleL4L2Columns"
  @param longDistanceConnections (float) The probability that a column will
                             connect to a distant column.  Only relevant when
                             using the random topology network type.
                             If > 1, will instead be taken as desired number
                             of long-distance connections per column.
  @param settlingTime (int)  Number of iterations we wait to let columns
                             stabilize. Important for multicolumn experiments
                             with lateral connections.
  @param includeRandomLocation (bool) If True, a random location SDR will be
                             generated during inference for each feature.
  @param enableFeedback (bool) If True, enable feedback, default is True
  @param numAmbiguousLocations (int) number of ambiguous locations. Ambiguous
                             locations will present during inference if this
                             parameter is set to be a positive number

  The method returns the args dict updated with multiple additional keys
  representing accuracy metrics.
  """
  numObjects = args.get("numObjects", 10)
  numLocations = args.get("numLocations", 10)
  numFeatures = args.get("numFeatures", 10)
  numColumns = args.get("numColumns", 2)
  networkType = args.get("networkType", "MultipleL4L2Columns")
  longDistanceConnections = args.get("longDistanceConnections", 0)
  locationNoise = args.get("locationNoise", 0.0)
  featureNoise = args.get("featureNoise", 0.0)
  numPoints = args.get("numPoints", 10)
  trialNum = args.get("trialNum", 42)
  plotInferenceStats = args.get("plotInferenceStats", True)
  settlingTime = args.get("settlingTime", 3)
  includeRandomLocation = args.get("includeRandomLocation", False)
  enableFeedback = args.get("enableFeedback", True)
  numAmbiguousLocations = args.get("numAmbiguousLocations", 0)
  # NOTE(review): numInferenceRpts is read but never used in this function —
  # confirm whether inference repetition was meant to be implemented here.
  numInferenceRpts = args.get("numInferenceRpts", 1)
  l2Params = args.get("l2Params", None)
  l4Params = args.get("l4Params", None)

  # Create the objects. trialNum doubles as the object-machine seed so each
  # trial sees a different but reproducible object set.
  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=150,
    externalInputSize=2400,
    numCorticalColumns=numColumns,
    numFeatures=numFeatures,
    numLocations=numLocations,
    seed=trialNum
  )
  objects.createRandomObjects(numObjects, numPoints=numPoints,
                              numLocations=numLocations,
                              numFeatures=numFeatures)

  r = objects.objectConfusion()
  print "Average common pairs in objects=", r[0],
  print ", locations=",r[1],", features=",r[2]

  # print "Total number of objects created:",len(objects.getObjects())
  # print "Objects are:"
  # for o in objects:
  #   pairs = objects[o]
  #   pairs.sort()
  #   print str(o) + ": " + str(pairs)

  # Setup experiment and train the network
  name = "convergence_O%03d_L%03d_F%03d_C%03d_T%03d" % (
    numObjects, numLocations, numFeatures, numColumns, trialNum
  )
  exp = L4L2Experiment(
    name,
    numCorticalColumns=numColumns,
    L2Overrides=l2Params,
    L4Overrides=l4Params,
    networkType = networkType,
    longDistanceConnections=longDistanceConnections,
    inputSize=150,
    externalInputSize=2400,
    numInputBits=20,
    seed=trialNum,
    enableFeedback=enableFeedback,
  )

  exp.learnObjects(objects.provideObjectsToLearn())

  # For inference, we will check and plot convergence for each object. For each
  # object, we create a sequence of random sensations for each column. We will
  # present each sensation for settlingTime time steps to let it settle and
  # ensure it converges.
  numCorrectClassifications=0
  # One statistics slot per (sensation, settling step) pair.
  classificationPerSensation = numpy.zeros(settlingTime*numPoints)
  for objectId in objects:
    exp.sendReset()

    obj = objects[objectId]
    objectSensations = {}
    for c in range(numColumns):
      objectSensations[c] = []

    if numColumns > 1:
      # Create sequence of random sensations for this object for all columns At
      # any point in time, ensure each column touches a unique loc,feature pair
      # on the object.  It is ok for a given column to sense a loc,feature pair
      # more than once. The total number of sensations is equal to the number of
      # points on the object.
      for sensationNumber in range(len(obj)):
        # Randomly shuffle points for each sensation; distinct columns then
        # take distinct entries of the shuffled list, guaranteeing uniqueness
        # across columns at this time step.
        objectCopy = [pair for pair in obj]
        random.shuffle(objectCopy)
        for c in range(numColumns):
          # stay multiple steps on each sensation
          for _ in xrange(settlingTime):
            objectSensations[c].append(objectCopy[c])
    else:
      # Create sequence of sensations for this object for one column. The total
      # number of sensations is equal to the number of points on the object. No
      # point should be visited more than once.
      objectCopy = [pair for pair in obj]
      random.shuffle(objectCopy)
      for pair in objectCopy:
        # stay multiple steps on each sensation
        for _ in xrange(settlingTime):
          objectSensations[0].append(pair)

    inferConfig = {
      "object": objectId,
      "numSteps": len(objectSensations[0]),
      "pairs": objectSensations,
      "noiseLevel": featureNoise,
      "locationNoise": locationNoise,
      "includeRandomLocation": includeRandomLocation,
      "numAmbiguousLocations": numAmbiguousLocations,
    }

    inferenceSDRs = objects.provideObjectToInfer(inferConfig)

    # reset=False: the explicit sendReset() above already cleared state.
    exp.infer(inferenceSDRs, objectName=objectId, reset=False)

    # Accumulate per-timestep classification outcomes across objects.
    classificationPerSensation += numpy.array(
      exp.statistics[objectId]["Correct classification"])

    if exp.isObjectClassified(objectId, minOverlap=30):
      numCorrectClassifications += 1

    if plotInferenceStats:
      exp.plotInferenceStats(
        fields=["L2 Representation",
                "Overlap L2 with object",
                "L4 Representation"],
        experimentID=objectId,
        onePlot=False,
      )

  # NOTE(review): the unpacked `accuracy` is never used; only the convergence
  # point is reported below.
  convergencePoint, accuracy = exp.averageConvergencePoint("L2 Representation", 30, 40, settlingTime)
  classificationAccuracy = float(numCorrectClassifications) / numObjects
  classificationPerSensation = classificationPerSensation / numObjects

  print "# objects {} # features {} # locations {} # columns {} trial # {} network type {}".format(
    numObjects, numFeatures, numLocations, numColumns, trialNum, networkType)
  print "Average convergence point=",convergencePoint
  print "Classification accuracy=",classificationAccuracy
  print

  # Return our convergence point as well as all the parameters and objects
  args.update({"objects": objects.getObjects()})
  args.update({"convergencePoint":convergencePoint})
  args.update({"classificationAccuracy":classificationAccuracy})
  args.update({"classificationPerSensation":classificationPerSensation.tolist()})

  # Can't pickle experiment so can't return it for batch multiprocessing runs.
  # However this is very useful for debugging when running in a single thread.
  if plotInferenceStats:
    args.update({"experiment": exp})
  return args
def runCapacityTest(numObjects,
                    numPointsPerObject,
                    numCorticalColumns,
                    l2Params,
                    l4Params,
                    objectParams,
                    networkType="MultipleL4L2Columns",
                    repeat=0):
  """
  Generate [numObjects] objects with [numPointsPerObject] points per object.
  Train the L4-L2 network on all the objects with single pass learning,
  then test on (feature, location) pairs and return the test result.

  :param numObjects:         (int)  number of objects to create and learn
  :param numPointsPerObject: (int)  number of (feature, location) points
                                    per object
  :param numCorticalColumns: (int)  number of cortical columns in the network
  :param l2Params:           (dict) L2 region parameter overrides
  :param l4Params:           (dict) L4 region parameter overrides; must
                                    contain "columnCount"
  :param objectParams:       (dict) object generation parameters; must contain
                                    "numInputBits", "externalInputSize",
                                    "numLocations", "numFeatures" and
                                    "uniquePairs"
  :param networkType:        (str)  network topology type
  :param repeat:             (int)  passed through to testOnSingleRandomSDR
  :return: result of testOnSingleRandomSDR on the trained network
  """
  l4ColumnCount = l4Params["columnCount"]
  numInputBits = objectParams['numInputBits']
  externalInputSize = objectParams['externalInputSize']

  # Default the input SDR sparsity to 2% of the L4 column count.
  if numInputBits is None:
    numInputBits = int(l4ColumnCount * 0.02)

  numLocations = objectParams["numLocations"]
  numFeatures = objectParams["numFeatures"]

  objects = createObjectMachine(machineType="simple",
                                numInputBits=numInputBits,
                                sensorInputSize=l4ColumnCount,
                                externalInputSize=externalInputSize,
                                numCorticalColumns=numCorticalColumns,
                                numLocations=numLocations,
                                numFeatures=numFeatures)

  exp = L4L2Experiment("capacity_two_objects",
                       numInputBits=numInputBits,
                       L2Overrides=l2Params,
                       L4Overrides=l4Params,
                       inputSize=l4ColumnCount,
                       networkType=networkType,
                       externalInputSize=externalInputSize,
                       numLearningPoints=3,
                       numCorticalColumns=numCorticalColumns,
                       objectNamesAreIndices=True)

  # Either every object gets globally unique pairs, or pairs may be shared
  # between objects.
  if objectParams["uniquePairs"]:
    pairs = createRandomObjects(numObjects,
                                numPointsPerObject,
                                numLocations,
                                numFeatures)
  else:
    pairs = createRandomObjectsSharedPairs(numObjects,
                                           numPointsPerObject,
                                           numLocations,
                                           numFeatures)

  # Renamed loop variable from `object` to `obj` — `object` shadows the
  # Python builtin.
  for obj in pairs:
    objects.addObject(obj)

  exp.learnObjects(objects.provideObjectsToLearn())

  testResult = testOnSingleRandomSDR(objects, exp, 100, repeat)
  return testResult
def runExperiment(args): """ Run experiment. What did you think this does? args is a dict representing the parameters. We do it this way to support multiprocessing. args contains one or more of the following keys: @param noiseLevel (float) Noise level to add to the locations and features during inference. Default: None @param profile (bool) If True, the network will be profiled after learning and inference. Default: False @param numObjects (int) The number of objects we will train. Default: 10 @param numPoints (int) The number of points on each object. Default: 10 @param numLocations (int) For each point, the number of locations to choose from. Default: 10 @param numFeatures (int) For each point, the number of features to choose from. Default: 10 @param numColumns (int) The total number of cortical columns in network. Default: 2 The method returns the args dict updated with two additional keys: convergencePoint (int) The average number of iterations it took to converge across all objects objects (pairs) The list of objects we trained on """ numObjects = args.get("numObjects", 10) numLocations = args.get("numLocations", 10) numFeatures = args.get("numFeatures", 10) numColumns = args.get("numColumns", 2) profile = args.get("profile", False) noiseLevel = args.get("noiseLevel", None) # TODO: implement this? 
numPoints = args.get("numPoints", 10) trialNum = args.get("trialNum", 42) l2Params = args.get("l2Params", getL2Params()) l4Params = args.get("l4Params", getL4Params()) objectSeed = args.get("objectSeed", 41) # Create the objects objects = createObjectMachine( machineType="simple", numInputBits=20, sensorInputSize=1024, externalInputSize=1024, numCorticalColumns=numColumns, seed=objectSeed, ) objects.createRandomObjects(numObjects, numPoints=numPoints, numLocations=numLocations, numFeatures=numFeatures) # print "Objects are:" # for o in objects: # pairs = objects[o] # pairs.sort() # print str(o) + ": " + str(pairs) # Setup experiment and train the network name = "convergence_O%03d_L%03d_F%03d_C%03d_T%03d" % ( numObjects, numLocations, numFeatures, numColumns, trialNum ) exp = L4L2Experiment( name, L2Overrides=l2Params, L4Overrides=l4Params, numCorticalColumns=numColumns, seed=trialNum ) exp.learnObjects(objects.provideObjectsToLearn()) L2TimeLearn = 0 L2TimeInfer = 0 if profile: # exp.printProfile(reset=True) L2TimeLearn = getProfileInfo(exp) args.update({"L2TimeLearn": L2TimeLearn}) exp.resetProfile() # For inference, we will check and plot convergence for each object. For each # object, we create a sequence of random sensations for each column. We will # present each sensation for 3 time steps to let it settle and ensure it # converges. 
for objectId in objects: obj = objects[objectId] # Create sequence of sensations for this object for all columns objectSensations = {} for c in range(numColumns): objectCopy = [pair for pair in obj] random.shuffle(objectCopy) # stay multiple steps on each sensation sensations = [] for pair in objectCopy: for _ in xrange(2): sensations.append(pair) objectSensations[c] = sensations inferConfig = { "object": objectId, "numSteps": len(objectSensations[0]), "pairs": objectSensations } exp.infer(objects.provideObjectToInfer(inferConfig), objectName=objectId) if profile: L2TimeInfer += getProfileInfo(exp) exp.resetProfile() # exp.printProfile(reset=True) if profile: L2TimeInfer /= len(objects) args.update({"L2TimeInfer": L2TimeInfer}) convergencePoint, _ = exp.averageConvergencePoint("L2 Representation", 40, 40) print "objectSeed {} # distal syn {} # proximal syn {}, " \ "# convergence point={:4.2f} train time {:4.3f} infer time {:4.3f}".format( objectSeed, l2Params["sampleSizeDistal"], l2Params["sampleSizeProximal"], convergencePoint, L2TimeLearn, L2TimeInfer) # Return our convergence point as well as all the parameters and objects args.update({"objects": objects.getObjects()}) args.update({"convergencePoint": convergencePoint}) # prepare experiment results numLateralConnections = [] numProximalConnections = [] for l2Columns in exp.L2Columns: numLateralConnections.append( l2Columns._pooler.numberOfDistalSynapses()) numProximalConnections.append( np.sum(l2Columns._pooler.numberOfProximalSynapses())) result = { 'trial': objectSeed, 'L2TimeLearn': args['L2TimeLearn'], 'L2TimeInfer': args['L2TimeInfer'], 'sampleSizeProximal': l2Params["sampleSizeProximal"], 'sampleSizeDistal': l2Params["sampleSizeDistal"], 'numLateralConnections': np.mean(np.array(numLateralConnections)), 'numProximalConnections': np.mean(np.array(numProximalConnections)), 'convergencePoint': args['convergencePoint']} return result
def runExperiment():
  """
  Run a single-column Bayesian L4-L2 inference demo on randomly generated
  objects and print how quickly L2 converges to the first object's
  representation.

  (We will run two experiments side by side, with either single column
  or 3 columns; the 3-column variant is currently commented out.)
  """
  numColumns = 1 # 3
  numFeatures = 3
  numPoints = 10
  numLocations = 10
  numObjects = 10 # 2
  # Number of timesteps each sensation is repeated for during inference.
  numRptsPerSensation = 2

  objectMachine = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=numColumns,
    seed=40,
  )
  objectMachine.createRandomObjects(numObjects, numPoints=numPoints,
                                    numLocations=numLocations,
                                    numFeatures=numFeatures)

  objects = objectMachine.provideObjectsToLearn()

  # single-out the inputs to the column #1
  # NOTE(review): this iterates range(numLocations); presumably it should be
  # numPoints (points per object) — they happen to both be 10 here. Verify.
  objectsSingleColumn = {}
  for i in range(numObjects):
    featureLocations = []
    for j in range(numLocations):
      featureLocations.append({0: objects[i][j][0]})
    objectsSingleColumn[i] = featureLocations

  maxNumSegments = 2
  # we will run two experiments side by side, with either single column
  # or 3 columns
  # exp3 = L4L2Experiment(
  #   'three_column',
  #   implementation='BayesianApicalTiebreak',
  #   L4RegionType="py.BayesianApicalTMPairRegion",
  #   numCorticalColumns=3,
  #   maxSegmentsPerCell=5,
  #   seed=1
  # )
  exp1 = L4L2Experiment('single_column',
                        implementation='SummingBayesian',
                        L2RegionType="py.BayesianColumnPoolerRegion",
                        L4RegionType="py.BayesianApicalTMPairRegion",
                        numCorticalColumns=1,
                        maxSegmentsPerCell=maxNumSegments,
                        seed=1)

  print "train single column "
  exp1.learnObjects(objectsSingleColumn)
  # print "train multi-column "
  # exp3.learnObjects(objects)

  # test on the first object
  objectId = 0
  obj = objectMachine[objectId]

  # Create sequence of sensations for this object for all columns
  # We need to set the seed to get specific convergence points for the red
  # rectangle in the graph.
  objectSensations = {}
  random.seed(12)
  for c in range(numColumns):
    objectCopy = [pair for pair in obj]
    random.shuffle(objectCopy)
    # stay multiple steps on each sensation
    sensations = []
    for pair in objectCopy:
      for _ in xrange(numRptsPerSensation):
        sensations.append(pair)
    objectSensations[c] = sensations

  # Convert (location, feature) index pairs into SDRs, one entry per timestep.
  sensationStepsSingleColumn = []
  sensationStepsMultiColumn = []
  for step in xrange(len(objectSensations[0])):
    pairs = [objectSensations[col][step] for col in xrange(numColumns)]
    sdrs = objectMachine._getSDRPairs(pairs)
    sensationStepsMultiColumn.append(sdrs)
    sensationStepsSingleColumn.append({0: sdrs[0]})

  # print "inference: multi-columns "
  # exp3.sendReset()
  # l2ActiveCellsMultiColumn = []
  # L2ActiveCellNVsTimeMultiColumn = []
  # for sensation in sensationStepsMultiColumn:
  #   exp3.infer([sensation], objectName=objectId, reset=False)
  #   l2ActiveCellsMultiColumn.append(exp3.getL2Representations())
  #   activeCellNum = 0
  #   for c in range(numColumns):
  #     activeCellNum += len(exp3.getL2Representations()[c])
  #   L2ActiveCellNVsTimeMultiColumn.append(activeCellNum / numColumns)

  print "inference: single column "
  exp1.sendReset()
  l2ActiveCellsSingleColumn = []
  L2ActiveCellNVsTimeSingleColumn = []
  for sensation in sensationStepsSingleColumn:
    exp1.infer([sensation], objectName=objectId, reset=False)
    rep = exp1.getL2Representations()
    l2ActiveCellsSingleColumn.append(rep)
    print "\n\nRepresentation", rep
    print "Length Representation", len(rep[0])
    L2ActiveCellNVsTimeSingleColumn.append(len(rep[0]))

  # Used to figure out where to put the red rectangle!
  # First step at which L2 activity shrank to exactly sdrSize cells.
  sdrSize = exp1.config["L2Params"]["sdrSize"]
  singleColumnHighlight = next(
    (idx for idx, value in enumerate(l2ActiveCellsSingleColumn)
     if len(value[0]) == sdrSize), None)
  # First step at which L2 activity equals the stored object representation.
  firstObjectRepresentation = exp1.objectL2Representations[0][0]
  converged = next((idx for idx, value in enumerate(l2ActiveCellsSingleColumn)
                    if (value[0] == firstObjectRepresentation)), None)

  print "Exactly SDR-Size activity (%s) after %s steps" % (
    sdrSize, singleColumnHighlight)
  print "Converged to first object representation after %s steps" % converged
  print "First Object representation", firstObjectRepresentation
  print "L2 Output over steps", l2ActiveCellsSingleColumn
def run_ideal_classifier(args={}): """ Create and train classifier using given parameters. """ numObjects = args.get("numObjects", 10) numLocations = args.get("numLocations", 10) numFeatures = args.get("numFeatures", 10) numPoints = args.get("numPoints", 10) trialNum = args.get("trialNum", 42) useLocation = args.get("useLocation", 1) numColumns = args.get("numColumns", 1) objects = createObjectMachine(machineType="simple", numInputBits=20, sensorInputSize=150, externalInputSize=2400, numCorticalColumns=numColumns, numFeatures=numFeatures, numLocations=numLocations, seed=trialNum) random.seed(trialNum) objects.createRandomObjects(numObjects, numPoints=numPoints, numLocations=numLocations, numFeatures=numFeatures) objectSDRs = objects.provideObjectsToLearn() objectNames = objectSDRs.keys() featureWidth = objects.sensorInputSize locationWidth = objects.externalInputSize # compute the number of sensations across all objects numInputVectors = numPoints * numObjects if useLocation: inputWidth = featureWidth + locationWidth else: inputWidth = featureWidth # create "training" dataset data = np.zeros((numInputVectors, inputWidth)) label = np.zeros((numInputVectors, numObjects)) k = 0 for i in range(numObjects): numSensations = len(objectSDRs[objectNames[i]]) for j in range(numSensations): activeBitsFeature = np.array( list(objectSDRs[objectNames[i]][j][0][1])) data[k, activeBitsFeature] = 1 if useLocation: activeBitsLocation = np.array( list(objectSDRs[objectNames[i]][j][0][0])) data[k, featureWidth + activeBitsLocation] = 1 label[k, i] = 1 k += 1 # enumerate number of distinct "words". # Note: this could be done much more easily if we simply use the # location/feature pairs that are stored in the object machine. 
wordList = np.zeros((0, inputWidth), dtype='int32') featureList = np.zeros((numInputVectors, )) for i in range(numInputVectors): index = findWordInVocabulary(data[i, :], wordList) if index is not None: featureList[i] = index else: newWord = np.zeros((1, inputWidth), dtype='int32') newWord[0, :] = data[i, :] wordList = np.concatenate((wordList, newWord)) featureList[i] = wordList.shape[0] - 1 numWords = wordList.shape[0] # convert objects to vectorized word representations storedObjectRepresentations = np.zeros((numObjects, numWords), dtype=np.int32) k = 0 for i in range(numObjects): numSensations = len(objectSDRs[objectNames[i]]) for j in range(numSensations): index = findWordInVocabulary(data[k, :], wordList) storedObjectRepresentations[i, index] += 1 k += 1 # Cool plot of feature vectors # plt.figure() # plt.imshow(np.transpose(storedObjectRepresentations)) # plt.xlabel('Object #') # plt.ylabel('Word #') # plt.title("Object representations") # Create random order of sensations for each object objectSensations = [] for i in range(numObjects): senseList = [] wordIndices = np.where(storedObjectRepresentations[i, :])[0] # An object can have multiple instances of a word, in which case we # add all of them for w in wordIndices: senseList.extend(storedObjectRepresentations[i, w] * [w]) random.shuffle(senseList) objectSensations.append(senseList) # plot accuracy as a function of number of sensations accuracyList = [] classificationOutcome = np.zeros((numObjects, numPoints + 1)) for sensationNumber in range(1, numPoints + 1): bowVectorsTest = np.zeros((numObjects, numWords), dtype=np.int32) for objectId in range(numObjects): # No. sensations for object objectId sensations = objectSensations[objectId] numPointsToInclude = computeUniquePointsSensed( numColumns, len(sensations), sensationNumber) for j in range(numPointsToInclude): index = sensations[j] bowVectorsTest[objectId, index] += 1 # Count the number of correct classifications. 
# A correct classification is where object i is unambiguously recognized. numCorrect = 0 for i in range(numObjects): overlaps = classifierPredict(bowVectorsTest[i, :], storedObjectRepresentations) bestOverlap = max(overlaps) outcome = ((overlaps[i] == bestOverlap) and len(np.where(overlaps == bestOverlap)[0]) == 1) numCorrect += outcome classificationOutcome[i, sensationNumber] = outcome accuracy = float(numCorrect) / numObjects accuracyList.append(accuracy) convergencePoint = np.zeros((numObjects, )) for i in range(numObjects): if np.max(classificationOutcome[i, :]) > 0: convergencePoint[i] = locateConvergencePoint( classificationOutcome[i, :]) else: convergencePoint[i] = 11 args.update({"accuracy": accuracyList}) args.update({"numTouches": range(1, 11)}) args.update({"convergencePoint": np.mean(convergencePoint)}) args.update({"classificationOutcome": classificationOutcome}) print "objects={}, features={}, locations={}, distinct words={}, numColumns={}".format( numObjects, numFeatures, numLocations, numWords, numColumns), print "==> convergencePoint:", args["convergencePoint"] return args
  def testDelayedLateralandApicalInputs(self):
    """Test whether lateral and apical inputs are synchronized across columns.

    Verifies the one-timestep delay: at step t+1 each L2 column's lateral
    input equals the other column's L2 output at step t, and each L4 column's
    apical input equals its own L2 column's output at step t.
    """
    # Set up experiment: 2-column network trained on two 2-point objects.
    exp = l2_l4_inference.L4L2Experiment(
      name="sample",
      numCorticalColumns=2,
    )
    objects = createObjectMachine(
      machineType="simple",
      numInputBits=20,
      sensorInputSize=1024,
      externalInputSize=1024,
      numCorticalColumns=2,
    )
    objects.addObject([(1, 1), (2, 2)])
    objects.addObject([(1, 1), (3, 2)])
    exp.learnObjects(objects.provideObjectsToLearn())
    exp._sendReset()

    # Each column repeatedly senses one fixed (location, feature) pair.
    sensationC0 = [(1, 1), (1, 1), (1, 1)]
    sensationC1 = [(3, 2), (3, 2), (3, 2)]
    lateralInputs = []
    apicalInputs = []
    activeCells = []
    for step in range(3):
      inferConfig = {
        "noiseLevel": None,
        "numSteps": 1,
        "pairs": {0: [sensationC0[step]],
                  1: [sensationC1[step]]}
      }
      # reset=False so state carries across the three single-step inferences.
      exp.infer(objects.provideObjectToInfer(inferConfig),
                objectName=1, reset=False)

      # Snapshot the region buffers after each step (copy.copy: the network
      # reuses the underlying arrays on the next step).
      lateralInputs.append(
        {0: copy.copy(
          exp.network.regions['L2Column_0'].getInputData("lateralInput")),
         1: copy.copy(
          exp.network.regions['L2Column_1'].getInputData("lateralInput"))}
      )
      activeCells.append(
        {0: copy.copy(
          exp.network.regions['L2Column_0'].getOutputData("feedForwardOutput")),
         1: copy.copy(
          exp.network.regions['L2Column_1'].getOutputData("feedForwardOutput"))}
      )
      apicalInputs.append((
        {0: copy.copy(
          exp.network.regions['L4Column_0'].getInputData("apicalInput")),
         1: copy.copy(
          exp.network.regions['L4Column_1'].getInputData("apicalInput"))}
      ))

    # no lateral inputs on first iteration
    self.assertEqual(numpy.sum(numpy.abs(lateralInputs[0][0])), 0)
    self.assertEqual(numpy.sum(numpy.abs(lateralInputs[0][1])), 0)

    # no apical inputs to L4 on first iteration
    self.assertEqual(numpy.sum(numpy.abs(apicalInputs[0][0])), 0)
    self.assertEqual(numpy.sum(numpy.abs(apicalInputs[0][1])), 0)

    # lateral inputs of C0 at time t+1 = active cells of C1 at time t
    for step in range(2):
      self.assertEqual(
        numpy.sum(numpy.abs(lateralInputs[step+1][0]-activeCells[step][1])),
        0)
      self.assertEqual(
        numpy.sum(numpy.abs(lateralInputs[step+1][1]-activeCells[step][0])),
        0)

    # apical inputs of L4_0 at time t+1 = active cells of L2_0 at time t
    for step in range(2):
      self.assertEqual(
        numpy.sum(numpy.abs(apicalInputs[step + 1][0] - activeCells[step][0])),
        0)
      self.assertEqual(
        numpy.sum(numpy.abs(apicalInputs[step + 1][1] - activeCells[step][1])),
        0)
def run_bag_of_words_classifier(args): numObjects = args.get("numObjects", 10) numLocations = args.get("numLocations", 10) numFeatures = args.get("numFeatures", 10) numPoints = args.get("numPoints", 10) trialNum = args.get("trialNum", 42) pointRange = args.get("pointRange", 1) useLocation = args.get("useLocation", 1) numColumns = args.get("numColumns", 1) objects = createObjectMachine(machineType="simple", numInputBits=20, sensorInputSize=150, externalInputSize=2400, numCorticalColumns=numColumns, numFeatures=numFeatures, numLocations=numLocations, seed=trialNum) random.seed(trialNum) for p in range(pointRange): objects.createRandomObjects(numObjects, numPoints=numPoints + p, numLocations=numLocations, numFeatures=numFeatures) objects = objects.provideObjectsToLearn() objectNames = objects.keys() numObjs = len(objectNames) featureWidth = 150 locationWidth = 2400 # compute the number of sensations across all objects numInputVectors = 0 for i in range(numObjs): numInputVectors += len(objects[objectNames[i]]) if useLocation: inputWidth = featureWidth + locationWidth else: inputWidth = featureWidth # create "training" dataset data = np.zeros((numInputVectors, inputWidth)) label = np.zeros((numInputVectors, numObjs)) k = 0 for i in range(numObjs): # print "converting object {} ...".format(i) numSenses = len(objects[objectNames[i]]) for j in range(numSenses): activeBitsFeature = np.array(list( objects[objectNames[i]][j][0][1])) data[k, activeBitsFeature] = 1 if useLocation: activeBitsLocation = np.array( list(objects[objectNames[i]][j][0][0])) data[k, featureWidth + activeBitsLocation] = 1 label[k, i] = 1 k += 1 # enumerate number of distinct "words" wordList = np.zeros((0, inputWidth), dtype='int32') featureList = np.zeros((data.shape[0], )) for i in range(data.shape[0]): findWord = False for j in range(wordList.shape[0]): index = findWordInVocabulary(data[i, :], wordList) if index is not None: featureList[i] = index findWord = True break if findWord is False: newWord = 
np.zeros((1, inputWidth), dtype='int32') newWord[0, :] = data[i, :] wordList = np.concatenate((wordList, newWord)) featureList[i] = wordList.shape[0] - 1 numWords = wordList.shape[0] # wordList = wordList[np.random.permutation(np.arange(numWords)), :] print "object # {} feature # {} location # {} distinct words # {} numColumns {}".format( numObjects, numFeatures, numLocations, numWords, numColumns) # convert objects to BOW representations bowVectors = np.zeros((numObjs, numWords), dtype=np.int32) k = 0 for i in range(numObjs): numSenses = len(objects[objectNames[i]]) for j in range(numSenses): index = findWordInVocabulary(data[k, :], wordList) bowVectors[i, index] += 1 k += 1 # plt.figure() # plt.imshow(np.transpose(bowVectors)) # plt.xlabel('Object #') # plt.ylabel('Word #') # plt.title("BoW representations") objects = [] for i in range(numObjs): senseList = [] senses = np.where(bowVectors[i, :])[0] for sense in senses.tolist(): # print bowVectors[i, sense] for _ in range(bowVectors[i, sense]): senseList.append(sense) random.shuffle(senseList) objects.append(senseList) # plot accuracy as a function of number of sensations accuracyList = [] classificationOutcome = np.zeros((numObjs, 11)) for maxSenses in range(1, 11): bowVectorsTest = np.zeros((numObjs, numWords), dtype=np.int32) offset = 0 for i in range(numObjs): numSenses = min(len(objects[objectNames[i]]), maxSenses) # # # sensations for object i # senses = np.where(bowVectors[i, :])[0] senses = objects[i] # if i==42: # print senses for c in range(numColumns): for j in range(numSenses): # index = np.random.choice(senses) index = senses[j] # index = findWordInVocabulary(data[offset + j, :], wordList) bowVectorsTest[i, index] += 1 offset += len(objects[objectNames[i]]) numCorrect = 0 for i in range(numObjs): output = bowClassifierPredict(bowVectorsTest[i, :], bowVectors) # if i==42: # print # print bowVectorsTest[i, :] # print bowVectors[i, :] # print "maxSenses=", maxSenses # print -output # print -output[42] 
predictLabel = np.argmin(output) outcome = predictLabel == i numCorrect += outcome classificationOutcome[i, maxSenses] = outcome accuracy = float(numCorrect) / numObjs accuracyList.append(accuracy) # print "maxSenses {} accuracy {}".format(maxSenses, accuracy) convergencePoint = np.zeros((numObjs, )) for i in range(numObjs): # if i==42: # print "obj ", i, "result: ", classificationOutcome[i, :] # print locateConvergencePoint(classificationOutcome[i, :]) if np.max(classificationOutcome[i, :]) > 0: convergencePoint[i] = locateConvergencePoint( classificationOutcome[i, :]) # convergencePoint[i] = np.where(classificationOutcome[i, :] == 1)[0][0] else: convergencePoint[i] = 11 args.update({"accuracy": accuracyList}) args.update({"numTouches": range(1, 11)}) args.update({"convergencePoint": np.mean(convergencePoint)}) args.update({"classificationOutcome": classificationOutcome}) return args
def runExperiment(args):
  """
  Run a multi-column convergence/classification experiment.

  args is a dict representing the parameters. We do it this way to support
  multiprocessing. args contains one or more of the following keys:

  @param featureNoise (float)  Noise level to add to the features during
                               inference. Default: None
  @param locationNoise (float) Noise level to add to the locations during
                               inference. Default: None
  @param numObjects (int)      The number of objects we will train.
                               Default: 10
  @param numPoints (int)       The number of points on each object.
                               Default: 10
  @param numLocations (int)    For each point, the number of locations to
                               choose from. Default: 10
  @param numFeatures (int)     For each point, the number of features to
                               choose from. Default: 10
  @param numColumns (int)      The total number of cortical columns in
                               network. Default: 2
  @param networkType (string)  The type of network to use. Options are:
                               "MultipleL4L2Columns",
                               "MultipleL4L2ColumnsWithTopology" and
                               "MultipleL4L2ColumnsWithRandomTopology".
                               Default: "MultipleL4L2Columns"
  @param longDistanceConnections (float) The probability that a column will
                               connect to a distant column. Only relevant when
                               using the random topology network type.
                               If > 1, will instead be taken as desired number
                               of long-distance connections per column.
  @param settlingTime (int)    Number of iterations we wait to let columns
                               stabilize. Important for multicolumn experiments
                               with lateral connections.
  @param includeRandomLocation (bool) If True, a random location SDR will be
                               generated during inference for each feature.
  @param enableFeedback (bool) If True, enable feedback, default is True
  @param numAmbiguousLocations (int) number of ambiguous locations. Ambiguous
                               locations will present during inference if this
                               parameter is set to be a positive number

  The method returns the args dict updated with multiple additional keys
  representing accuracy metrics.
  """
  numObjects = args.get("numObjects", 10)
  numLocations = args.get("numLocations", 10)
  numFeatures = args.get("numFeatures", 10)
  numColumns = args.get("numColumns", 2)
  networkType = args.get("networkType", "MultipleL4L2Columns")
  longDistanceConnections = args.get("longDistanceConnections", 0)
  locationNoise = args.get("locationNoise", 0.0)
  featureNoise = args.get("featureNoise", 0.0)
  numPoints = args.get("numPoints", 10)
  trialNum = args.get("trialNum", 42)
  plotInferenceStats = args.get("plotInferenceStats", True)
  settlingTime = args.get("settlingTime", 3)
  includeRandomLocation = args.get("includeRandomLocation", False)
  enableFeedback = args.get("enableFeedback", True)
  numAmbiguousLocations = args.get("numAmbiguousLocations", 0)
  numInferenceRpts = args.get("numInferenceRpts", 1)  # NOTE: read but unused in this function
  l2Params = args.get("l2Params", None)
  l4Params = args.get("l4Params", None)

  # Create the objects
  objects = createObjectMachine(machineType="simple",
                                numInputBits=20,
                                sensorInputSize=150,
                                externalInputSize=2400,
                                numCorticalColumns=numColumns,
                                numFeatures=numFeatures,
                                numLocations=numLocations,
                                seed=trialNum)
  objects.createRandomObjects(numObjects, numPoints=numPoints,
                              numLocations=numLocations,
                              numFeatures=numFeatures)

  # Report how confusable the generated objects are (shared pairs/locations/
  # features) before training.
  r = objects.objectConfusion()
  print "Average common pairs in objects=", r[0],
  print ", locations=", r[1], ", features=", r[2]

  # print "Total number of objects created:",len(objects.getObjects())
  # print "Objects are:"
  # for o in objects:
  #   pairs = objects[o]
  #   pairs.sort()
  #   print str(o) + ": " + str(pairs)

  # Setup experiment and train the network
  name = "convergence_O%03d_L%03d_F%03d_C%03d_T%03d" % (
    numObjects, numLocations, numFeatures, numColumns, trialNum)
  exp = L4L2Experiment(
    name,
    numCorticalColumns=numColumns,
    L2Overrides=l2Params,
    L4Overrides=l4Params,
    networkType=networkType,
    longDistanceConnections=longDistanceConnections,
    inputSize=150,
    externalInputSize=2400,
    numInputBits=20,
    seed=trialNum,
    enableFeedback=enableFeedback,
  )

  exp.learnObjects(objects.provideObjectsToLearn())

  # For inference, we will check and plot convergence for each object. For each
  # object, we create a sequence of random sensations for each column. We will
  # present each sensation for settlingTime time steps to let it settle and
  # ensure it converges.
  numCorrectClassifications = 0
  for objectId in objects:
    exp.sendReset()

    obj = objects[objectId]
    objectSensations = {}
    for c in range(numColumns):
      objectSensations[c] = []

    if numColumns > 1:
      # Create sequence of random sensations for this object for all columns At
      # any point in time, ensure each column touches a unique loc,feature pair
      # on the object. It is ok for a given column to sense a loc,feature pair
      # more than once. The total number of sensations is equal to the number of
      # points on the object.
      for sensationNumber in range(len(obj)):
        # Randomly shuffle points for each sensation
        objectCopy = [pair for pair in obj]
        random.shuffle(objectCopy)
        for c in range(numColumns):
          # stay multiple steps on each sensation
          for _ in xrange(settlingTime):
            objectSensations[c].append(objectCopy[c])
    else:
      # Create sequence of sensations for this object for one column. The total
      # number of sensations is equal to the number of points on the object. No
      # point should be visited more than once.
      objectCopy = [pair for pair in obj]
      random.shuffle(objectCopy)
      for pair in objectCopy:
        # stay multiple steps on each sensation
        for _ in xrange(settlingTime):
          objectSensations[0].append(pair)

    inferConfig = {
      "object": objectId,
      "numSteps": len(objectSensations[0]),
      "pairs": objectSensations,
      "noiseLevel": featureNoise,
      "locationNoise": locationNoise,
      "includeRandomLocation": includeRandomLocation,
      "numAmbiguousLocations": numAmbiguousLocations,
    }

    inferenceSDRs = objects.provideObjectToInfer(inferConfig)

    exp.infer(inferenceSDRs, objectName=objectId, reset=False)

    # Correct classification requires the target object to be the single
    # fully-recognized candidate (overlap threshold 30).
    classificationResults = exp.getCurrentClassification(30, includeZeros=False)
    # print "Classification for object",objectId, "=", classificationResults
    if (classificationResults.get(objectId, 0.0) == 1.0 and
        len(classificationResults) == 1):
      numCorrectClassifications += 1

    if plotInferenceStats:
      exp.plotInferenceStats(
        fields=[
          "L2 Representation",
          "Overlap L2 with object",
          "L4 Representation"
        ],
        experimentID=objectId,
        onePlot=False,
      )

  convergencePoint = exp.averageConvergencePoint("L2 Representation", 30, 40,
                                                 settlingTime)
  classificationAccuracy = float(numCorrectClassifications) / numObjects

  print "# objects {} # features {} # locations {} # columns {} trial # {} network type {}".format(
    numObjects, numFeatures, numLocations, numColumns, trialNum, networkType)
  print "Average convergence point=", convergencePoint
  print "Classification accuracy=", classificationAccuracy
  print

  # Return our convergence point as well as all the parameters and objects
  args.update({"objects": objects.getObjects()})
  args.update({"convergencePoint": convergencePoint})
  args.update({"classificationAccuracy": classificationAccuracy})

  # Can't pickle experiment so can't return it for batch multiprocessing runs.
  # However this is very useful for debugging when running in a single thread.
  if plotInferenceStats:
    args.update({"experiment": exp})
  return args
def runExperiment(args):
  """
  Run a distinct-pair pretraining experiment: L4/L2 are trained on individual
  feature/location pairs, then object representations are formed in L2 by
  presenting the union of each object's features and locations.

  args is a dict representing the parameters. We do it this way to support
  multiprocessing.

  The method returns the args dict updated with multiple additional keys
  representing accuracy metrics.
  """
  numObjects = args.get("numObjects", 10)
  numLocations = args.get("numLocations", 10)
  numFeatures = args.get("numFeatures", 10)
  numColumns = args.get("numColumns", 2)
  sensorInputSize = args.get("sensorInputSize", 300)
  networkType = args.get("networkType", "MultipleL4L2Columns")
  longDistanceConnections = args.get("longDistanceConnections", 0)
  locationNoise = args.get("locationNoise", 0.0)
  featureNoise = args.get("featureNoise", 0.0)
  numPoints = args.get("numPoints", 10)
  trialNum = args.get("trialNum", 42)
  plotInferenceStats = args.get("plotInferenceStats", True)
  settlingTime = args.get("settlingTime", 3)
  includeRandomLocation = args.get("includeRandomLocation", False)
  enableFeedback = args.get("enableFeedback", True)
  numAmbiguousLocations = args.get("numAmbiguousLocations", 0)
  numInferenceRpts = args.get("numInferenceRpts", 1)  # NOTE: read but unused in this function
  numLearningRpts = args.get("numLearningRpts", 3)
  l2Params = args.get("l2Params", None)
  l4Params = args.get("l4Params", None)

  # Create the objects
  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=sensorInputSize,
    externalInputSize=2400,
    numCorticalColumns=numColumns,
    numFeatures=numFeatures,
    numLocations=numLocations,
    seed=trialNum
  )
  objects.createRandomObjects(numObjects, numPoints=numPoints,
                              numLocations=numLocations,
                              numFeatures=numFeatures)

  r = objects.objectConfusion()
  print "Average common pairs in objects=", r[0],
  print ", locations=",r[1],", features=",r[2]

  # print "Total number of objects created:",len(objects.getObjects())
  # print "Objects are:"
  # for o in objects:
  #   pairs = objects[o]
  #   pairs.sort()
  #   print str(o) + ": " + str(pairs)

  # This object machine will simulate objects where each object is just one
  # unique feature/location pair. We will use this to pretrain L4/L2 with
  # individual pairs.
  pairObjects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=sensorInputSize,
    externalInputSize=2400,
    numCorticalColumns=numColumns,
    numFeatures=numFeatures,
    numLocations=numLocations,
    seed=trialNum
  )

  # Create "pair objects" consisting of all unique F/L pairs from our objects.
  # These pairs should have the same SDRs as the original objects, which is why
  # we share the location and feature pools between the two machines.
  pairObjects.locations = objects.locations
  pairObjects.features = objects.features
  distinctPairs = objects.getDistinctPairs()
  print "Number of distinct feature/location pairs:",len(distinctPairs)
  for pairNumber,pair in enumerate(distinctPairs):
    pairObjects.addObject([pair], pairNumber)

  #####################################################
  #
  # Setup experiment and train the network
  name = "dp_O%03d_L%03d_F%03d_C%03d_T%03d" % (
    numObjects, numLocations, numFeatures, numColumns, trialNum
  )
  exp = L4L2Experiment(
    name,
    numCorticalColumns=numColumns,
    L2Overrides=l2Params,
    L4Overrides=l4Params,
    networkType = networkType,
    longDistanceConnections=longDistanceConnections,
    inputSize=sensorInputSize,
    externalInputSize=2400,
    numInputBits=20,
    seed=trialNum,
    enableFeedback=enableFeedback,
    numLearningPoints=numLearningRpts,
  )

  # Learn all FL pairs in each L4 and in each L2
  # Learning in L2 involves choosing a small random number of cells, growing
  # proximal synapses to L4 cells. Growing distal synapses to active cells in
  # each neighboring column. Each column gets its own distal segment.
  exp.learnObjects(pairObjects.provideObjectsToLearn())

  # Verify that all columns learned the pairs
  # numCorrectClassifications = 0
  # for pairId in pairObjects:
  #
  #   obj = pairObjects[pairId]
  #   objectSensations = {}
  #   for c in range(numColumns):
  #     objectSensations[c] = [obj[0]]*settlingTime
  #
  #   inferConfig = {
  #     "object": pairId,
  #     "numSteps": settlingTime,
  #     "pairs": objectSensations,
  #   }
  #
  #   inferenceSDRs = pairObjects.provideObjectToInfer(inferConfig)
  #
  #   exp.infer(inferenceSDRs, objectName=pairId, reset=False)
  #
  #   if exp.isObjectClassified(pairId, minOverlap=30):
  #     numCorrectClassifications += 1
  #
  #   exp.sendReset()
  #
  # print "Classification accuracy for pairs=",100.0*numCorrectClassifications/len(distinctPairs)

  ########################################################################
  #
  # Create "object representations" in L2 by simultaneously invoking the union
  # of all FL pairs in an object and doing some sort of spatial pooling to
  # create L2 representation.
  exp.resetStatistics()
  for objectId in objects:
    # Create one sensation per object consisting of the union of all features
    # and the union of locations.
    ul, uf = objects.getUniqueFeaturesLocationsInObject(objectId)
    print "Object",objectId,"Num unique features:",len(uf),"Num unique locations:",len(ul)
    objectSensations = {}
    for c in range(numColumns):
      objectSensations[c] = [(tuple(ul), tuple(uf))]*settlingTime

    inferConfig = {
      "object": objectId,
      "numSteps": settlingTime,
      "pairs": objectSensations,
    }

    inferenceSDRs = objects.provideObjectToInfer(inferConfig)

    exp.infer(inferenceSDRs, objectName="Object "+str(objectId))

  # Compute confusion matrix between all objects as network settles; each cell
  # counts the overlap of the column-0 L2 SDRs at that settling iteration.
  for iteration in range(settlingTime):
    confusion = numpy.zeros((numObjects, numObjects))
    for o1 in objects:
      for o2 in objects:
        confusion[o1, o2] = len(set(exp.statistics[o1]["Full L2 SDR C0"][iteration]) &
                                set(exp.statistics[o2]["Full L2 SDR C0"][iteration]) )

    plt.figure()
    plt.imshow(confusion)
    plt.xlabel('Object #')
    plt.ylabel('Object #')
    plt.title("Object overlaps")
    plt.colorbar()
    plt.savefig("confusion_random_10L_5F_"+str(iteration)+".pdf")
    plt.close()

  for col in range(numColumns):
    print "Diagnostics for column",col
    printColumnPoolerDiagnostics(exp.getAlgorithmInstance(column=col))
    print

  return args
def runBasic(noiseLevel=None, profile=False):
  """
  Runs a basic experiment on continuous locations, learning a few locations on
  four basic objects, and inferring one of them.

  This experiment is mostly used for testing the pipeline, as the learned
  locations are too random and sparse to actually perform inference.

  Parameters:
  ----------------------------
  @param   noiseLevel (float)
           Noise level to add to the locations and features during inference

  @param   profile (bool)
           If True, the network will be profiled after learning and inference
  """
  experiment = L4L2Experiment("basic_continuous", numCorticalColumns=2)

  machine = createObjectMachine(
    machineType="continuous",
    numInputBits=21,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=2,
  )

  # Register the four basic shapes.
  machine.addObject(Sphere(radius=20), name="sphere")
  machine.addObject(Cylinder(height=50, radius=20), name="cylinder")
  machine.addObject(Box(dimensions=[10, 20, 30]), name="box")
  machine.addObject(Cube(width=20), name="cube")

  learnConfig = {
    "sphere": [("surface", 10)],
    # the two learning config below will be exactly the same
    "box": [("face", 5), ("edge", 5), ("vertex", 5)],
    "cube": [(f, 5) for f in machine["cube"].getFeatures()],
    "cylinder": [(f, 5) for f in machine["cylinder"].getFeatures()],
  }

  experiment.learnObjects(
    machine.provideObjectsToLearn(learnConfig, plot=True),
    reset=True,
  )
  if profile:
    experiment.printProfile()

  inferConfig = {
    "numSteps": 4,
    "noiseLevel": noiseLevel,
    "objectName": "cube",
    "pairs": {
      0: ["face", "face", "edge", "edge"],
      1: ["edge", "face", "face", "edge"],
    },
  }

  experiment.infer(
    machine.provideObjectToInfer(inferConfig, plot=True),
    objectName="cube",
    reset=True,
  )
  if profile:
    experiment.printProfile()

  experiment.plotInferenceStats(
    fields=[
      "L2 Representation",
      "Overlap L2 with object",
      "L4 Representation",
    ],
  )
return fig if __name__ == "__main__": numColumns = 3 numFeatures = 3 numPoints = 10 numLocations = 10 numObjects = 10 numRptsPerSensation = 2 objectMachine = createObjectMachine( machineType="simple", numInputBits=20, sensorInputSize=1024, externalInputSize=1024, numCorticalColumns=3, seed=40, ) objectMachine.createRandomObjects(numObjects, numPoints=numPoints, numLocations=numLocations, numFeatures=numFeatures) objects = objectMachine.provideObjectsToLearn() # single-out the inputs to the column #1 objectsSingleColumn = {} for i in range(numObjects): featureLocations = [] for j in range(numLocations): featureLocations.append({0: objects[i][j][0]})
def runExperiment(args):
  """
  Run a basic convergence experiment.

  args is a dict representing the parameters. We do it this way to support
  multiprocessing. args contains one or more of the following keys:

  @param noiseLevel  (float) Noise level to add to the locations and features
                             during inference. Default: None
  @param profile     (bool)  If True, the network will be profiled after
                             learning and inference. Default: False
  @param numObjects  (int)   The number of objects we will train.
                             Default: 10
  @param numPoints   (int)   The number of points on each object.
                             Default: 10
  @param numLocations (int)  For each point, the number of locations to choose
                             from.  Default: 10
  @param numFeatures (int)   For each point, the number of features to choose
                             from.  Default: 10
  @param numColumns  (int)   The total number of cortical columns in network.
                             Default: 2

  The method returns the args dict updated with two additional keys:
    convergencePoint (int)   The average number of iterations it took
                             to converge across all objects
    objects          (pairs) The list of objects we trained on
  """
  numObjects = args.get("numObjects", 10)
  numLocations = args.get("numLocations", 10)
  numFeatures = args.get("numFeatures", 10)
  numColumns = args.get("numColumns", 2)
  profile = args.get("profile", False)
  noiseLevel = args.get("noiseLevel", None)  # TODO: implement this?
  numPoints = args.get("numPoints", 10)
  trialNum = args.get("trialNum", 42)
  plotInferenceStats = args.get("plotInferenceStats", True)

  # Create the objects
  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=numColumns,
  )
  objects.createRandomObjects(numObjects, numPoints=numPoints,
                              numLocations=numLocations,
                              numFeatures=numFeatures)

  # print "Objects are:"
  # for o in objects:
  #   pairs = objects[o]
  #   pairs.sort()
  #   print str(o) + ": " + str(pairs)

  # Setup experiment and train the network
  name = "convergence_O%03d_L%03d_F%03d_C%03d_T%03d" % (
    numObjects, numLocations, numFeatures, numColumns, trialNum
  )
  exp = L4L2Experiment(
    name,
    numCorticalColumns=numColumns,
    seed=trialNum
  )

  exp.learnObjects(objects.provideObjectsToLearn())
  if profile:
    exp.printProfile(reset=True)

  # For inference, we will check and plot convergence for each object. For each
  # object, we create a sequence of random sensations for each column. We will
  # present each sensation for 3 time steps to let it settle and ensure it
  # converges.
  for objectId in objects:
    obj = objects[objectId]

    # Create sequence of sensations for this object for all columns
    objectSensations = {}
    for c in range(numColumns):
      objectCopy = [pair for pair in obj]
      random.shuffle(objectCopy)
      # stay multiple steps on each sensation
      sensations = []
      for pair in objectCopy:
        for _ in xrange(2):
          sensations.append(pair)
      objectSensations[c] = sensations

    inferConfig = {
      "object": objectId,
      "numSteps": len(objectSensations[0]),
      "pairs": objectSensations
    }

    exp.infer(objects.provideObjectToInfer(inferConfig), objectName=objectId)
    if profile:
      exp.printProfile(reset=True)

    if plotInferenceStats:
      exp.plotInferenceStats(
        fields=["L2 Representation",
                "Overlap L2 with object",
                "L4 Representation"],
        experimentID=objectId,
        onePlot=False,
      )

  convergencePoint = averageConvergencePoint(
    exp.getInferenceStats(),"L2 Representation", 40)
  print
  print "# objects {} # features {} # locations {} # columns {} trial # {}".format(
    numObjects, numFeatures, numLocations, numColumns, trialNum)
  print "Average convergence point=",convergencePoint

  # Return our convergence point as well as all the parameters and objects
  args.update({"objects": objects.getObjects()})
  args.update({"convergencePoint":convergencePoint})

  # Can't pickle experiment so can't return it. However this is very useful
  # for debugging when running in a single thread.
  # args.update({"experiment": exp})
  return args
def runExperiment(args):
  """
  Run a combined sequence/object experiment: trains an L4TM network on both
  temporal sequences and sensorimotor objects, then infers a mix of the two.

  args is a dict representing the parameters. We do it this way to support
  multiprocessing. args contains one or more of the following keys:

  @param noiseLevel   (float) Noise level to add to the locations and features
                              during inference. Default: None
  @param numSequences (int)   The number of objects (sequences) we will train.
                              Default: 10
  @param seqLength    (int)   The number of points on each object (length of
                              each sequence). Default: 10
  @param numFeatures  (int)   For each point, the number of features to choose
                              from.  Default: 10
  @param numColumns   (int)   The total number of cortical columns in network.
                              Default: 2

  The method returns the args dict updated with two additional keys:
    convergencePoint (int)   The average number of iterations it took
                             to converge across all objects
    objects          (pairs) The list of objects we trained on
  """
  numObjects = args.get("numObjects", 10)
  numSequences = args.get("numSequences", 10)
  numFeatures = args.get("numFeatures", 10)
  numColumns = args.get("numColumns", 1)
  networkType = args.get("networkType", "L4L2TMColumn")
  noiseLevel = args.get("noiseLevel", None)  # TODO: implement this?
  seqLength = args.get("seqLength", 10)
  numPoints = args.get("numPoints", 10)
  trialNum = args.get("trialNum", 42)
  plotInferenceStats = args.get("plotInferenceStats", True)
  inputSize = args.get("inputSize", 512)
  numLocations = args.get("numLocations", 100000)
  numInputBits = args.get("inputBits", 20)
  settlingTime = args.get("settlingTime", 3)

  random.seed(trialNum)

  #####################################################
  #
  # Create the sequences and objects, and make sure they share the
  # same features and locations.

  sequences = createObjectMachine(
    machineType="sequence",
    numInputBits=numInputBits,
    sensorInputSize=inputSize,
    externalInputSize=1024,
    numCorticalColumns=numColumns,
    numFeatures=numFeatures,
    # numLocations=numLocations,
    seed=trialNum
  )

  objects = createObjectMachine(
    machineType="simple",
    numInputBits=numInputBits,
    sensorInputSize=inputSize,
    externalInputSize=1024,
    numCorticalColumns=numColumns,
    numFeatures=numFeatures,
    # numLocations=numLocations,
    seed=trialNum
  )

  # Make sure they share the same features and locations
  objects.locations = sequences.locations
  objects.features = sequences.features

  sequences.createRandomSequences(numSequences, seqLength)
  objects.createRandomObjects(numObjects, numPoints=numPoints,
                              numLocations=numLocations,
                              numFeatures=numFeatures)

  r = sequences.objectConfusion()
  print "Average common pairs in sequences=", r[0],
  print ", features=",r[2]

  r = objects.objectConfusion()
  print "Average common pairs in objects=", r[0],
  print ", locations=",r[1],
  print ", features=",r[2]

  print "Total number of objects created:",len(objects.getObjects())
  print "Objects are:"
  for o in objects:
    pairs = objects[o]
    pairs.sort()
    print str(o) + ": " + str(pairs)
  print "Total number of sequences created:",len(sequences.getObjects())
  print "Sequences:"
  for i in sequences:
    print i,sequences[i]

  #####################################################
  #
  # Setup experiment and train the network
  name = "combined_sequences_S%03d_F%03d_L%03d_T%03d" % (
    numSequences, numFeatures, numLocations, trialNum
  )
  exp = L4TMExperiment(
    name=name,
    numCorticalColumns=numColumns,
    networkType = networkType,
    inputSize=inputSize,
    numExternalInputBits=numInputBits,
    externalInputSize=1024,
    numInputBits=numInputBits,
    seed=trialNum,
    L4Overrides={"initialPermanence": 0.41,
                 "activationThreshold": 18,
                 "minThreshold": 18,
                 "basalPredictedSegmentDecrement": 0.0001},
  )

  # Train the network on all the sequences
  print "Training sequences"
  for seqName in sequences:
    # Make sure we learn enough times to deal with high order sequences and
    # remove extra predictions.
    for p in range(3*seqLength):
      # Ensure we generate new random location for each sequence presentation
      objectSDRs = sequences.provideObjectsToLearn([seqName])
      exp.learnObjects(objectSDRs, reset=False)
      # TM needs reset between sequences, but not other regions
      exp.TMColumns[0].reset()
    # L2 needs resets when we switch to new object
    exp.sendReset()

  # Train the network on all the objects
  # We want to traverse the features of each object randomly a few times before
  # moving on to the next object. Create the SDRs that we need for this.
  print "Training objects"
  objectsToLearn = objects.provideObjectsToLearn()
  objectTraversals = {}
  for objectId in objectsToLearn:
    # Object ids are offset by numSequences so they don't collide with the
    # sequence ids in the experiment statistics.
    objectTraversals[objectId+numSequences] = objects.randomTraversal(
      objectsToLearn[objectId], settlingTime)

  # Train the network on all the SDRs for all the objects
  exp.learnObjects(objectTraversals)

  #####################################################
  #
  # For inference, we will randomly pick an object or a sequence and
  # check and plot convergence for each item.

  for trial,itemType in enumerate(["sequence", "object", "sequence", "object",
                                   "sequence", "sequence", "object",
                                   "sequence", ]):
    # itemType = ["sequence", "object"][random.randint(0, 1)]
    if itemType == "sequence":
      objectId = random.randint(0, numSequences-1)
      obj = sequences[objectId]

      objectSensations = {}
      for c in range(numColumns):
        objectSensations[c] = []

      # Create sequence of sensations for this object for one column. The total
      # number of sensations is equal to the number of points on the object. No
      # point should be visited more than once.
      objectCopy = [pair for pair in obj]
      for pair in objectCopy:
        objectSensations[0].append(pair)

      inferConfig = {
        "object": objectId + numSequences,
        "numSteps": len(objectSensations[0]),
        "pairs": objectSensations,
      }

      inferenceSDRs = sequences.provideObjectToInfer(inferConfig)

      exp.infer(inferenceSDRs, objectName=objectId)

    else:
      objectId = random.randint(0, numObjects-1)
      # For each object, we create a sequence of random sensations.  We will
      # present each sensation for one time step.
      obj = objects[objectId]

      objectSensations = {}
      objectSensations[0] = []

      # Create sequence of sensations for this object for one column. The total
      # number of sensations is equal to the number of points on the object. No
      # point should be visited more than once.
      objectCopy = [pair for pair in obj]
      random.shuffle(objectCopy)
      for pair in objectCopy:
        objectSensations[0].append(pair)

      inferConfig = {
        "object": objectId,
        "numSteps": len(objectSensations[0]),
        "pairs": objectSensations,
        "includeRandomLocation": False,
      }

      inferenceSDRs = objects.provideObjectToInfer(inferConfig)

      objectId += numSequences
      exp.infer(inferenceSDRs, objectName=objectId)

    if plotInferenceStats:
      plotOneInferenceRun(
        exp.statistics[trial],
        fields=[
          # ("L4 Predicted", "Predicted sensorimotor cells"),
          # ("L2 Representation", "L2 Representation"),
          # ("L4 Representation", "Active sensorimotor cells"),
          ("L4 PredictedActive", "Predicted active cells in sensorimotor layer"),
          ("TM NextPredicted", "Predicted cells in temporal sequence layer"),
          ("TM PredictedActive",
           "Predicted active cells in temporal sequence layer"),
        ],
        basename=exp.name,
        itemType=itemType,
        experimentID=trial,
        plotDir=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "detailed_plots")
      )

  # if plotInferenceStats:
  #   plotMultipleInferenceRun(
  #     exp.statistics[0:10],
  #     fields=[
  #       # ("L4 Predicted", "Predicted sensorimotor cells"),
  #       # ("L2 Representation", "L2 Representation"),
  #       # ("L4 Representation", "Active sensorimotor cells"),
  #       ("L4 PredictedActive", "Predicted active cells in sensorimotor layer"),
  #       # ("TM NextPredicted", "Predicted cells in temporal sequence layer"),
  #       ("TM PredictedActive",
  #        "Predicted active cells in temporal sequence layer"),
  #     ],
  #     basename=exp.name,
  #     plotDir=os.path.join(os.path.dirname(os.path.realpath(__file__)),
  #                          "detailed_plots")
  #   )

  # Compute overall inference statistics.  The first two time steps are skipped
  # for the TM averages ([2:]) but not for the L4 averages.
  infStats = exp.getInferenceStats()
  convergencePoint, accuracy = averageConvergencePoint(
    infStats,"L2 Representation", 30, 40, 1)

  predictedActive = numpy.zeros(len(infStats))
  predicted = numpy.zeros(len(infStats))
  predictedActiveL4 = numpy.zeros(len(infStats))
  predictedL4 = numpy.zeros(len(infStats))
  for i,stat in enumerate(infStats):
    predictedActive[i] = float(sum(stat["TM PredictedActive C0"][2:])) / len(stat["TM PredictedActive C0"][2:])
    predicted[i] = float(sum(stat["TM NextPredicted C0"][2:])) / len(stat["TM NextPredicted C0"][2:])

    predictedActiveL4[i] = float(sum(stat["L4 PredictedActive C0"])) / len(stat["L4 PredictedActive C0"])
    predictedL4[i] = float(sum(stat["L4 Predicted C0"])) / len(stat["L4 Predicted C0"])

  print "# Sequences {} # features {} # columns {} trial # {} network type {}".format(
    numSequences, numFeatures, numColumns, trialNum, networkType)
  print "Average convergence point=",convergencePoint,
  print "Accuracy:", accuracy
  print

  # Return our convergence point as well as all the parameters and objects
  args.update({"objects": sequences.getObjects()})
  args.update({"convergencePoint":convergencePoint})
  args.update({"sensorimotorAccuracyPct": accuracy})
  args.update({"averagePredictions": predicted.mean()})
  args.update({"averagePredictedActive": predictedActive.mean()})
  args.update({"averagePredictionsL4": predictedL4.mean()})
  args.update({"averagePredictedActiveL4": predictedActiveL4.mean()})
  args.update({"name": exp.name})
  args.update({"statistics": exp.statistics})

  # Can't pickle experiment so can't return it for batch multiprocessing runs.
  # However this is very useful for debugging when running in a single thread.
  # if plotInferenceStats:
  #   args.update({"experiment": exp})
  return args
def runExperiment():
  """
  We will run two experiments side by side, with either single column
  or 3 columns, training on the same random objects and comparing how the L2
  representations converge during inference on the first object.
  """
  numColumns = 3
  numFeatures = 3
  numPoints = 10
  numLocations = 10
  numObjects = 10
  numRptsPerSensation = 2

  objectMachine = createObjectMachine(machineType="simple",
                                      numInputBits=20,
                                      sensorInputSize=1024,
                                      externalInputSize=1024,
                                      numCorticalColumns=3,
                                      seed=40)
  objectMachine.createRandomObjects(numObjects, numPoints=numPoints,
                                    numLocations=numLocations,
                                    numFeatures=numFeatures)
  objects = objectMachine.provideObjectsToLearn()

  # single-out the inputs to the column #1
  objectsSingleColumn = {}
  for i in range(numObjects):
    featureLocations = []
    for j in range(numLocations):
      featureLocations.append({0: objects[i][j][0]})
    objectsSingleColumn[i] = featureLocations

  # we will run two experiments side by side, with either single column
  # or 3 columns
  exp3 = L4L2Experiment('three_column',
                        numCorticalColumns=3,
                        seed=1)
  exp1 = L4L2Experiment('single_column',
                        numCorticalColumns=1,
                        seed=1)

  print "train single column "
  exp1.learnObjects(objectsSingleColumn)
  print "train multi-column "
  exp3.learnObjects(objects)

  # test on the first object
  objectId = 0
  obj = objectMachine[objectId]

  # Create sequence of sensations for this object for all columns
  # We need to set the seed to get specific convergence points for the red
  # rectangle in the graph.
  objectSensations = {}
  random.seed(12)
  for c in range(numColumns):
    objectCopy = [pair for pair in obj]
    random.shuffle(objectCopy)
    # stay multiple steps on each sensation
    sensations = []
    for pair in objectCopy:
      for _ in xrange(numRptsPerSensation):
        sensations.append(pair)
    objectSensations[c] = sensations

  # Convert per-column (location, feature) pairs into per-step SDR dicts; the
  # single-column run reuses column 0's SDRs so both networks see the same
  # inputs on that column.
  sensationStepsSingleColumn = []
  sensationStepsMultiColumn = []
  for step in xrange(len(objectSensations[0])):
    pairs = [objectSensations[col][step] for col in xrange(numColumns)]
    sdrs = objectMachine._getSDRPairs(pairs)
    sensationStepsMultiColumn.append(sdrs)
    sensationStepsSingleColumn.append({0: sdrs[0]})

  print "inference: multi-columns "
  exp3.sendReset()
  l2ActiveCellsMultiColumn = []
  L2ActiveCellNVsTimeMultiColumn = []
  for sensation in sensationStepsMultiColumn:
    exp3.infer([sensation], objectName=objectId, reset=False)
    l2ActiveCellsMultiColumn.append(exp3.getL2Representations())
    activeCellNum = 0
    for c in range(numColumns):
      activeCellNum += len(exp3.getL2Representations()[c])
    L2ActiveCellNVsTimeMultiColumn.append(activeCellNum / numColumns)

  print "inference: single column "
  exp1.sendReset()
  l2ActiveCellsSingleColumn = []
  L2ActiveCellNVsTimeSingleColumn = []
  for sensation in sensationStepsSingleColumn:
    exp1.infer([sensation], objectName=objectId, reset=False)
    l2ActiveCellsSingleColumn.append(exp1.getL2Representations())
    L2ActiveCellNVsTimeSingleColumn.append(
      len(exp1.getL2Representations()[0]))

  # Used to figure out where to put the red rectangle!  The highlight marks the
  # first step at which column 0's L2 representation shrank to exactly sdrSize
  # cells (None if it never did).
  sdrSize = exp1.config["L2Params"]["sdrSize"]
  singleColumnHighlight = next(
    (idx for idx, value in enumerate(l2ActiveCellsSingleColumn)
     if len(value[0]) == sdrSize), None)
  sdrSize = exp3.config["L2Params"]["sdrSize"]
  multiColumnHighlight = next(
    (idx for idx, value in enumerate(l2ActiveCellsMultiColumn)
     if len(value[0]) == sdrSize), None)

  plotActivity(l2ActiveCellsMultiColumn, multiColumnHighlight)
  plotActivity(l2ActiveCellsSingleColumn, singleColumnHighlight)
  plotL2ObjectRepresentations(exp1)
def runExperiment(args):
  """
  Run experiment.  What did you think this does?

  args is a dict representing the parameters. We do it this way to support
  multiprocessing. args contains one or more of the following keys:

  @param noiseLevel  (float) Noise level to add to the locations and features
                             during inference. Default: None
  @param profile     (bool)  If True, the network will be profiled after
                             learning and inference. Default: False
  @param numObjects  (int)   The number of objects we will train.
                             Default: 10
  @param numPoints   (int)   The number of points on each object.
                             Default: 10
  @param pointRange  (int)   Creates objects each with points ranging from
                             [numPoints,...,numPoints+pointRange-1]
                             A total of numObjects * pointRange objects will be
                             created.  Default: 1
  @param numLocations (int)  For each point, the number of locations to choose
                             from.  Default: 10
  @param numFeatures (int)   For each point, the number of features to choose
                             from.  Default: 10
  @param numColumns  (int)   The total number of cortical columns in network.
                             Default: 2
  @param settlingTime (int)  Number of iterations we wait to let columns
                             stabilize. Important for multicolumn experiments
                             with lateral connections.
  @param includeRandomLocation (bool) If True, a random location SDR will be
                             generated during inference for each feature.

  The method returns the args dict updated with two additional keys:
    convergencePoint (int)   The average number of iterations it took
                             to converge across all objects
    objects          (pairs) The list of objects we trained on
  """
  numObjects = args.get("numObjects", 10)
  numLocations = args.get("numLocations", 10)
  numFeatures = args.get("numFeatures", 10)
  numColumns = args.get("numColumns", 2)
  profile = args.get("profile", False)
  noiseLevel = args.get("noiseLevel", None)  # TODO: implement this?
  numPoints = args.get("numPoints", 10)
  trialNum = args.get("trialNum", 42)
  pointRange = args.get("pointRange", 1)
  plotInferenceStats = args.get("plotInferenceStats", True)
  settlingTime = args.get("settlingTime", 3)
  includeRandomLocation = args.get("includeRandomLocation", False)

  # Create the objects
  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=150,
    externalInputSize=2400,
    numCorticalColumns=numColumns,
    numFeatures=numFeatures,
    seed=trialNum
  )

  # One batch of objects per point count in [numPoints, numPoints+pointRange).
  for p in range(pointRange):
    objects.createRandomObjects(numObjects, numPoints=numPoints+p,
                                numLocations=numLocations,
                                numFeatures=numFeatures)

  objectConfusion(objects.getObjects())

  # print "Total number of objects created:",len(objects.getObjects())
  # print "Objects are:"
  # for o in objects:
  #   pairs = objects[o]
  #   pairs.sort()
  #   print str(o) + ": " + str(pairs)

  # Setup experiment and train the network
  name = "convergence_O%03d_L%03d_F%03d_C%03d_T%03d" % (
    numObjects, numLocations, numFeatures, numColumns, trialNum
  )
  exp = L4L2Experiment(
    name,
    numCorticalColumns=numColumns,
    inputSize=150,
    externalInputSize=2400,
    numInputBits=20,
    seed=trialNum
  )

  exp.learnObjects(objects.provideObjectsToLearn())
  if profile:
    exp.printProfile(reset=True)

  # For inference, we will check and plot convergence for each object. For each
  # object, we create a sequence of random sensations for each column. We will
  # present each sensation for settlingTime time steps to let it settle and
  # ensure it converges.
  for objectId in objects:
    obj = objects[objectId]

    objectSensations = {}
    for c in range(numColumns):
      objectSensations[c] = []

    if numColumns > 1:
      # Create sequence of random sensations for this object for all columns.
      # At any point in time, ensure each column touches a unique loc,feature
      # pair on the object. It is ok for a given column to sense a loc,feature
      # pair more than once. The total number of sensations is equal to the
      # number of points on the object.
      for sensationNumber in range(len(obj)):
        # Randomly shuffle points for each sensation
        objectCopy = [pair for pair in obj]
        random.shuffle(objectCopy)
        for c in range(numColumns):
          # stay multiple steps on each sensation
          for _ in xrange(settlingTime):
            objectSensations[c].append(objectCopy[c])
    else:
      # Create sequence of sensations for this object for one column. The total
      # number of sensations is equal to the number of points on the object. No
      # point should be visited more than once.
      objectCopy = [pair for pair in obj]
      random.shuffle(objectCopy)
      for pair in objectCopy:
        # stay multiple steps on each sensation
        for _ in xrange(settlingTime):
          objectSensations[0].append(pair)

    inferConfig = {
      "object": objectId,
      "numSteps": len(objectSensations[0]),
      "pairs": objectSensations,
      "includeRandomLocation": includeRandomLocation,
    }

    inferenceSDRs = objects.provideObjectToInfer(inferConfig)

    exp.infer(inferenceSDRs, objectName=objectId)
    if profile:
      exp.printProfile(reset=True)

    if plotInferenceStats:
      exp.plotInferenceStats(
        fields=["L2 Representation", "Overlap L2 with object",
                "L4 Representation"],
        experimentID=objectId,
        onePlot=False,
      )

  convergencePoint = averageConvergencePoint(
    exp.getInferenceStats(), "L2 Representation", 30, 40, settlingTime)

  print
  print "# objects {} # features {} # locations {} # columns {} trial # {}".format(
    numObjects, numFeatures, numLocations, numColumns, trialNum)
  print "Average convergence point=", convergencePoint

  # Return our convergence point as well as all the parameters and objects
  args.update({"objects": objects.getObjects()})
  args.update({"convergencePoint": convergencePoint})

  # Can't pickle experiment so can't return it for batch multiprocessing runs.
  # However this is very useful for debugging when running in a single thread.
  if plotInferenceStats:
    args.update({"experiment": exp})
  return args
def runExperiment(args):
  """
  Run experiment.  What did you think this does?

  args is a dict representing the parameters. We do it this way to support
  multiprocessing. args contains one or more of the following keys:

  @param noiseLevel  (float) Noise level to add to the locations and features
                             during inference. Default: None
  @param profile     (bool)  If True, the network will be profiled after
                             learning and inference. Default: False
  @param numObjects  (int)   The number of objects we will train.
                             Default: 10
  @param numPoints   (int)   The number of points on each object.
                             Default: 10
  @param numLocations (int)  For each point, the number of locations to choose
                             from.  Default: 10
  @param numFeatures (int)   For each point, the number of features to choose
                             from.  Default: 10
  @param numColumns  (int)   The total number of cortical columns in network.
                             Default: 2

  The method returns the args dict updated with two additional keys:
    convergencePoint (int)   The average number of iterations it took
                             to converge across all objects
    objects          (pairs) The list of objects we trained on
  """
  numObjects = args.get("numObjects", 10)
  numLocations = args.get("numLocations", 10)
  numFeatures = args.get("numFeatures", 10)
  numColumns = args.get("numColumns", 2)
  profile = args.get("profile", False)
  noiseLevel = args.get("noiseLevel", None)  # TODO: implement this?
  numPoints = args.get("numPoints", 10)
  trialNum = args.get("trialNum", 42)

  # Create the objects
  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=numColumns,
  )
  objects.createRandomObjects(numObjects, numPoints=numPoints,
                              numLocations=numLocations,
                              numFeatures=numFeatures)

  print "Objects are:"
  for o in objects:
    pairs = objects[o]
    pairs.sort()
    print str(o) + ": " + str(pairs)

  # Setup experiment and train the network
  name = "convergence_O%03d_L%03d_F%03d_C%03d_T%03d" % (
    numObjects, numLocations, numFeatures, numColumns, trialNum)
  exp = L4L2Experiment(name,
                       numCorticalColumns=numColumns,
                       seed=trialNum)

  exp.learnObjects(objects.provideObjectsToLearn())
  if profile:
    exp.printProfile(reset=True)

  # For inference, we will check and plot convergence for each object. For each
  # object, we create a sequence of random sensations for each column. We will
  # present each sensation for 3 time steps to let it settle and ensure it
  # converges.
  for objectId in objects:
    obj = objects[objectId]

    # Create sequence of sensations for this object for all columns
    objectSensations = {}
    for c in range(numColumns):
      objectCopy = [pair for pair in obj]
      random.shuffle(objectCopy)
      # stay multiple steps on each sensation
      sensations = []
      for pair in objectCopy:
        for _ in xrange(2):
          sensations.append(pair)
      objectSensations[c] = sensations

    inferConfig = {
      "object": objectId,
      "numSteps": len(objectSensations[0]),
      "pairs": objectSensations
    }

    exp.infer(objects.provideObjectToInfer(inferConfig), objectName=objectId)
    if profile:
      exp.printProfile(reset=True)

    exp.plotInferenceStats(
      fields=[
        "L2 Representation", "Overlap L2 with object", "L4 Representation"
      ],
      experimentID=objectId,
      onePlot=False,
    )

  convergencePoint = averageConvergencePoint(exp.getInferenceStats(),
                                             "L2 Representation", 40)
  print "Average convergence point=", convergencePoint

  # Return our convergence point as well as all the parameters and objects
  args.update({"objects": objects.getObjects()})
  args.update({"convergencePoint": convergencePoint})

  # Can't pickle experiment so can't return it. However this is very useful
  # for debugging when running in a single thread.
  # args.update({"experiment": exp})
  return args
def runExperiment(args):
  """
  Run experiment.  args is a dict representing the parameters. We do it this
  way to support multiprocessing.

  The method returns the args dict updated with multiple additional keys
  representing accuracy metrics.
  """
  numObjects = args.get("numObjects", 10)
  numLocations = args.get("numLocations", 10)
  numFeatures = args.get("numFeatures", 10)
  numColumns = args.get("numColumns", 2)
  sensorInputSize = args.get("sensorInputSize", 300)
  networkType = args.get("networkType", "MultipleL4L2Columns")
  longDistanceConnections = args.get("longDistanceConnections", 0)
  locationNoise = args.get("locationNoise", 0.0)
  featureNoise = args.get("featureNoise", 0.0)
  numPoints = args.get("numPoints", 10)
  trialNum = args.get("trialNum", 42)
  plotInferenceStats = args.get("plotInferenceStats", True)
  settlingTime = args.get("settlingTime", 3)
  includeRandomLocation = args.get("includeRandomLocation", False)
  enableFeedback = args.get("enableFeedback", True)
  numAmbiguousLocations = args.get("numAmbiguousLocations", 0)
  numInferenceRpts = args.get("numInferenceRpts", 1)
  numLearningRpts = args.get("numLearningRpts", 3)
  l2Params = args.get("l2Params", None)
  l4Params = args.get("l4Params", None)

  # Create the objects
  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=sensorInputSize,
    externalInputSize=2400,
    numCorticalColumns=numColumns,
    numFeatures=numFeatures,
    numLocations=numLocations,
    seed=trialNum)
  objects.createRandomObjects(numObjects, numPoints=numPoints,
                              numLocations=numLocations,
                              numFeatures=numFeatures)

  r = objects.objectConfusion()
  print "Average common pairs in objects=", r[0],
  print ", locations=", r[1], ", features=", r[2]

  # print "Total number of objects created:",len(objects.getObjects())
  # print "Objects are:"
  # for o in objects:
  #   pairs = objects[o]
  #   pairs.sort()
  #   print str(o) + ": " + str(pairs)

  # This object machine will simulate objects where each object is just one
  # unique feature/location pair. We will use this to pretrain L4/L2 with
  # individual pairs.
  pairObjects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=sensorInputSize,
    externalInputSize=2400,
    numCorticalColumns=numColumns,
    numFeatures=numFeatures,
    numLocations=numLocations,
    seed=trialNum)

  # Create "pair objects" consisting of all unique F/L pairs from our objects.
  # These pairs should have the same SDRs as the original objects.
  pairObjects.locations = objects.locations
  pairObjects.features = objects.features
  distinctPairs = objects.getDistinctPairs()
  print "Number of distinct feature/location pairs:", len(distinctPairs)
  for pairNumber, pair in enumerate(distinctPairs):
    pairObjects.addObject([pair], pairNumber)

  #####################################################
  #
  # Setup experiment and train the network
  name = "dp_O%03d_L%03d_F%03d_C%03d_T%03d" % (
    numObjects, numLocations, numFeatures, numColumns, trialNum)
  exp = L4L2Experiment(
    name,
    numCorticalColumns=numColumns,
    L2Overrides=l2Params,
    L4Overrides=l4Params,
    networkType=networkType,
    longDistanceConnections=longDistanceConnections,
    inputSize=sensorInputSize,
    externalInputSize=2400,
    numInputBits=20,
    seed=trialNum,
    enableFeedback=enableFeedback,
    numLearningPoints=numLearningRpts,
  )

  # Learn all FL pairs in each L4 and in each L2
  # Learning in L2 involves choosing a small random number of cells, growing
  # proximal synapses to L4 cells. Growing distal synapses to active cells in
  # each neighboring column. Each column gets its own distal segment.
  exp.learnObjects(pairObjects.provideObjectsToLearn())

  # Verify that all columns learned the pairs
  # numCorrectClassifications = 0
  # for pairId in pairObjects:
  #
  #   obj = pairObjects[pairId]
  #   objectSensations = {}
  #   for c in range(numColumns):
  #     objectSensations[c] = [obj[0]]*settlingTime
  #
  #   inferConfig = {
  #     "object": pairId,
  #     "numSteps": settlingTime,
  #     "pairs": objectSensations,
  #   }
  #
  #   inferenceSDRs = pairObjects.provideObjectToInfer(inferConfig)
  #
  #   exp.infer(inferenceSDRs, objectName=pairId, reset=False)
  #
  #   if exp.isObjectClassified(pairId, minOverlap=30):
  #     numCorrectClassifications += 1
  #
  #   exp.sendReset()
  #
  # print "Classification accuracy for pairs=",100.0*numCorrectClassifications/len(distinctPairs)

  ########################################################################
  #
  # Create "object representations" in L2 by simultaneously invoking the union
  # of all FL pairs in an object and doing some sort of spatial pooling to
  # create L2 representation.

  exp.resetStatistics()
  for objectId in objects:
    # Create one sensation per object consisting of the union of all features
    # and the union of locations.
    ul, uf = objects.getUniqueFeaturesLocationsInObject(objectId)
    print "Object", objectId, "Num unique features:", len(
      uf), "Num unique locations:", len(ul)
    objectSensations = {}
    for c in range(numColumns):
      objectSensations[c] = [(tuple(ul), tuple(uf))] * settlingTime

    inferConfig = {
      "object": objectId,
      "numSteps": settlingTime,
      "pairs": objectSensations,
    }

    inferenceSDRs = objects.provideObjectToInfer(inferConfig)

    exp.infer(inferenceSDRs, objectName="Object " + str(objectId))

  # Compute confusion matrix between all objects as network settles
  # (pairwise overlap of the column-0 L2 SDRs at each settling iteration).
  for iteration in range(settlingTime):
    confusion = numpy.zeros((numObjects, numObjects))
    for o1 in objects:
      for o2 in objects:
        confusion[o1, o2] = len(
          set(exp.statistics[o1]["Full L2 SDR C0"][iteration]) &
          set(exp.statistics[o2]["Full L2 SDR C0"][iteration]))

    plt.figure()
    plt.imshow(confusion)
    plt.xlabel('Object #')
    plt.ylabel('Object #')
    plt.title("Object overlaps")
    plt.colorbar()
    plt.savefig("confusion_random_10L_5F_" + str(iteration) + ".pdf")
    plt.close()

  for col in range(numColumns):
    print "Diagnostics for column", col
    printColumnPoolerDiagnostics(exp.getAlgorithmInstance(column=col))
    print

  return args
def runExperiment(arguments):
  """
  Run a single-column (optionally Bayesian) L4/L2 experiment on randomly
  generated objects and report convergence / overlap diagnostics.

  @param arguments  Parsed command-line arguments object; this function reads
                    cellCount, outputCount, outputActivation, sdrSize,
                    forgetting, useSupport, useApicalTiebreak, implementation,
                    learningRate and iterations from it.

  @return For Bayesian implementations, a tuple
          (l2ActiveValuesRepresentation, l2ActiveValues, converged);
          otherwise None.
  """
  numColumns = 3
  numFeatures = 3  # new: 3 # original: 3
  numPoints = 5  # new: 5 # original: 10
  numLocations = 5  # new: 5 # original: 10
  numObjects = 5  # new: 2 # original: 10
  numRptsPerSensation = 1

  objectMachine = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=1,
    seed=40,
  )
  objectMachine.createRandomObjects(numObjects, numPoints=numPoints,
                                    numLocations=numLocations,
                                    numFeatures=numFeatures)
  objects = objectMachine.provideObjectsToLearn()

  # single-out the inputs to the column #1
  objectsSingleColumn = {}
  for i in range(numObjects):
    featureLocations = []
    for j in range(numLocations):
      featureLocations.append({0: objects[i][j][0]})
    objectsSingleColumn[i] = featureLocations

  cellsPerColumn = arguments.cellCount
  outputCells = arguments.outputCount
  # params
  maxNumSegments = 16
  L2Overrides = {
    "noise": 1e-10,
    "cellCount": outputCells,  # new: 256 # original: 4096
    "inputWidth": 1024 * cellsPerColumn,  # new: 8192 # original: 16384 (?)
    "activationThreshold": arguments.outputActivation,
    "sdrSize": arguments.sdrSize,
    "forgetting": arguments.forgetting,
    "initMovingAverages": 1 / float(outputCells),
    "useSupport": arguments.useSupport,
    "useProximalProbabilities": True,
    "avoidWeightExplosion": False
  }

  L4Overrides = {
    "noise": 1e-10,
    "cellsPerColumn": cellsPerColumn,  # new: 4 # original 32
    "columnCount": 1024,  # new: 2048 # original: 2048
    "initMovingAverages": 1 / float(2048 * cellsPerColumn),
    "minThreshold": 1 / float(cellsPerColumn),
    "useApicalTiebreak": arguments.useApicalTiebreak
  }

  # NOTE(review): if arguments.implementation is None, this branch is entered
  # but the "Summing" membership test below raises TypeError ("argument of
  # type 'NoneType' is not iterable"); same for the 'Bayesian' tests further
  # down. Confirm callers always pass a string.
  if arguments.implementation is None or "Bayesian" in arguments.implementation:
    if "Summing" not in arguments.implementation:
      L2Overrides["learningRate"] = arguments.learningRate
      L4Overrides["learningRate"] = arguments.learningRate

    exp1 = L4L2Experiment(
      'single_column',
      implementation=arguments.implementation,
      L2RegionType="py.BayesianColumnPoolerRegion",
      L4RegionType="py.BayesianApicalTMPairRegion",
      L2Overrides=L2Overrides,
      L4Overrides=L4Overrides,
      numCorticalColumns=1,
      maxSegmentsPerCell=maxNumSegments,
      numLearningPoints=7 if arguments.iterations is None else arguments.iterations,
      seed=1)
  else:
    exp1 = L4L2Experiment(
      'single_column',
      numCorticalColumns=1,
      maxSegmentsPerCell=maxNumSegments,
      numLearningPoints=3,
      seed=1)

  print "train single column "
  exp1.learnObjects(objectsSingleColumn)

  # test on the first object
  objectId = 2
  obj = objectMachine[objectId]

  # Create sequence of sensations for this object for all columns
  # We need to set the seed to get specific convergence points for the red
  # rectangle in the graph.
  objectSensations = {}
  random.seed(12)
  for c in range(numColumns):
    objectCopy = [pair for pair in obj]
    # random.shuffle(objectCopy)
    # stay multiple steps on each sensation
    sensations = []
    for pair in objectCopy:
      for _ in xrange(numRptsPerSensation):
        sensations.append(pair)
    objectSensations[c] = sensations

  sensationStepsSingleColumn = []
  sensationStepsMultiColumn = []
  for step in xrange(len(objectSensations[0])):
    pairs = [objectSensations[col][step] for col in xrange(numColumns)]
    sdrs = objectMachine._getSDRPairs(pairs)
    sensationStepsMultiColumn.append(sdrs)
    sensationStepsSingleColumn.append({0: sdrs[0]})

  print "inference: single column "
  exp1.sendReset()
  l2ActiveCellsSingleColumn = []
  L2ActiveCellNVsTimeSingleColumn = []
  l2ActiveValues = []
  l2ActiveValuesRepresentation = []

  # Count how often each feature-location pair of the target object also
  # appears on the other objects (shared pairs make inference ambiguous).
  target = objectMachine.objects[2]
  objects_but_target = [element for element in objectMachine.objects.values()
                        if element is not target]
  counts = np.zeros(len(target))
  for num, pair in enumerate(target):
    for in_object in objects_but_target:
      counts[num] += in_object.count(pair)

  print "The feaure-location pairs are shared as follows: ", counts

  for sensation in sensationStepsSingleColumn:
    exp1.infer([sensation], objectName=objectId, reset=False)
    if 'Bayesian' in arguments.implementation:
      l2ActiveCellsSingleColumn.append(exp1.getL2Prediction())
      cellActivity = exp1.getActiveCellValues()[0]
      l2ActiveValuesRepresentation.append(cellActivity[list(
        exp1.objectL2Representations[objectId][0])])
      l2ActiveValues.append(cellActivity)
      L2ActiveCellNVsTimeSingleColumn.append(
        len(exp1.getL2Prediction()[0]))
    else:
      rep = exp1.getL2Representations()
      l2ActiveCellsSingleColumn.append(rep)
      L2ActiveCellNVsTimeSingleColumn.append(len(rep[0]))

  # Used to figure out where to put the red rectangle!
  sdrSize = exp1.config["L2Params"]["sdrSize"]
  singleColumnHighlight = next(
    (idx for idx, value in enumerate(l2ActiveCellsSingleColumn)
     if len(value[0]) == sdrSize), None)
  firstObjectRepresentation = exp1.objectL2Representations[objectId][0]
  converged = next(
    (idx for idx, value in enumerate(l2ActiveCellsSingleColumn)
     if (value[0] == firstObjectRepresentation)), None)

  print "Converged to first object representation after %s steps" % converged
  print "Exactly SDR-Size activity (%s) after %s steps" % (
    sdrSize, singleColumnHighlight)

  print "Overlaps of each l2-representation (after new sensation) to each object"
  for idx in range(0, len(l2ActiveCellsSingleColumn)):
    print "overlap of l2-representation %s" % idx
    for i in range(0, len(exp1.objectL2Representations)):
      object = exp1.objectL2Representations[i][0]
      l2Representation = l2ActiveCellsSingleColumn[idx][0]
      overlap = len(l2Representation.intersection(object))
      print "\tTo object %s is %s/%s" % (i, overlap, len(l2Representation))

  if 'Bayesian' in arguments.implementation:
    return l2ActiveValuesRepresentation, l2ActiveValues, converged
  else:
    return None
def loadThingObjects(numCorticalColumns=1, objDataPath='./data/'): """ Load simulated sensation data on a number of different objects There is one file per object, each row contains one feature, location pairs The format is as follows [(-33.6705, 75.5003, 2.4207)/10] => [[list of active bits of location], [list of active bits of feature]] The content before "=>" is the true 3D location / sensation The number of active bits in the location and feature is listed after "=>". @return A simple object machine """ # create empty simple object machine objects = createObjectMachine( machineType="simple", numInputBits=20, sensorInputSize=1024, externalInputSize=1024, numCorticalColumns=numCorticalColumns, numFeatures=0, numLocations=0, ) for _ in range(numCorticalColumns): objects.locations.append([]) objects.features.append([]) objFiles = [] for f in os.listdir(objDataPath): if os.path.isfile(os.path.join(objDataPath, f)): if '.log' in f: objFiles.append(f) idx = 0 OnBitsList = [] for f in objFiles: objName = f.split('.')[0] objName = objName[4:] objFile = open('{}/{}'.format(objDataPath, f)) sensationList = [] for line in objFile.readlines(): # parse thing data file and extract feature/location vectors sense = line.split('=>')[1].strip(' ').strip('\n') OnBitsList.append(float(line.split('] =>')[0].split('/')[1])) location = sense.split('],[')[0].strip('[') feature = sense.split('],[')[1].strip(']') location = np.fromstring(location, sep=',', dtype=np.uint8) feature = np.fromstring(feature, sep=',', dtype=np.uint8) # add the current sensation to object Machine sensationList.append((idx, idx)) for c in range(numCorticalColumns): objects.locations[c].append(set(location.tolist())) objects.features[c].append(set(feature.tolist())) idx += 1 objects.addObject(sensationList, objName) print "load object file: {} object name: {} sensation # {}".format( f, objName, len(sensationList)) OnBitsList OnBitsList = np.array(OnBitsList) plt.figure() plt.hist(OnBitsList) return objects, 
OnBitsList
# NOTE(review): this save_as call is the tail of a plotting routine whose
# start (and the definition of `fig`) lies outside this chunk.
plotly.plotly.image.save_as(
  fig, filename='plots/target_object_representations.pdf', scale=4)


if __name__ == "__main__":
  # Script entry point: build random objects and per-column learning input.
  numColumns = 3
  numFeatures = 3
  numPoints = 10
  numLocations = 10
  numObjects = 10
  numRptsPerSensation = 2

  objectMachine = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=3,
    seed=40,
  )
  objectMachine.createRandomObjects(numObjects, numPoints=numPoints,
                                    numLocations=numLocations,
                                    numFeatures=numFeatures)
  objects = objectMachine.provideObjectsToLearn()

  # single-out the inputs to the column #1
  objectsSingleColumn = {}
  for i in range(numObjects):
    featureLocations = []
    for j in range(numLocations):
def runExperiment(args):
  """
  Run experiment.  What did you think this does?

  args is a dict representing the parameters. We do it this way to support
  multiprocessing. args contains one or more of the following keys:

  @param noiseLevel  (float) Noise level to add to the locations and features
                             during inference. Default: None
  @param profile     (bool)  If True, the network will be profiled after
                             learning and inference. Default: False
  @param numObjects  (int)   The number of objects we will train.
                             Default: 10
  @param numPoints   (int)   The number of points on each object.
                             Default: 10
  @param numLocations (int)  For each point, the number of locations to choose
                             from.  Default: 10
  @param numFeatures (int)   For each point, the number of features to choose
                             from.  Default: 10
  @param numColumns  (int)   The total number of cortical columns in network.
                             Default: 2

  The method returns the args dict updated with two additional keys:
    convergencePoint (int)   The average number of iterations it took
                             to converge across all objects
    objects          (pairs) The list of objects we trained on
  """
  numObjects = args.get("numObjects", 10)
  numLocations = args.get("numLocations", 10)
  numFeatures = args.get("numFeatures", 10)
  numColumns = args.get("numColumns", 2)
  profile = args.get("profile", False)
  noiseLevel = args.get("noiseLevel", None)  # TODO: implement this?
  numPoints = args.get("numPoints", 10)
  trialNum = args.get("trialNum", 42)
  l2Params = args.get("l2Params", getL2Params())
  l4Params = args.get("l4Params", getL4Params())
  objectSeed = args.get("objectSeed", 41)

  # Create the objects
  objects = createObjectMachine(
    machineType="simple",
    numInputBits=20,
    sensorInputSize=1024,
    externalInputSize=1024,
    numCorticalColumns=numColumns,
    seed=objectSeed,
  )
  objects.createRandomObjects(numObjects, numPoints=numPoints,
                              numLocations=numLocations,
                              numFeatures=numFeatures)

  # print "Objects are:"
  # for o in objects:
  #   pairs = objects[o]
  #   pairs.sort()
  #   print str(o) + ": " + str(pairs)

  # Setup experiment and train the network
  name = "convergence_O%03d_L%03d_F%03d_C%03d_T%03d" % (
    numObjects, numLocations, numFeatures, numColumns, trialNum)
  exp = L4L2Experiment(name,
                       L2Overrides=l2Params,
                       L4Overrides=l4Params,
                       numCorticalColumns=numColumns,
                       seed=trialNum)

  exp.learnObjects(objects.provideObjectsToLearn())
  L2TimeLearn = 0
  L2TimeInfer = 0

  if profile:
    # exp.printProfile(reset=True)
    L2TimeLearn = getProfileInfo(exp)
    args.update({"L2TimeLearn": L2TimeLearn})
    exp.resetProfile()

  # For inference, we will check and plot convergence for each object. For each
  # object, we create a sequence of random sensations for each column. We will
  # present each sensation for 3 time steps to let it settle and ensure it
  # converges.
  for objectId in objects:
    obj = objects[objectId]

    # Create sequence of sensations for this object for all columns
    objectSensations = {}
    for c in range(numColumns):
      objectCopy = [pair for pair in obj]
      random.shuffle(objectCopy)
      # stay multiple steps on each sensation
      sensations = []
      for pair in objectCopy:
        for _ in xrange(2):
          sensations.append(pair)
      objectSensations[c] = sensations

    inferConfig = {
      "object": objectId,
      "numSteps": len(objectSensations[0]),
      "pairs": objectSensations
    }

    exp.infer(objects.provideObjectToInfer(inferConfig), objectName=objectId)
    if profile:
      L2TimeInfer += getProfileInfo(exp)
      exp.resetProfile()
      # exp.printProfile(reset=True)

  if profile:
    # Average inference profile time over all objects.
    L2TimeInfer /= len(objects)
    args.update({"L2TimeInfer": L2TimeInfer})

  convergencePoint = averageConvergencePoint(exp.getInferenceStats(),
                                             "L2 Representation", 40)
  print "objectSeed {} # distal syn {} # proximal syn {}, " \
        "# convergence point={:4.2f} train time {:4.3f} infer time {:4.3f}".format(
    objectSeed, l2Params["sampleSizeDistal"], l2Params["sampleSizeProximal"],
    convergencePoint, L2TimeLearn, L2TimeInfer)

  # Return our convergence point as well as all the parameters and objects
  args.update({"objects": objects.getObjects()})
  args.update({"convergencePoint": convergencePoint})

  # prepare experiment results
  numLateralConnections = []
  numProximalConnections = []
  for l2Columns in exp.L2Columns:
    numLateralConnections.append(
      l2Columns._pooler.numberOfDistalSynapses())
    numProximalConnections.append(
      np.sum(l2Columns._pooler.numberOfProximalSynapses()))

  # NOTE(review): args['L2TimeLearn'] / args['L2TimeInfer'] are only set when
  # profile is True; with profile=False these lookups raise KeyError unless
  # the caller pre-populated them — confirm callers always profile.
  result = {
    'trial': objectSeed,
    'L2TimeLearn': args['L2TimeLearn'],
    'L2TimeInfer': args['L2TimeInfer'],
    'sampleSizeProximal': l2Params["sampleSizeProximal"],
    'sampleSizeDistal': l2Params["sampleSizeDistal"],
    'numLateralConnections': np.mean(np.array(numLateralConnections)),
    'numProximalConnections': np.mean(np.array(numProximalConnections)),
    'convergencePoint': args['convergencePoint']
  }
  return result
def runExperiment(args): """ Runs the experiment. The code is organized around what we need for specific figures in the paper. args is a dict representing the various parameters. We do it this way to support multiprocessing. The function returns the args dict updated with a number of additional keys containing performance metrics. """ numObjects = args.get("numObjects", 10) numSequences = args.get("numSequences", 10) numFeatures = args.get("numFeatures", 10) seqLength = args.get("seqLength", 10) numPoints = args.get("numPoints", 10) trialNum = args.get("trialNum", 42) inputSize = args.get("inputSize", 1024) numLocations = args.get("numLocations", 100000) numInputBits = args.get("inputBits", 20) settlingTime = args.get("settlingTime", 1) numRepetitions = args.get("numRepetitions", 5) figure = args.get("figure", False) synPermProximalDecL2 = args.get("synPermProximalDecL2", 0.001) minThresholdProximalL2 = args.get("minThresholdProximalL2", 10) sampleSizeProximalL2 = args.get("sampleSizeProximalL2", 15) basalPredictedSegmentDecrement = args.get( "basalPredictedSegmentDecrement", 0.0006) stripStats = args.get("stripStats", True) random.seed(trialNum) ##################################################### # # Create the sequences and objects, and make sure they share the # same features and locations. 
sequences = createObjectMachine( machineType="sequence", numInputBits=numInputBits, sensorInputSize=inputSize, externalInputSize=1024, numCorticalColumns=1, numFeatures=numFeatures, numLocations=numLocations, seed=trialNum ) sequences.createRandomSequences(numSequences, seqLength) objects = createObjectMachine( machineType="simple", numInputBits=numInputBits, sensorInputSize=inputSize, externalInputSize=1024, numCorticalColumns=1, numFeatures=numFeatures, numLocations=numLocations, seed=trialNum ) # Make sure they share the same features and locations objects.locations = sequences.locations objects.features = sequences.features objects.createRandomObjects(numObjects, numPoints=numPoints, numLocations=numLocations, numFeatures=numFeatures) ##################################################### # # Setup experiment and train the network name = "combined_sequences_S%03d_O%03d_F%03d_L%03d_T%03d" % ( numSequences, numObjects, numFeatures, numLocations, trialNum ) exp = L4TMExperiment( name=name, numCorticalColumns=1, inputSize=inputSize, numExternalInputBits=numInputBits, externalInputSize=1024, numInputBits=numInputBits, seed=trialNum, L2Overrides={"synPermProximalDec": synPermProximalDecL2, "minThresholdProximal": minThresholdProximalL2, "sampleSizeProximal": sampleSizeProximalL2, "initialProximalPermanence": 0.45, "synPermProximalDec": 0.002, }, TMOverrides={ "basalPredictedSegmentDecrement": basalPredictedSegmentDecrement }, L4Overrides={"initialPermanence": 0.21, "activationThreshold": 18, "minThreshold": 18, "basalPredictedSegmentDecrement": basalPredictedSegmentDecrement, }, ) printDiagnostics(exp, sequences, objects, args, verbosity=0) # Train the network on all the sequences and then all the objects. 
if figure in ["S", "6", "7"]: trainSuperimposedSequenceObjects(exp, numRepetitions, sequences, objects) else: trainObjects(objects, exp, numRepetitions) trainSequences(sequences, exp, numObjects) ########################################################################## # # Run inference print "Running inference" if figure in ["6"]: # We have trained the system on both temporal sequences and # objects. We test the system by randomly switching between sequences and # objects. To replicate the graph, we want to run sequences and objects in a # specific order for trial,itemType in enumerate(["sequence", "object", "sequence", "object", "sequence", "sequence", "object", "sequence", ]): if itemType == "sequence": objectId = random.randint(0, numSequences-1) inferSequence(exp, objectId, sequences, objectId+numObjects) else: objectId = random.randint(0, numObjects-1) inferObject(exp, objectId, objects, objectId) elif figure in ["7"]: # For figure 7 we have trained the system on both temporal sequences and # objects. We test the system by superimposing randomly chosen sequences and # objects. for trial in range(10): sequenceId = random.randint(0, numSequences - 1) objectId = random.randint(0, numObjects - 1) inferSuperimposedSequenceObjects(exp, sequenceId=sequenceId, objectId=objectId, sequences=sequences, objects=objects) else: # By default run inference on every sequence and object in order. 
for objectId in objects: inferObject(exp, objectId, objects, objectId) for seqId in sequences: inferSequence(exp, seqId, sequences, seqId+numObjects) ########################################################################## # # Debugging diagnostics printDiagnosticsAfterTraining(exp) ########################################################################## # # Compute a number of overall inference statistics print "# Sequences {} # features {} trial # {}\n".format( numSequences, numFeatures, trialNum) convergencePoint, sequenceAccuracyL2 = exp.averageConvergencePoint( "L2 Representation", 30, 40, 1, numObjects) print "L2 accuracy for sequences:", sequenceAccuracyL2 convergencePoint, objectAccuracyL2 = exp.averageConvergencePoint( "L2 Representation", 30, 40, 1, 0, numObjects) print "L2 accuracy for objects:", objectAccuracyL2 objectCorrectSparsityTM, _ = exp.averageSequenceAccuracy(15, 25, 0, numObjects) print "TM accuracy for objects:", objectCorrectSparsityTM sequenceCorrectSparsityTM, sequenceCorrectClassificationsTM = \ exp.averageSequenceAccuracy(15, 25, numObjects) print "TM accuracy for sequences:", sequenceCorrectClassificationsTM infStats = exp.getInferenceStats() predictedActive = numpy.zeros(len(infStats)) predicted = numpy.zeros(len(infStats)) predictedActiveL4 = numpy.zeros(len(infStats)) predictedL4 = numpy.zeros(len(infStats)) for i,stat in enumerate(infStats): predictedActive[i] = float(sum(stat["TM PredictedActive C0"][2:])) / len( stat["TM PredictedActive C0"][2:]) predicted[i] = float(sum(stat["TM NextPredicted C0"][2:])) / len( stat["TM NextPredicted C0"][2:]) predictedActiveL4[i] = float(sum(stat["L4 PredictedActive C0"])) / len( stat["L4 PredictedActive C0"]) predictedL4[i] = float(sum(stat["L4 Predicted C0"])) / len( stat["L4 Predicted C0"]) # Return a bunch of metrics we will use in plots args.update({"sequences": sequences.getObjects()}) args.update({"objects": objects.getObjects()}) args.update({"convergencePoint":convergencePoint}) 
args.update({"objectAccuracyL2": objectAccuracyL2}) args.update({"sequenceAccuracyL2": sequenceAccuracyL2}) args.update({"sequenceCorrectSparsityTM": sequenceCorrectSparsityTM}) args.update({"sequenceCorrectClassificationsTM": sequenceCorrectClassificationsTM}) args.update({"objectCorrectSparsityTM": objectCorrectSparsityTM}) args.update({"averagePredictions": predicted.mean()}) args.update({"averagePredictedActive": predictedActive.mean()}) args.update({"averagePredictionsL4": predictedL4.mean()}) args.update({"averagePredictedActiveL4": predictedActiveL4.mean()}) if stripStats: exp.stripStats() args.update({"name": exp.name}) args.update({"statistics": exp.statistics}) args.update({"networkConfig": exp.config}) return args