class PositionPredictionModel(Model):
  """
  Model that feeds paired sensor/motor SDRs into an extended temporal memory
  so it can learn to predict the next sensory input.
  """

  def __init__(self, motorValues=range(-4, 4+1), sparsity=0.02,
               encoderResolution=1.0, tmParams=None):
    """
    @param motorValues       (list)  Allowed motor commands
    @param sparsity          (float) Fraction of active bits per encoding
    @param encoderResolution (float) Resolution of the coordinate encoders
    @param tmParams          (dict)  Keyword arguments for the temporal memory
    """
    super(PositionPredictionModel, self).__init__(motorValues=motorValues)
    self.tm = MonitoredExtendedTemporalMemory(mmName="TM", **(tmParams or {}))
    self.n = self.tm.numberOfColumns()
    # Derive active-bit count from the requested sparsity; the +1 guarantees
    # at least one active bit.
    self.w = int(self.n * sparsity) + 1
    self.encoderResolution = encoderResolution
    self.sensorEncoder = CoordinateEncoder(w=self.w, n=self.n)
    self.motorEncoder = CoordinateEncoder(w=self.w, n=self.n)

  def update(self, sensorValue, motorValue, goalValue=None):
    """
    Run one timestep: encode the sensor and motor values and feed them to the
    temporal memory (motor pattern as external cells).

    @param sensorValue (float) Current sensed position
    @param motorValue  (float) Current motor command
    @param goalValue           Unused by this model
    """
    scale = 100
    coordinateRadius = int(self.encoderResolution * scale)

    def asPattern(encoder, value):
      # Coordinate encoders take (integer coordinate array, radius) pairs;
      # scaling by 100 keeps sub-integer resolution through the int() cast.
      point = numpy.array([int(value * scale)])
      return set(encoder.encode((point, coordinateRadius)).nonzero()[0])

    sensorPattern = asPattern(self.sensorEncoder, sensorValue)
    motorPattern = asPattern(self.motorEncoder, motorValue)

    self.tm.compute(sensorPattern,
                    activeExternalCells=motorPattern,
                    formInternalConnections=True,
                    learn=True)
class PositionPredictionModel(Model):
  """
  Predicts upcoming sensory input from the sensor/motor stream using a
  general temporal memory whose external cells carry the motor pattern.
  """

  def __init__(self, motorValues=range(-4, 4 + 1), sparsity=0.02,
               encoderResolution=1.0, tmParams=None):
    """
    @param motorValues       (list)  Allowed motor commands
    @param sparsity          (float) Fraction of active bits per encoding
    @param encoderResolution (float) Resolution of the coordinate encoders
    @param tmParams          (dict)  Keyword arguments for the temporal memory
    """
    super(PositionPredictionModel, self).__init__(motorValues=motorValues)
    tmParams = tmParams or {}
    self.tm = MonitoredGeneralTemporalMemory(mmName="TM", **tmParams)
    self.n = self.tm.numberOfColumns()
    self.w = int(self.n * sparsity) + 1  # at least one active bit
    self.encoderResolution = encoderResolution
    self.sensorEncoder = CoordinateEncoder(w=self.w, n=self.n)
    self.motorEncoder = CoordinateEncoder(w=self.w, n=self.n)

  def update(self, sensorValue, motorValue, goalValue=None):
    """
    Run one timestep of the TM on the encoded sensor/motor pair.

    @param sensorValue (float) Current sensed position
    @param motorValue  (float) Current motor command
    @param goalValue           Unused by this model
    """
    scale = 100
    radius = int(self.encoderResolution * scale)
    # Scale values up by 100 so the integer cast keeps useful resolution.
    scaledSensor = numpy.array([int(sensorValue * scale)])
    scaledMotor = numpy.array([int(motorValue * scale)])
    sensorSDR = self.sensorEncoder.encode((scaledSensor, radius))
    motorSDR = self.motorEncoder.encode((scaledMotor, radius))
    sensorPattern = set(sensorSDR.nonzero()[0])
    motorPattern = set(motorSDR.nonzero()[0])
    self.tm.compute(sensorPattern,
                    activeExternalCells=motorPattern,
                    formInternalConnections=True,
                    learn=True)
class PositionPredictionModel(Model):
  """
  Predicts the next sensory input using an apical-tiebreak pair memory whose
  external basal input carries the motor pattern; the previous step's motor
  pattern is used as reinforcement/growth candidates.
  """

  def __init__(self, motorValues=range(-4, 4 + 1), sparsity=0.02,
               encoderResolution=1.0, tmParams=None):
    """
    @param motorValues       (list)  Allowed motor commands
    @param sparsity          (float) Fraction of active bits per encoding
    @param encoderResolution (float) Resolution of the coordinate encoders
    @param tmParams          (dict)  Overrides merged on top of
                                     DEFAULT_TM_PARAMS
    """
    super(PositionPredictionModel, self).__init__(motorValues=motorValues)
    # BUG FIX: the old code rebound the ``tmParams`` argument to a copy of
    # DEFAULT_TM_PARAMS and then updated that copy with itself, silently
    # discarding all caller-supplied overrides. Merge into a separate dict.
    params = dict(DEFAULT_TM_PARAMS)
    params.update(tmParams or {})
    self.tm = MonitoredApicalTiebreakPairMemory(mmName="TM", **params)
    self.n = self.tm.numberOfColumns()
    self.w = int(self.n * sparsity) + 1  # at least one active bit
    self.encoderResolution = encoderResolution
    self.sensorEncoder = CoordinateEncoder(w=self.w, n=self.n)
    self.motorEncoder = CoordinateEncoder(w=self.w, n=self.n)
    # Motor pattern from the previous timestep; empty before the first update.
    self.prevMotorPattern = ()

  def update(self, sensorValue, motorValue, goalValue=None):
    """
    Feed one (sensor, motor) sample into the pair memory.

    @param sensorValue (float) Current sensed position
    @param motorValue  (float) Current motor command
    @param goalValue           Unused by this model
    """
    scale = 100
    radius = int(self.encoderResolution * scale)
    # Scale by 100 so the integer cast keeps sub-integer resolution.
    sensorInput = (numpy.array([int(sensorValue * scale)]), radius)
    motorInput = (numpy.array([int(motorValue * scale)]), radius)
    sensorPattern = set(self.sensorEncoder.encode(sensorInput).nonzero()[0])
    motorPattern = set(self.motorEncoder.encode(motorInput).nonzero()[0])
    self.tm.compute(sensorPattern,
                    activeCellsExternalBasal=motorPattern,
                    reinforceCandidatesExternalBasal=self.prevMotorPattern,
                    growthCandidatesExternalBasal=self.prevMotorPattern,
                    learn=True)
    self.prevMotorPattern = motorPattern
class PositionPredictionModel(Model):
  """
  Sensorimotor prediction model built on an apical-tiebreak pair memory; the
  motor pattern drives the external basal input and the previous motor
  pattern supplies the segment reinforcement/growth candidates.
  """

  def __init__(self, motorValues=range(-4, 4+1), sparsity=0.02,
               encoderResolution=1.0, tmParams=None):
    """
    @param motorValues       (list)  Allowed motor commands
    @param sparsity          (float) Fraction of active bits per encoding
    @param encoderResolution (float) Resolution of the coordinate encoders
    @param tmParams          (dict)  Overrides merged on top of
                                     DEFAULT_TM_PARAMS
    """
    super(PositionPredictionModel, self).__init__(motorValues=motorValues)
    # BUG FIX: previously ``tmParams`` was rebound to dict(DEFAULT_TM_PARAMS)
    # and then updated with itself, so caller-provided parameters were never
    # applied. Keep the argument intact and merge into a fresh dict.
    params = dict(DEFAULT_TM_PARAMS)
    params.update(tmParams or {})
    self.tm = MonitoredApicalTiebreakPairMemory(mmName="TM", **params)
    self.n = self.tm.numberOfColumns()
    self.w = int(self.n * sparsity) + 1  # at least one active bit
    self.encoderResolution = encoderResolution
    self.sensorEncoder = CoordinateEncoder(w=self.w, n=self.n)
    self.motorEncoder = CoordinateEncoder(w=self.w, n=self.n)
    # Previous timestep's motor pattern; empty before the first update.
    self.prevMotorPattern = ()

  def update(self, sensorValue, motorValue, goalValue=None):
    """
    Feed one (sensor, motor) sample into the pair memory.

    @param sensorValue (float) Current sensed position
    @param motorValue  (float) Current motor command
    @param goalValue           Unused by this model
    """
    scale = 100
    radius = int(self.encoderResolution * scale)
    sensorInput = (numpy.array([int(sensorValue * scale)]), radius)
    motorInput = (numpy.array([int(motorValue * scale)]), radius)
    sensorPattern = set(self.sensorEncoder.encode(sensorInput).nonzero()[0])
    motorPattern = set(self.motorEncoder.encode(motorInput).nonzero()[0])
    self.tm.compute(sensorPattern,
                    activeCellsExternalBasal=motorPattern,
                    reinforceCandidatesExternalBasal=self.prevMotorPattern,
                    growthCandidatesExternalBasal=self.prevMotorPattern,
                    learn=True)
    self.prevMotorPattern = motorPattern
class Agent(object): def __init__(self): self.encoder = CoordinateEncoder(n=1024, w=21) self.motorEncoder = ScalarEncoder(21, -1, 1, n=1024) self.tm = MonitoredGeneralTemporalMemory(columnDimensions=[2048], cellsPerColumn=1, initialPermanence=0.5, connectedPermanence=0.6, permanenceIncrement=0.1, permanenceDecrement=0.02, minThreshold=35, activationThreshold=35, maxNewSynapseCount=40) self.plotter = Plotter(self.tm) self.lastState = None self.lastAction = None def sync(self, outputData): if not ("location" in outputData and "steer" in outputData): print "Warning: Missing data:", outputData return if outputData.get("reset"): print "Reset." self.tm.reset() location = outputData["location"] steer = outputData["steer"] x = int(location["x"] * SCALE) z = int(location["z"] * SCALE) coordinate = numpy.array([x, z]) encoding = self.encoder.encode((coordinate, RADIUS)) motorEncoding = self.motorEncoder.encode(steer) sensorPattern = set(encoding.nonzero()[0]) motorPattern = set(motorEncoding.nonzero()[0]) self.tm.compute(sensorPattern, activeExternalCells=motorPattern, formInternalConnections=True) print self.tm.mmPrettyPrintMetrics(self.tm.mmGetDefaultMetrics()) overlap = 0 if self.lastState is not None: overlap = (self.lastState & encoding).sum() self.plotter.update(overlap) if outputData.get("reset"): self.plotter.render() self.lastState = encoding self.lastAction = steer
def testEncodeIntoArray(self):
  """Encoding is w-sparse, deterministic, and rejects non-integer radii."""
  n = 33
  w = 3
  encoder = CoordinateEncoder(name="coordinate", n=n, w=w)
  coordinate = np.array([100, 200])
  radius = 5

  firstOutput = encode(encoder, coordinate, radius)
  # Exactly w bits must be active in the encoding.
  self.assertEqual(np.sum(firstOutput), w)

  # Encoding the same input again must yield an identical SDR.
  secondOutput = encode(encoder, coordinate, radius)
  self.assertTrue(np.array_equal(secondOutput, firstOutput))

  # A float radius must trip the encoder's assertion.
  with self.assertRaises(AssertionError):
    encoder.encode((coordinate, float(radius)))
def testEncodeIntoArray(self):
  """Basic CoordinateEncoder checks: sparsity, determinism, radius type."""
  n = 33
  w = 3
  encoder = CoordinateEncoder(name="coordinate", n=n, w=w)
  coordinate = np.array([100, 200])
  radius = 5
  output1 = encode(encoder, coordinate, radius)
  # The encoding should contain exactly w active bits.
  self.assertEqual(np.sum(output1), w)
  # Test that we get the same output for the same input
  output2 = encode(encoder, coordinate, radius)
  self.assertTrue(np.array_equal(output2, output1))
  # Test that a float radius raises an assertion error
  with self.assertRaises(AssertionError):
    encoder.encode((coordinate, float(radius)))
class PositionBehaviorModel(Model):
  """
  Behavior model: learns sensorimotor behaviors with a BehaviorMemory and,
  when a goal is supplied, decodes the motor command toward that goal.
  """

  def __init__(self, motorValues=range(-4, 4 + 1), sparsity=0.02,
               encoderResolution=0.5, bmParams=None):
    """
    @param motorValues       (list)  Allowed motor commands
    @param sparsity          (float) Fraction of active sensor bits
    @param encoderResolution (float) Resolution of the sensor encoder
    @param bmParams          (dict)  Keyword arguments for BehaviorMemory
    """
    super(PositionBehaviorModel, self).__init__(motorValues=motorValues)
    self.encoderResolution = encoderResolution
    # BUG FIX: copy the caller's dict before adding numMotorColumns, so a
    # bmParams dict the caller reuses is not silently mutated.
    bmParams = dict(bmParams or {})
    bmParams["numMotorColumns"] = len(self.motorValues)
    self.bm = BehaviorMemory(**bmParams)
    self.sensorN = self.bm.numSensorColumns
    self.sensorW = int(self.sensorN * sparsity) + 1  # at least one active bit
    self.sensorEncoder = CoordinateEncoder(w=self.sensorW, n=self.sensorN)

  def update(self, sensorValue, motorValue, goalValue=None):
    """
    Feed one (motor, sensor[, goal]) sample into the behavior memory.

    @param sensorValue (float) Current sensed position
    @param motorValue          Current motor command (must be in motorValues)
    @param goalValue   (float) Optional goal position

    @return Decoded motor command when goalValue is given, otherwise None.
    """
    motorPattern = set([self.motorValues.index(motorValue)])
    scale = 100
    radius = int(self.encoderResolution * scale)
    sensorInput = (numpy.array([int(sensorValue * scale)]), radius)
    sensorPattern = set(self.sensorEncoder.encode(sensorInput).nonzero()[0])
    goalPattern = set()
    if goalValue is not None:
      goalInput = (numpy.array([int(goalValue * scale)]), radius)
      goalPattern = set(self.sensorEncoder.encode(goalInput).nonzero()[0])
    self.bm.compute(motorPattern, sensorPattern, goalPattern)
    if goalValue is not None:
      return self.decodeMotor()

  def decodeMotor(self):
    """Return the motor value whose column has the strongest activation."""
    idx = self.bm.motor.argmax()
    return self.motorValues[idx]
class PositionBehaviorModel(Model):
  """
  Goal-directed behavior model backed by a BehaviorMemory; encodes sensor and
  goal positions with a shared CoordinateEncoder and decodes motor output.
  """

  def __init__(self, motorValues=range(-4, 4+1), sparsity=0.02,
               encoderResolution=0.5, bmParams=None):
    """
    @param motorValues       (list)  Allowed motor commands
    @param sparsity          (float) Fraction of active sensor bits
    @param encoderResolution (float) Resolution of the sensor encoder
    @param bmParams          (dict)  Keyword arguments for BehaviorMemory
    """
    super(PositionBehaviorModel, self).__init__(motorValues=motorValues)
    self.encoderResolution = encoderResolution
    # BUG FIX: work on a copy so that setting numMotorColumns does not
    # mutate a dict owned by the caller.
    bmParams = dict(bmParams or {})
    bmParams["numMotorColumns"] = len(self.motorValues)
    self.bm = BehaviorMemory(**bmParams)
    self.sensorN = self.bm.numSensorColumns
    self.sensorW = int(self.sensorN * sparsity) + 1  # at least one active bit
    self.sensorEncoder = CoordinateEncoder(w=self.sensorW, n=self.sensorN)

  def update(self, sensorValue, motorValue, goalValue=None):
    """
    Feed one (motor, sensor[, goal]) sample into the behavior memory.

    @return Decoded motor command when goalValue is given, otherwise None.
    """
    motorPattern = set([self.motorValues.index(motorValue)])
    scale = 100
    radius = int(self.encoderResolution * scale)
    sensorInput = (numpy.array([int(sensorValue * scale)]), radius)
    sensorPattern = set(self.sensorEncoder.encode(sensorInput).nonzero()[0])
    goalPattern = set()
    if goalValue is not None:
      goalInput = (numpy.array([int(goalValue * scale)]), radius)
      goalPattern = set(self.sensorEncoder.encode(goalInput).nonzero()[0])
    self.bm.compute(motorPattern, sensorPattern, goalPattern)
    if goalValue is not None:
      return self.decodeMotor()

  def decodeMotor(self):
    """Return the motor value whose column has the strongest activation."""
    idx = self.bm.motor.argmax()
    return self.motorValues[idx]
class ContinuousLocationObjectMachine(ObjectMachineBase):
  """
  This implementation of the object machine uses continuous locations instead
  of discrete random ones. They are created using a CoordinateEncoder.

  The "objects" should be PhysicalObjects as defined in physical_object_base
  and physical_objects. Subclass the base implementation for specific needs.
  """

  def __init__(self,
               numInputBits=41,
               sensorInputSize=2048,
               externalInputSize=2048,
               numCorticalColumns=1,
               numFeatures=400,
               dimension=3,
               seed=42):
    """
    At creation, the SimpleObjectMachine creates a pool of locations and
    features SDR's.

    Parameters:
    ----------------------------
    @param   numInputBits (int)
             Number of ON bits in the input. Note: it should be uneven as the
             encoder only accepts uneven number of bits.

    @param   sensorInputSize (int)
             Total number of bits in the sensory input

    @param   externalInputSize (int)
             Total number of bits the external (location) input

    @param   numCorticalColumns (int)
             Number of cortical columns used in the experiment

    @param   dimension (int)
             Dimension of the locations. Will typically be 3.

    @param   numFeatures (int)
             Number of feature SDRs to generate per cortical column. There is
             typically no need to not use the default value, unless the user
             knows he will use more than 400 patterns.

    @param   seed (int)
             Seed to be used in the machine
    """
    super(ContinuousLocationObjectMachine, self).__init__(numInputBits,
                                                          sensorInputSize,
                                                          externalInputSize,
                                                          numCorticalColumns,
                                                          seed)

    # location and features pool
    self.numFeatures = numFeatures
    self._generateFeatures()

    self.dimension = dimension
    self.locationEncoder = CoordinateEncoder(
        w=numInputBits,
        n=externalInputSize,
        name="locationEncoder"
    )

  def provideObjectsToLearn(self, learningConfig, plot=False):
    """
    Returns the objects in a canonical format to be sent to an experiment.

    The input, learningConfig, should have the following format. It is a
    mapping from object to a list of features to sample locations from, and
    the number of points to sample from each feature. Note that these objects
    should be first added with .addObjects(). These features can be either
    hard-coded with their key or accessed with .getFeatures. An other
    possibility is to directly specify locations. The machine will use the
    object to find the corresponding feature (an empty feature will be sent
    if the location is not on the object's surface).

    learningConfig = {
      # hard-coded keys and number of points
      "cube": [("face", 5), ("edge", 5), ("vertex", 3)],

      # programmatically-accessed keys and number of points
      "cylinder": [(feature, 5) for feature in cylinder.getFeatures()],

      # specific locations
      "sphere": [(10, 5, 3), (12, 45, 32), (12, 5, 46)],
    }

    The returned format is a a dictionary where the keys are object names, and
    values are lists of sensations, each sensation being a mapping from
    cortical column index to a pair of SDR's (one location and one feature).

    Parameters:
    ----------------------------
    @param   learningConfig (dict)
             Configuration for learning, as described above.
    """
    objects = {}

    for objectName, locationList in learningConfig.iteritems():
      sensationList = []
      physicalObject = self.objects[objectName]
      if plot:
        fig, ax = physicalObject.plot()

      for element in locationList:
        # location name and number of points
        if len(element) == 2:
          featureName, numLocations = element
          for _ in xrange(numLocations):
            location = physicalObject.sampleLocationFromFeature(featureName)
            # the same (location, feature) pair is replicated for each column
            sensationList.append(
                self._getSDRPairs(
                    [(location,
                      physicalObject.getFeatureID(location))] * self.numColumns
                )
            )
            if plot:
              x, y, z = tuple(location)
              ax.scatter(x, y, z, marker="v", s=100, c="r")

        # explicit location
        elif len(element) == 3:
          location = list(element)
          sensationList.append(
              self._getSDRPairs(
                  [(location,
                    physicalObject.getFeatureID(location))] * self.numColumns
              )
          )
          if plot:
            x, y, z = tuple(location)
            ax.scatter(x, y, z, marker="v", s=100, c="r")

        else:
          raise ValueError("Unsupported type for location spec")

      objects[objectName] = sensationList
      if plot:
        plt.title("Learning points for object {}".format(objectName))
        plt.savefig("learn_{}.png".format(objectName))
        plt.close()

    self._checkObjectsToLearn(objects)
    return objects

  def provideObjectToInfer(self, inferenceConfig, plot=False):
    """
    Returns the sensations in a canonical format to be sent to an experiment.

    The input inferenceConfig should be a dict with the following form. The
    "pairs" field provide a mapping from cortical column to a list of
    sensations, each sensations being either:
      - a feature key to sample a location from
      - an explicit location

    {
      "numSteps": 2         # number of sensations
      "noiseLevel": 0.05    # noise to add to sensations (optional)
      "objectName": 0       # optional
      "pairs": {
        0: ["random", "face"]              # locations for cortical column 0
        1: [(12, 32, 34), (23, 23, 32)]    # locations for cortical column 1
      }
    }

    The returned format is a a lists of sensations, each sensation being a
    mapping from cortical column index to a pair of SDR's (one location and
    one feature).

    Parameters:
    ----------------------------
    @param   inferenceConfig (dict)
             Inference spec for experiment (cf above for format)
    """
    if "numSteps" in inferenceConfig:
      numSteps = inferenceConfig["numSteps"]
    else:
      # fall back to the number of sensations listed for column 0
      numSteps = len(inferenceConfig["pairs"][0])

    if "noiseLevel" in inferenceConfig:
      noise = inferenceConfig["noiseLevel"]
    else:
      noise = None

    # some checks
    if numSteps == 0:
      raise ValueError("No inference steps were provided")
    for col in xrange(self.numColumns):
      if len(inferenceConfig["pairs"][col]) != numSteps:
        raise ValueError("Incompatible numSteps and actual inference steps")

    if "objectName" in inferenceConfig:
      physicalObject = self.objects[inferenceConfig["objectName"]]
    else:
      physicalObject = None

    if plot:
      # don't use if object is not known
      fig, ax = physicalObject.plot()
      colors = plt.cm.rainbow(np.linspace(0, 1, numSteps))

    sensationSteps = []
    for step in xrange(numSteps):
      pairs = [
          inferenceConfig["pairs"][col][step] for col in xrange(self.numColumns)
      ]
      for i in xrange(len(pairs)):
        if isinstance(pairs[i], str):
          # feature key: sample a location on that feature
          location = physicalObject.sampleLocationFromFeature(pairs[i])
          pairs[i] = (
              location,
              physicalObject.getFeatureID(location)
          )
        else:
          # explicit location: look up the feature it lies on
          location = pairs[i]
          pairs[i] = (
              location,
              physicalObject.getFeatureID(location)
          )
        if plot:
          x, y, z = tuple(location)
          ax.scatter(x, y, z, marker="v", s=100, c=colors[step])

      sensationSteps.append(self._getSDRPairs(pairs, noise=noise))

    if plot:
      plt.title("Inference points for object {}".format(
          inferenceConfig["objectName"])
      )
      plt.savefig("infer_{}.png".format(
          inferenceConfig["objectName"]))
      plt.close()

    self._checkObjectToInfer(sensationSteps)
    return sensationSteps

  def addObject(self, object, name=None):
    """
    Adds an object to the Machine.

    Objects should be PhysicalObjects.
    """
    if name is None:
      # default name: insertion index
      name = len(self.objects)
    self.objects[name] = object

  def _getSDRPairs(self, pairs, noise=None):
    """
    This method takes a list of (location, feature) pairs (one pair per
    cortical column), and returns a sensation dict in the correct format,
    adding noise if necessary.

    In each pair, the location is an actual integer location to be encoded,
    and the feature is just an index.
    """
    sensations = {}

    for col in xrange(self.numColumns):
      location, featureID = pairs[col]

      # encode the location coordinates into an SDR (set of active indices)
      location = [int(coord) for coord in location]
      location = self.locationEncoder.encode(
          (np.array(location, dtype="int32"), self._getRadius(location))
      )
      location = set(location.nonzero()[0])

      # generate empty feature if requested
      if featureID == -1:
        feature = set()
      # generate union of features if requested
      elif isinstance(featureID, tuple):
        feature = set()
        for idx in list(featureID):
          feature = feature | self.features[col][idx]
      else:
        feature = self.features[col][featureID]

      if noise is not None:
        location = self._addNoise(location, noise)
        feature = self._addNoise(feature, noise)

      sensations[col] = (location, feature)

    return sensations

  def _getRadius(self, location):
    """
    Returns the radius associated with the given location.

    This is a bit of an awkward argument to the CoordinateEncoder, which uses
    it as a resolution (it was originally used to encode differently depending
    on speed in the GPS encoder). Since the coordinates are object-centric,
    for now we use the location's distance from the origin ("point radius") as
    a heuristic, but this should be experimented with and improved.
    """
    # TODO: find better heuristic
    return int(math.sqrt(sum([coord ** 2 for coord in location])))

  def _addNoise(self, pattern, noiseLevel):
    """
    Adds noise the given list of patterns and returns a list of noisy copies.
    """
    if pattern is None:
      return None

    newBits = []
    for bit in pattern:
      if random.random() < noiseLevel:
        # replace the bit with a random index within the pattern's range
        # NOTE(review): max(pattern) fails on an empty pattern -- presumably
        # noise is only applied to non-empty SDRs; confirm against callers.
        newBits.append(random.randint(0, max(pattern)))
      else:
        newBits.append(bit)

    return set(newBits)

  def _generatePattern(self, numBits, totalSize):
    """
    Generates a random SDR with specified number of bits and total size.
    """
    cellsIndices = range(totalSize)
    random.shuffle(cellsIndices)
    return set(cellsIndices[:numBits])

  def _generateFeatures(self):
    """
    Generates a pool of features to be used for the experiments.

    For each index, numColumns SDR's are created, as locations for the same
    feature should be different for each column.
    """
    size = self.sensorInputSize
    bits = self.numInputBits
    self.features = []
    for _ in xrange(self.numColumns):
      self.features.append(
          [self._generatePattern(bits, size) for _ in xrange(self.numFeatures)]
      )
class CoordinateSensorRegion(PyRegion):
  """
  CoordinateSensorRegion is a simple sensor for sending coordinate data into
  networks using NuPIC's CoordinateEncoder.

  It accepts data using the command "addDataToQueue" or through the function
  addDataToQueue() which can be called directly from Python. Data is queued up
  in a FIFO and each call to compute pops the top element.

  Each data record consists of the coordinate in an N-dimensional integer
  coordinate space, a 0/1 reset flag, and an integer sequence ID.
  """

  def __init__(self, activeBits=21, outputWidth=1000, radius=2, verbosity=0):
    # Encoder configuration; see the parameter descriptions in getSpec().
    self.verbosity = verbosity
    self.activeBits = activeBits
    self.outputWidth = outputWidth
    self.radius = radius
    # FIFO of pending records: addDataToQueue() appends left, compute() pops
    # from the right.
    self.queue = deque()
    self.encoder = CoordinateEncoder(n=self.outputWidth,
                                     w=self.activeBits,
                                     verbosity=self.verbosity)

  @classmethod
  def getSpec(cls):
    """Return base spec for this region. See base class method for more info."""
    # NOTE(review): the dataType values below use "uint"; NuPIC region specs
    # conventionally use "UInt32"/"Real32" -- confirm the engine accepts
    # "uint" here.
    spec = {
        "description": CoordinateSensorRegion.__doc__,
        "singleNodeOnly": True,
        "inputs": {},  # input data is added to queue via "addDataToQueue" command
        "outputs": {
            "dataOut": {
                "description": "Encoded coordinate SDR.",
                "dataType": "uint",
                "count": 0,
                "regionLevel": True,
                "isDefaultOutput": True,
            },
            "resetOut": {
                "description": "0/1 reset flag output.",
                "dataType": "uint",
                "count": 1,
                "regionLevel": True,
                "isDefaultOutput": False,
            },
            "sequenceIdOut": {
                "description": "Sequence ID",
                "dataType": "uint",
                "count": 1,
                "regionLevel": True,
                "isDefaultOutput": False,
            },
        },
        "parameters": {
            "activeBits": {
                "description": "The number of bits that are set to encode a single "
                               "coordinate value",
                "dataType": "uint",
                "accessMode": "ReadWrite",
                "count": 1,
                "defaultValue": 21
            },
            "outputWidth": {
                "description": "Size of output vector",
                "dataType": "uint",
                "accessMode": "ReadWrite",
                "count": 1,
                "defaultValue": 1000
            },
            "radius": {
                "description": "Radius around 'coordinate'",
                "dataType": "uint",
                "accessMode": "ReadWrite",
                "count": 1,
                "defaultValue": 2
            },
            "verbosity": {
                "description": "Verbosity level",
                "dataType": "uint",
                "accessMode": "ReadWrite",
                "count": 1
            },
        },
        "commands": {
            "addDataToQueue": {
                "description": CoordinateSensorRegion.addDataToQueue.__doc__,
            },
            "addResetToQueue": {
                "description": CoordinateSensorRegion.addResetToQueue.__doc__,
            }
        },
    }
    return spec

  def compute(self, inputs, outputs):
    """
    Get the next record from the queue and encode it.

    @param inputs This parameter is ignored. The data comes from the queue
    @param outputs See definition in the spec above.
    """
    if len(self.queue) > 0:
      # pop() takes from the right end, giving FIFO order with appendleft()
      data = self.queue.pop()
    else:
      raise Exception(
          "CoordinateSensor: No data to encode: queue is empty")

    outputs["resetOut"][0] = data["reset"]
    outputs["sequenceIdOut"][0] = data["sequenceId"]
    # CoordinateEncoder takes a (numpy integer coordinate, radius) pair
    sdr = self.encoder.encode(
        (numpy.array(data["coordinate"]), self.radius))
    outputs["dataOut"][:] = sdr

    if self.verbosity > 1:
      print "CoordinateSensor outputs:"
      print "Coordinate = ", data["coordinate"]
      print "sequenceIdOut: ", outputs["sequenceIdOut"]
      print "resetOut: ", outputs["resetOut"]
      print "dataOut: ", outputs["dataOut"].nonzero()[0]

  def addDataToQueue(self, coordinate, reset, sequenceId):
    """
    Add the given data item to the sensor's internal queue. Calls to compute
    will cause items in the queue to be dequeued in FIFO order.

    @param coordinate A list containing the N-dimensional integer coordinate
                      space to be encoded. This list can be specified in two
                      ways, as a python list of integers or as a string which
                      can evaluate to a python list of integers.

    @param reset An int or string that is 0 or 1. resetOut will be set to this
                 value when this item is computed.

    @param sequenceId An int or string with an integer ID associated with this
                      token and its sequence (document).
    """
    # NOTE(review): eval() on the string form trusts the caller completely;
    # unsafe if coordinate strings can come from untrusted input.
    if type(coordinate) == type(""):
      coordinateList = eval(coordinate)
    elif type(coordinate) == type([]):
      coordinateList = coordinate
    else:
      raise Exception(
          "CoordinateSensor.addDataToQueue: unknown type for "
          "coordinate")

    self.queue.appendleft({
        "sequenceId": int(sequenceId),
        "reset": int(reset),
        "coordinate": coordinateList,
    })

  def addResetToQueue(self, sequenceId):
    """
    Add a reset signal to the sensor's internal queue. Calls to compute will
    cause items in the queue to be dequeued in FIFO order.

    @param sequenceId An int or string with an integer ID associated with this
                      token and its sequence (document).
    """
    self.queue.appendleft({
        "sequenceId": int(sequenceId),
        "reset": 1,
        "coordinate": [],
    })

  def getOutputElementCount(self, name):
    """Returns the width of dataOut."""
    if name == "resetOut" or name == "sequenceIdOut":
      # Should never actually be called since output size is specified in spec
      return 1
    elif name == "dataOut":
      return self.outputWidth
    else:
      raise Exception("Unknown output {}.".format(name))

  def initialize(self):
    """
    Initialize the Region - nothing to do here.
    """
    pass
class Agent(object):
  """
  Agent that streams (location, steer) readings into a monitored extended
  temporal memory and forwards each encoding to a Plotter.
  """

  def __init__(self):
    # 2D coordinate encoder for the (x, z) location input.
    self.encoder = CoordinateEncoder(n=1024,
                                     w=21)
    # Scalar encoder for the steering value in [-1, 1].
    self.motorEncoder = ScalarEncoder(21, -1, 1,
                                      n=1024)
    self.tm = MonitoredExtendedTemporalMemory(
        columnDimensions=[2048],
        cellsPerColumn=1,
        initialPermanence=0.5,
        connectedPermanence=0.6,
        permanenceIncrement=0.1,
        permanenceDecrement=0.02,
        minThreshold=35,
        activationThreshold=35,
        maxNewSynapseCount=40)
    self.plotter = Plotter(self.tm,
                           showOverlaps=False,
                           showOverlapsValues=False)

    # Previous sensory encoding and steering command.
    self.lastState = None
    self.lastAction = None

  def sync(self, outputData):
    """
    Consume one record from the environment and advance the temporal memory.

    @param outputData (dict) Must contain "location" (with "x"/"z") and
                             "steer"; may contain a truthy "reset" flag.
    """
    if not ("location" in outputData and
            "steer" in outputData):
      print "Warning: Missing data:", outputData
      return

    reset = outputData.get("reset") or False

    if reset:
      print "Reset."
      self.tm.reset()

    location = outputData["location"]
    steer = outputData["steer"]

    # SCALE and RADIUS are module-level constants -- presumably encoder
    # tuning; confirm against the module header.
    x = int(location["x"] * SCALE)
    z = int(location["z"] * SCALE)
    coordinate = numpy.array([x, z])
    encoding = self.encoder.encode((coordinate, RADIUS))

    motorEncoding = self.motorEncoder.encode(steer)

    sensorPattern = set(encoding.nonzero()[0])
    motorPattern = set(motorEncoding.nonzero()[0])

    # Motor pattern drives the TM's external cells.
    self.tm.compute(sensorPattern,
                    activeExternalCells=motorPattern,
                    formInternalConnections=True)

    print self.tm.mmPrettyPrintMetrics(self.tm.mmGetDefaultMetrics())

    self.plotter.update(encoding, reset)

    if reset:
      self.plotter.render()

    self.lastState = encoding
    self.lastAction = steer
class CoordinateSensorRegion(PyRegion): """ CoordinateSensorRegion is a simple sensor for sending coordinate data into networks using NuPIC's CoordinateEncoder. It accepts data using the command "addDataToQueue" or through the function addDataToQueue() which can be called directly from Python. Data is queued up in a FIFO and each call to compute pops the top element. Each data record consists of the coordinate in an N-dimensional integer coordinate space, a 0/1 reset flag, and an integer sequence ID. """ def __init__(self, activeBits=21, outputWidth=1000, radius=2, verbosity=0): self.verbosity = verbosity self.activeBits = activeBits self.outputWidth = outputWidth self.radius = radius self.queue = deque() self.encoder = CoordinateEncoder(n=self.outputWidth, w=self.activeBits, verbosity=self.verbosity) @classmethod def getSpec(cls): """Return base spec for this region. See base class method for more info.""" spec = { "description": CoordinateSensorRegion.__doc__, "singleNodeOnly": True, "inputs": {}, # input data is added to queue via "addDataToQueue" command "outputs": { "dataOut": { "description": "Encoded coordinate SDR.", "dataType": "Real32", "count": 0, "regionLevel": True, "isDefaultOutput": True, }, "resetOut": { "description": "0/1 reset flag output.", "dataType": "UInt32", "count": 1, "regionLevel": True, "isDefaultOutput": False, }, "sequenceIdOut": { "description": "Sequence ID", "dataType": "UInt32", "count": 1, "regionLevel": True, "isDefaultOutput": False, }, }, "parameters": { "activeBits": { "description": "The number of bits that are set to encode a single " "coordinate value", "dataType": "uint", "accessMode": "ReadWrite", "count": 1, "defaultValue": 21 }, "outputWidth": { "description": "Size of output vector", "dataType": "UInt32", "accessMode": "ReadWrite", "count": 1, "defaultValue": 1000 }, "radius": { "description": "Radius around 'coordinate'", "dataType": "UInt32", "accessMode": "ReadWrite", "count": 1, "defaultValue": 2 }, "verbosity": { 
"description": "Verbosity level", "dataType": "UInt32", "accessMode": "ReadWrite", "count": 1 }, }, "commands": { "addDataToQueue": { "description": CoordinateSensorRegion.addDataToQueue.__doc__, }, "addResetToQueue": { "description": CoordinateSensorRegion.addResetToQueue.__doc__, } }, } return spec def compute(self, inputs, outputs): """ Get the next record from the queue and encode it. @param inputs This parameter is ignored. The data comes from the queue @param outputs See definition in the spec above. """ if len(self.queue) > 0: data = self.queue.pop() else: raise Exception("CoordinateSensor: No data to encode: queue is empty") outputs["resetOut"][0] = data["reset"] outputs["sequenceIdOut"][0] = data["sequenceId"] sdr = self.encoder.encode((numpy.array(data["coordinate"]), self.radius)) outputs["dataOut"][:] = sdr if self.verbosity > 1: print "CoordinateSensor outputs:" print "Coordinate = ", data["coordinate"] print "sequenceIdOut: ", outputs["sequenceIdOut"] print "resetOut: ", outputs["resetOut"] print "dataOut: ", outputs["dataOut"].nonzero()[0] def addDataToQueue(self, coordinate, reset, sequenceId): """ Add the given data item to the sensor's internal queue. Calls to compute will cause items in the queue to be dequeued in FIFO order. @param coordinate A list containing the N-dimensional integer coordinate space to be encoded. This list can be specified in two ways, as a python list of integers or as a string which can evaluate to a python list of integers. @param reset An int or string that is 0 or 1. resetOut will be set to this value when this item is computed. @param sequenceId An int or string with an integer ID associated with this token and its sequence (document). 
""" if type(coordinate) == type(""): coordinateList = eval(coordinate) elif type(coordinate) == type([]): coordinateList = coordinate else: raise Exception("CoordinateSensor.addDataToQueue: unknown type for " "coordinate") self.queue.appendleft({ "sequenceId": int(sequenceId), "reset": int(reset), "coordinate": coordinateList, }) def addResetToQueue(self, sequenceId): """ Add a reset signal to the sensor's internal queue. Calls to compute will cause items in the queue to be dequeued in FIFO order. @param sequenceId An int or string with an integer ID associated with this token and its sequence (document). """ self.queue.appendleft({ "sequenceId": int(sequenceId), "reset": 1, "coordinate": [], }) def getOutputElementCount(self, name): """Returns the width of dataOut.""" if name == "resetOut" or name == "sequenceIdOut": # Should never actually be called since output size is specified in spec return 1 elif name == "dataOut": return self.outputWidth else: raise Exception("Unknown output {}.".format(name)) def initialize(self): """ Initialize the Region - nothing to do here. """ pass
class PluggableUniverseSensor(PyRegion): """ Slightly modified version of the PluggableEncoderSensor. Holds an observation value and a reward and encodes it into network output. It requires you to reach in and insert an encoder: .. code-block:: python timestampSensor = network.addRegion("timestampSensor", 'py.PluggableUniverseSensor', "") timestampSensor.getSelf().encoder = DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay") """ @classmethod def getSpec(cls): spec = dict( description=PluggableUniverseSensor.__doc__, singleNodeOnly=True, outputs=dict( encoded=dict(description="The encoded observational data ", dataType="Real32", count=0, regionLevel=True, isDefaultOutput=True), resetOut=dict(description="Reset signal", dataType="Real32", count=1, regionLevel=True, isDefaultOutput=False), reward=dict( description= "The reward from the current iteration. Dictionary with value.", dataType='Real32', count=0, required=False, regionLevel=True, isDefaultOutput=True)), parameters=dict(), ) return spec def __init__(self, **kwargs): # We don't know the sensed value's type, so it's not a spec parameter. 
self._sensedValue = None # coordinate encoder also taking 6400 output SDR with 4% on self.coordinateEncoder = CoordinateEncoder(n=80 * 80, w=257) def initialize(self): pass def compute(self, inputs, outputs): if self.encoder is None: raise Exception('Please insert an encoder.') viualfield = [] self.encoder.encodeIntoArray(self._sensedValue["observation"], viualfield, self._sensedValue['mouse']) # append coordinate encoded with radius 3 npCoords = np.array([ self._sensedValue['coordinates']['x'], self._sensedValue['coordinates']['y'] ]) coords = self.coordinateEncoder.encode((npCoords, 3)) outputs['encoded'][:] = np.append(viualfield, coords) outputs['reward'][:] = self._sensedValue["reward"] outputs['resetOut'][:] = self._sensedValue["done"] # Debug print '~~~~~ Sensor Summary ~~~~~' print "[Sensor] Inputs:Reward", self._sensedValue['reward'] print "[Sensor] Outputs:Reward", outputs['reward'] print "[Sensor] Outputs:Done", outputs['resetOut'][0] print "[Sensor] Observation", outputs['encoded'].nonzero()[0] print "[Sensor] Observation on bits length", len( outputs['encoded'].nonzero()[0]), 'from total', len( outputs['encoded']) def getOutputElementCount(self, name): if name == 'encoded': return self.encoder.getWidth() + self.coordinateEncoder.getWidth() elif name == 'reward': return 1 else: raise Exception('Unrecognized output %s' % name) def getState(self): """ Returns the current state saved in the encoder (1d numpy array) """ return self.encoder.lastRecord def getSensedValue(self): """ :return: sensed value """ return self._sensedValue def setSensedValue(self, value): """ :param value: will be encoded when this region does a compute. 
""" self._sensedValue = value def getParameter(self, parameterName, index=-1): if parameter == 'sensedValue': raise Exception( 'For the PluggableUniverseSensor, get the sensedValue via the getSensedValue method' ) else: raise Exception('Unrecognized parameter %s' % parameterName) def setParameter(self, parameterName, index, parameterValue): if parameter == 'sensedValue': raise Exception( 'For the PluggableUniverseSensor, set the sensedValue via the setSensedValue method' ) else: raise Exception('Unrecognized parameter %s' % parameterName)
class ContinuousLocationObjectMachine(ObjectMachineBase):
  """
  This implementation of the object machine uses continuous locations instead
  of discrete random ones. They are created using a CoordinateEncoder.

  The "objects" should be PhysicalObjects as defined in physical_object_base
  and physical_objects. Subclass the base implementation for specific needs.
  """

  def __init__(self,
               numInputBits=41,
               sensorInputSize=2048,
               externalInputSize=2048,
               numCorticalColumns=1,
               numFeatures=400,
               dimension=3,
               seed=42):
    """
    At creation, the SimpleObjectMachine creates a pool of locations and
    features SDR's.

    Parameters:
    ----------------------------
    @param   numInputBits (int)
             Number of ON bits in the input.
             Note: it should be uneven as the encoder only accepts uneven
             number of bits.

    @param   sensorInputSize (int)
             Total number of bits in the sensory input

    @param   externalInputSize (int)
             Total number of bits the external (location) input

    @param   numCorticalColumns (int)
             Number of cortical columns used in the experiment

    @param   dimension (int)
             Dimension of the locations. Will typically be 3.

    @param   numFeatures (int)
             Number of feature SDRs to generate per cortical column. There is
             typically no need to not use the default value, unless the user
             knows he will use more than 400 patterns.

    @param   seed (int)
             Seed to be used in the machine
    """
    super(ContinuousLocationObjectMachine, self).__init__(numInputBits,
                                                          sensorInputSize,
                                                          externalInputSize,
                                                          numCorticalColumns,
                                                          seed)

    # location and features pool
    self.numFeatures = numFeatures
    self._generateFeatures()

    self.dimension = dimension
    # Encodes integer coordinates as SDRs; radius per location comes from
    # _getRadius.
    self.locationEncoder = CoordinateEncoder(w=numInputBits,
                                             n=externalInputSize,
                                             name="locationEncoder")

  def provideObjectsToLearn(self, learningConfig, plot=False):
    """
    Returns the objects in a canonical format to be sent to an experiment.

    The input, learningConfig, should have the following format. It is a
    mapping from object to a list of features to sample locations from, and
    the number of points to sample from each feature.
    Note that these objects should be first added with .addObjects().
    These features can be either hard-coded with their key or accessed with
    .getFeatures.
    An other possibility is to directly specify locations. The machine will
    use the object to find the corresponding feature (an empty feature will
    be sent if the location is not on the object's surface).

    learningConfig = {
      # hard-coded keys and number of points
      "cube": [("face", 5), ("edge", 5), ("vertex", 3)],

      # programmatically-accessed keys and number of points
      "cylinder": [(feature, 5) for feature in cylinder.getFeatures()],

      # specific locations
      "sphere": [(10, 5, 3), (12, 45, 32), (12, 5, 46)],
    }

    The returned format is a dictionary where the keys are object names, and
    values are lists of sensations, each sensation being a mapping from
    cortical column index to a pair of SDR's (one location and one feature).

    Parameters:
    ----------------------------
    @param   learningConfig (dict)
             Configuration for learning, as described above.
    """
    objects = {}

    for objectName, locationList in learningConfig.iteritems():
      sensationList = []
      physicalObject = self.objects[objectName]
      if plot:
        fig, ax = physicalObject.plot()

      for element in locationList:

        # location name and number of points
        if len(element) == 2:
          featureName, numLocations = element
          for _ in xrange(numLocations):
            location = physicalObject.sampleLocationFromFeature(featureName)
            # one (location, feature) pair replicated for every column
            sensationList.append(
              self._getSDRPairs(
                [(location,
                  physicalObject.getFeatureID(location))] * self.numColumns))
            if plot:
              x, y, z = tuple(location)
              ax.scatter(x, y, z, marker="v", s=100, c="r")

        # explicit location
        elif len(element) == 3:
          location = list(element)
          sensationList.append(
            self._getSDRPairs([
              (location,
               physicalObject.getFeatureID(location))
            ] * self.numColumns))
          if plot:
            x, y, z = tuple(location)
            ax.scatter(x, y, z, marker="v", s=100, c="r")

        else:
          raise ValueError("Unsupported type for location spec")

      objects[objectName] = sensationList
      if plot:
        plt.title("Learning points for object {}".format(objectName))
        plt.savefig("learn_{}.png".format(objectName))
        plt.close()

    self._checkObjectsToLearn(objects)
    return objects

  def provideObjectToInfer(self, inferenceConfig, plot=False):
    """
    Returns the sensations in a canonical format to be sent to an experiment.

    The input inferenceConfig should be a dict with the following form. The
    "pairs" field provide a mapping from cortical column to a list of
    sensations, each sensations being either:
      - a feature key to sample a location from
      - an explicit location

    {
      "numSteps": 2  # number of sensations
      "noiseLevel": 0.05  # noise to add to sensations (optional)
      "objectName": 0  # optional
      "pairs": {
        0: ["random", "face"]  # locations for cortical column 0
        1: [(12, 32, 34), (23, 23, 32)]  # locations for cortical column 1
      }
    }

    The returned format is a lists of sensations, each sensation being a
    mapping from cortical column index to a pair of SDR's (one location and
    one feature).

    Parameters:
    ----------------------------
    @param   inferenceConfig (dict)
             Inference spec for experiment (cf above for format)
    """
    # numSteps defaults to the length of column 0's pair list
    if "numSteps" in inferenceConfig:
      numSteps = inferenceConfig["numSteps"]
    else:
      numSteps = len(inferenceConfig["pairs"][0])
    if "noiseLevel" in inferenceConfig:
      noise = inferenceConfig["noiseLevel"]
    else:
      noise = None

    # some checks
    if numSteps == 0:
      raise ValueError("No inference steps were provided")
    for col in xrange(self.numColumns):
      if len(inferenceConfig["pairs"][col]) != numSteps:
        raise ValueError("Incompatible numSteps and actual inference steps")

    if "objectName" in inferenceConfig:
      physicalObject = self.objects[inferenceConfig["objectName"]]
    else:
      physicalObject = None

    if plot:  # don't use if object is not known
      fig, ax = physicalObject.plot()
      colors = plt.cm.rainbow(np.linspace(0, 1, numSteps))

    sensationSteps = []
    for step in xrange(numSteps):
      pairs = [
        inferenceConfig["pairs"][col][step] for col in xrange(self.numColumns)
      ]
      for i in xrange(len(pairs)):
        # string -> feature key to sample a location from; otherwise the
        # element is already an explicit location
        if isinstance(pairs[i], str):
          location = physicalObject.sampleLocationFromFeature(pairs[i])
          pairs[i] = (location, physicalObject.getFeatureID(location))
        else:
          location = pairs[i]
          pairs[i] = (location, physicalObject.getFeatureID(location))
        if plot:
          x, y, z = tuple(location)
          ax.scatter(x, y, z, marker="v", s=100, c=colors[step])
      sensationSteps.append(self._getSDRPairs(pairs, noise=noise))

    if plot:
      plt.title("Inference points for object {}".format(
        inferenceConfig["objectName"]))
      plt.savefig("infer_{}.png".format(inferenceConfig["objectName"]))
      plt.close()

    self._checkObjectToInfer(sensationSteps)
    return sensationSteps

  def addObject(self, object, name=None):
    """
    Adds an object to the Machine.

    Objects should be PhysicalObjects.
    """
    # default key is the insertion index
    if name is None:
      name = len(self.objects)
    self.objects[name] = object

  def _getSDRPairs(self, pairs, noise=None):
    """
    This method takes a list of (location, feature) pairs (one pair per
    cortical column), and returns a sensation dict in the correct format,
    adding noise if necessary.

    In each pair, the location is an actual integer location to be encoded,
    and the feature is just an index.
    """
    sensations = {}

    for col in xrange(self.numColumns):
      location, featureID = pairs[col]
      # encode the integer coordinates, then keep the active-bit indices
      location = [int(coord) for coord in location]
      location = self.locationEncoder.encode(
        (np.array(location, dtype="int32"), self._getRadius(location)))
      location = set(location.nonzero()[0])

      # generate empty feature if requested
      if featureID == -1:
        feature = set()
      # generate union of features if requested
      elif isinstance(featureID, tuple):
        feature = set()
        for idx in list(featureID):
          feature = feature | self.features[col][idx]
      else:
        feature = self.features[col][featureID]

      if noise is not None:
        location = self._addNoise(location, noise)
        feature = self._addNoise(feature, noise)

      sensations[col] = (location, feature)

    return sensations

  def _getRadius(self, location):
    """
    Returns the radius associated with the given location.

    This is a bit of an awkward argument to the CoordinateEncoder, which
    specifies the resolution (in was used to encode differently depending on
    speed in the GPS encoder).

    Since the coordinates are object-centric, for now we use the "point
    radius" as an heuristic, but this should be experimented and improved.
    """
    # TODO: find better heuristic
    # Euclidean distance of the location from the object-centric origin.
    return int(math.sqrt(sum([coord**2 for coord in location])))

  def _addNoise(self, pattern, noiseLevel):
    """
    Adds noise the given list of patterns and returns a list of noisy copies.
    """
    if pattern is None:
      return None

    newBits = []
    for bit in pattern:
      # with probability noiseLevel, replace the bit with a random index
      if random.random() < noiseLevel:
        newBits.append(random.randint(0, max(pattern)))
      else:
        newBits.append(bit)

    return set(newBits)

  def _generatePattern(self, numBits, totalSize):
    """
    Generates a random SDR with specified number of bits and total size.
    """
    cellsIndices = range(totalSize)
    random.shuffle(cellsIndices)
    return set(cellsIndices[:numBits])

  def _generateFeatures(self):
    """
    Generates a pool of features to be used for the experiments.

    For each index, numColumns SDR's are created, as locations for the same
    feature should be different for each column.
    """
    size = self.sensorInputSize
    bits = self.numInputBits

    self.features = []
    for _ in xrange(self.numColumns):
      self.features.append([
        self._generatePattern(bits, size) for _ in xrange(self.numFeatures)
      ])
class Agent(object): def __init__(self): self.encoder = CoordinateEncoder(n=1024, w=21) self.motorEncoder = ScalarEncoder(21, -1, 1, n=1024) self.tm = MonitoredExtendedTemporalMemory( columnDimensions=[2048], basalInputDimensions: (999999,) # Dodge input checking. cellsPerColumn=1, initialPermanence=0.5, connectedPermanence=0.6, permanenceIncrement=0.1, permanenceDecrement=0.02, minThreshold=35, activationThreshold=35, maxNewSynapseCount=40) self.plotter = Plotter(self.tm, showOverlaps=False, showOverlapsValues=False) self.lastState = None self.lastAction = None self.prevMotorPattern = () def sync(self, outputData): if not ("location" in outputData and "steer" in outputData): print "Warning: Missing data:", outputData return reset = outputData.get("reset") or False if reset: print "Reset." self.tm.reset() location = outputData["location"] steer = outputData["steer"] x = int(location["x"] * SCALE) z = int(location["z"] * SCALE) coordinate = numpy.array([x, z]) encoding = self.encoder.encode((coordinate, RADIUS)) motorEncoding = self.motorEncoder.encode(steer) sensorPattern = set(encoding.nonzero()[0]) motorPattern = set(motorEncoding.nonzero()[0]) self.tm.compute(sensorPattern, activeCellsExternalBasal=motorPattern, reinforceCandidatesExternalBasal=self.prevMotorPattern, growthCandidatesExternalBasal=self.prevMotorPattern) print self.tm.mmPrettyPrintMetrics(self.tm.mmGetDefaultMetrics()) self.plotter.update(encoding, reset) if reset: self.plotter.render() self.lastState = encoding self.lastAction = steer self.prevMotorPattern = motorPattern