class STDP(nengo.learning_rules.LearningRuleType):
    """Spike-timing dependent plasticity (STDP) learning rule.

    Weight changes are driven by pre- and post-synaptic spike traces,
    each with its own time constant and amplitude, with the resulting
    weights constrained by the chosen bounding strategy.
    """

    # Attributes consumed by other Nengo objects.
    modifies = "weights"
    probeable = ("pre_trace", "post_trace", "pre_scale", "post_scale")

    # Tunable parameters; trace time constants and amplitudes must be
    # strictly positive (low=0, low_open=True).
    pre_tau = NumberParam("pre_tau", low=0, low_open=True)
    pre_amp = NumberParam("pre_amp", low=0, low_open=True)
    post_tau = NumberParam("post_tau", low=0, low_open=True)
    post_amp = NumberParam("post_amp", low=0, low_open=True)
    bounds = StringParam("bounds")
    max_weight = NumberParam("max_weight")
    min_weight = NumberParam("min_weight")

    def __init__(
        self,
        pre_tau=0.0168,
        post_tau=0.0337,
        pre_amp=1.0,
        post_amp=1.0,
        bounds="hard",
        max_weight=0.3,
        min_weight=-0.3,
        learning_rate=1e-9,
    ):
        # Trace dynamics for the pre- and post-synaptic sides.
        self.pre_tau = pre_tau
        self.post_tau = post_tau
        self.pre_amp = pre_amp
        self.post_amp = post_amp
        # Weight-bounding strategy and the limits it enforces.
        self.bounds = bounds
        self.max_weight = max_weight
        self.min_weight = min_weight
        super(STDP, self).__init__(learning_rate)
class TestClass:
    """For testing that defaults are properly rendered in docs."""

    # Parameter descriptors whose declared defaults should appear in the
    # generated documentation.
    int_param = IntParam("int_param", default=1)
    str_param = StringParam("str_param", default="hello")

    # NOTE(review): the docstrings below are intentionally terse — they are
    # the content being rendered/checked by the docs machinery.
    def __init__(self, int_param=Default, str_param=Default,
                 module_param=np.random):
        """Init method"""

    def another_method(self, module_param=np.random):
        """A method"""
class STDP(nengo.learning_rules.LearningRuleType):
    """Simplified Spike-timing dependent plasticity rule.

    Parameters
    ----------
    alf_p : float, optional
        Amplitude of the potentiation (weight-increase) update.
    alf_n : float, optional
        Amplitude of the depression (weight-decrease) update.
    beta_p : float, optional
        Exponent shaping the potentiation update.
    beta_n : float, optional
        Exponent shaping the depression update.
    bounds : str, optional
        Weight-bounding strategy (e.g. 'hard').
    max_weight : float, optional
        Upper bound on connection weights.
    min_weight : float, optional
        Lower bound on connection weights.
    """

    # Used by other Nengo objects
    modifies = 'weights'
    probeable = ('pre_trace', 'post_trace', 'pre_scale', 'post_scale')

    # Parameters; amplitudes and exponents must be strictly positive.
    alf_p = NumberParam('alf_p', low=0, low_open=True)
    alf_n = NumberParam('alf_n', low=0, low_open=True)
    beta_p = NumberParam('beta_p', low=0, low_open=True)
    beta_n = NumberParam('beta_n', low=0, low_open=True)
    bounds = StringParam('bounds')
    max_weight = NumberParam('max_weight')
    min_weight = NumberParam('min_weight')

    def __init__(self, alf_p=0.01, alf_n=0.005, beta_p=1.5, beta_n=2.5,
                 bounds='hard', max_weight=1.0, min_weight=0.0001):
        self.alf_p = alf_p
        self.alf_n = alf_n
        self.beta_p = beta_p
        self.beta_n = beta_n
        self.bounds = bounds
        self.max_weight = max_weight
        self.min_weight = min_weight
        # Rely on the base class's default learning rate.
        super(STDP, self).__init__()
class BahlNeuron(NeuronType):
    """Compartmental neuron from Bahl et al 2012.

    Simulation is delegated to the NEURON simulator (via ``neuron.run``);
    this class adapts NEURON cells to the Nengo neuron-type interface.

    Parameters
    ----------
    bias_method : str, optional
        Strategy for applying bias to the cells (default: 'decode').
    """

    probeable = ('spikes', 'voltage')

    bias_method = StringParam('bias_method')

    def __init__(self, bias_method='decode'):
        super(BahlNeuron, self).__init__()
        self.bias_method = bias_method

    def rates(self, x, gain, bias):
        # Rates are not analytically defined for this compartmental model;
        # the input is passed through unchanged.
        return x

    def gain_bias(self, max_rates, intercepts):
        # Unit gains and biases; actual tuning happens in the NEURON cells.
        return np.ones(len(max_rates)), np.ones(len(max_rates))

    def step_math(self, dt, spiked, neurons, voltage, time):
        """Run NEURON forward one nengo timestep.

        Compare the current and previous spike arrays for this bioneuron.
        If they're different, the neuron has spiked.
        """
        neuron.run(time * 1000)  # NEURON's clock is in milliseconds
        for i, bahl in enumerate(neurons):
            count, volt = bahl.update()
            spiked[i] = count / dt
            voltage[i] = volt

    @property
    def _argreprs(self):
        """Argument reprs for values that differ from their defaults."""
        args = []

        def add(attr, default):
            if getattr(self, attr) != default:
                args.append("%s=%s" % (attr, getattr(self, attr)))

        # BUGFIX: previously the *current* value was passed as the default
        # (``add("bias_method", self.bias_method)``), so the comparison was
        # always False and non-default values were never reported. Compare
        # against the actual __init__ default instead.
        add("bias_method", "decode")
        return args
class NengoObject(SupportDefaultsMixin, metaclass=NetworkMember):
    """A base class for Nengo objects.

    Parameters
    ----------
    label : string
        A descriptive label for the object.
    seed : int
        The seed used for random number generation.

    Attributes
    ----------
    label : string
        A descriptive label for the object.
    seed : int
        The seed used for random number generation.
    """

    # Order in which parameters have to be initialized.
    # Missing parameters will be initialized last in an undefined order.
    # This is needed for pickling and copying of Nengo objects when the
    # parameter initialization order matters.
    _param_init_order = []

    label = StringParam("label", default=None, optional=True)
    seed = IntParam("seed", default=None, optional=True)

    def __init__(self, label, seed):
        super().__init__()
        # While False, __setattr__ does not warn about new attributes;
        # flipped implicitly once construction completes.
        self._initialized = False
        self.label = label
        self.seed = seed

    def __getstate__(self):
        # Parameter values live in descriptors, not __dict__, so they are
        # copied into the state dict explicitly for pickling.
        state = self.__dict__.copy()
        state["_initialized"] = False

        for attr in self.params:
            param = getattr(type(self), attr)
            if self in param:
                state[attr] = getattr(self, attr)

        return state

    def __setstate__(self, state):
        # Restore order-sensitive parameters first, then the remaining
        # parameters, then plain instance attributes.
        for attr in self._param_init_order:
            setattr(self, attr, state.pop(attr))

        for attr in self.params:
            if attr in state:
                setattr(self, attr, state.pop(attr))

        for k, v in state.items():
            setattr(self, k, v)

        self._initialized = True
        # An unpickled object is not in any network; warn if unpickling
        # happens inside a ``with network:`` block where that is surprising.
        if len(nengo.Network.context) > 0:
            warnings.warn(NotAddedToNetworkWarning(self))

    def __setattr__(self, name, val):
        # Warn on brand-new attributes after construction — usually a typo
        # for an existing parameter name.
        initialized = hasattr(self, "_initialized") and self._initialized
        if initialized and not hasattr(self, name):
            warnings.warn(
                "Creating new attribute '%s' on '%s'. "
                "Did you mean to change an existing attribute?" % (name, self),
                SyntaxWarning,
            )
        super().__setattr__(name, val)

    def __str__(self):
        return self._str(
            include_id=not hasattr(self, "label") or self.label is None)

    def __repr__(self):
        return self._str(include_id=True)

    def _str(self, include_id):
        # Shared formatter for __str__/__repr__; the id is included when the
        # object would otherwise be ambiguous (no label) or for repr.
        return "<%s%s%s>" % (
            type(self).__name__,
            "" if not hasattr(self, "label")
            else " (unlabeled)" if self.label is None
            else ' "%s"' % self.label,
            " at 0x%x" % id(self) if include_id else "",
        )

    @property
    def params(self):
        """Returns a list of parameter names that can be set."""
        return list(iter_params(self))

    def copy(self, add_to_container=True):
        with warnings.catch_warnings():
            # We warn when copying since we can't change add_to_container.
            # However, we deal with it here, so we ignore the warning.
            warnings.simplefilter("ignore", category=NotAddedToNetworkWarning)
            c = copy(self)
        if add_to_container:
            nengo.Network.add(c)
        return c
class Ensemble(NengoObject):
    """A group of neurons that collectively represent a vector.

    Parameters
    ----------
    n_neurons : int
        The number of neurons.
    dimensions : int
        The number of representational dimensions.
    radius : int, optional
        The representational radius of the ensemble.
    encoders : Distribution or ndarray (`n_neurons`, `dimensions`), optional
        The encoders, used to transform from representational space
        to neuron space. Each row is a neuron's encoder, each column is a
        representational dimension.
    intercepts : Distribution or ndarray (`n_neurons`), optional
        The point along each neuron's encoder where its activity is zero. If
        e is the neuron's encoder, then the activity will be zero when
        dot(x, e) <= c, where c is the given intercept.
    max_rates : Distribution or ndarray (`n_neurons`), optional
        The activity of each neuron when dot(x, e) = 1, where e is the
        neuron's encoder.
    eval_points : Distribution or ndarray (`n_eval_points`, `dims`), optional
        The evaluation points used for decoder solving, spanning the interval
        (-radius, radius) in each dimension, or a distribution from which
        to choose evaluation points. Default: ``UniformHypersphere``.
    n_eval_points : int, optional
        The number of evaluation points to be drawn from the `eval_points`
        distribution. If None (the default), then a heuristic is used to
        determine the number of evaluation points.
    neuron_type : Neurons, optional
        The model that simulates all neurons in the ensemble.
    noise : StochasticProcess, optional
        Random noise injected directly into each neuron in the ensemble
        as current. A sample is drawn for each individual neuron on
        every simulation step.
    seed : int, optional
        The seed used for random number generation.
    label : str, optional
        A name for the ensemble. Used for debugging and visualization.
    """

    n_neurons = IntParam(default=None, low=1)
    dimensions = IntParam(default=None, low=1)
    radius = NumberParam(default=1, low=1e-10)
    neuron_type = NeuronTypeParam(default=LIF())
    # Distribution params below are validated against n_neurons/dimensions
    # via their sample_shape, so those two must be assigned first.
    encoders = DistributionParam(default=UniformHypersphere(surface=True),
                                 sample_shape=('n_neurons', 'dimensions'))
    intercepts = DistributionParam(default=Uniform(-1.0, 1.0), optional=True,
                                   sample_shape=('n_neurons', ))
    max_rates = DistributionParam(default=Uniform(200, 400), optional=True,
                                  sample_shape=('n_neurons', ))
    n_eval_points = IntParam(default=None, optional=True)
    eval_points = DistributionParam(default=UniformHypersphere(),
                                    sample_shape=('*', 'dimensions'))
    bias = DistributionParam(default=None, optional=True,
                             sample_shape=('n_neurons', ))
    gain = DistributionParam(default=None, optional=True,
                             sample_shape=('n_neurons', ))
    noise = StochasticProcessParam(default=None, optional=True)
    seed = IntParam(default=None, optional=True)
    label = StringParam(default=None, optional=True)

    def __init__(self, n_neurons, dimensions, radius=Default, encoders=Default,
                 intercepts=Default, max_rates=Default, eval_points=Default,
                 n_eval_points=Default, neuron_type=Default, gain=Default,
                 bias=Default, noise=Default, seed=Default, label=Default):
        # NOTE: assignment order matters — sizes (n_neurons, dimensions)
        # must be set before the distributions validated against them.
        self.n_neurons = n_neurons
        self.dimensions = dimensions
        self.radius = radius
        self.encoders = encoders
        self.intercepts = intercepts
        self.max_rates = max_rates
        self.label = label
        self.n_eval_points = n_eval_points
        self.eval_points = eval_points
        self.bias = bias
        self.gain = gain
        self.neuron_type = neuron_type
        self.noise = noise
        self.seed = seed
        # Accessor object for this ensemble's neurons (see `neurons`).
        self._neurons = Neurons(self)

    def __getitem__(self, key):
        # Slicing an ensemble yields a view on a subset of its dimensions.
        return ObjView(self, key)

    def __len__(self):
        return self.dimensions

    @property
    def neurons(self):
        return self._neurons

    @neurons.setter
    def neurons(self, dummy):
        raise AttributeError("neurons cannot be overwritten.")

    @property
    def probeable(self):
        return ["decoded_output", "input"]

    @property
    def size_in(self):
        return self.dimensions

    @property
    def size_out(self):
        return self.dimensions
class NengoObject(with_metaclass(NetworkMember)):
    """A base class for Nengo objects.

    Parameters
    ----------
    label : string
        A descriptive label for the object.
    seed : int
        The seed used for random number generation.

    Attributes
    ----------
    label : string
        A descriptive label for the object.
    seed : int
        The seed used for random number generation.
    """

    label = StringParam('label', default=None, optional=True)
    seed = IntParam('seed', default=None, optional=True)

    def __init__(self, label, seed):
        self.label = label
        self.seed = seed

    def __getstate__(self):
        raise NotImplementedError("Nengo objects do not support pickling")

    def __setstate__(self, state):
        raise NotImplementedError("Nengo objects do not support pickling")

    def __setattr__(self, name, val):
        # Warn on brand-new attributes after construction — usually a typo
        # for an existing parameter name.
        if hasattr(self, '_initialized') and not hasattr(self, name):
            warnings.warn(
                "Creating new attribute '%s' on '%s'. "
                "Did you mean to change an existing attribute?" % (name, self),
                SyntaxWarning)
        # Resolve the Default sentinel via the active Config.
        if val is Default:
            val = Config.default(type(self), name)

        if rc.getboolean('exceptions', 'simplified'):
            # Re-raise validation errors without the internal traceback to
            # keep user-facing error messages short.
            try:
                super(NengoObject, self).__setattr__(name, val)
            except ValidationError:
                exc_info = sys.exc_info()
                reraise(exc_info[0], exc_info[1], None)
        else:
            super(NengoObject, self).__setattr__(name, val)

    def __str__(self):
        return self._str(
            include_id=not hasattr(self, 'label') or self.label is None)

    def __repr__(self):
        return self._str(include_id=True)

    def _str(self, include_id):
        # Shared formatter for __str__/__repr__.
        return "<%s%s%s>" % (
            self.__class__.__name__,
            "" if not hasattr(self, 'label')
            else " (unlabeled)" if self.label is None
            else ' "%s"' % self.label,
            " at 0x%x" % id(self) if include_id else "")

    @classmethod
    def param_list(cls):
        """Returns an iterable of parameter names that can be set.

        Note: this is a generator expression, not a list.
        """
        return (attr for attr in dir(cls) if is_param(getattr(cls, attr)))

    @property
    def params(self):
        """Returns an iterable of parameter names that can be set."""
        return self.param_list()
class Network:
    """A network contains ensembles, nodes, connections, and other networks.

    A network is primarily used for grouping together related
    objects and connections for visualization purposes.
    However, you can also use networks as a nice way to reuse
    network creation code.

    To group together related objects that you do not need to reuse,
    you can create a new ``Network`` and add objects in a ``with`` block.
    For example:

    .. testcode::

       network = nengo.Network()
       with network:
           with nengo.Network(label="Vision"):
               v1 = nengo.Ensemble(n_neurons=100, dimensions=2)
           with nengo.Network(label="Motor"):
               sma = nengo.Ensemble(n_neurons=100, dimensions=2)
           nengo.Connection(v1, sma)

    To reuse a group of related objects, you can create a new subclass
    of ``Network``, and add objects in the ``__init__`` method.
    For example:

    .. testcode::

       class OcularDominance(nengo.Network):
           def __init__(self):
               self.column = nengo.Ensemble(n_neurons=100, dimensions=2)

       network = nengo.Network()
       with network:
           left_eye = OcularDominance()
           right_eye = OcularDominance()
           nengo.Connection(left_eye.column, right_eye.column)

    Parameters
    ----------
    label : str, optional
        Name of the network.
    seed : int, optional
        Random number seed that will be fed to the random number generator.
        Setting the seed makes the network's build process deterministic.
    add_to_container : bool, optional
        Determines if this network will be added to the current container.
        If None, this network will be added to the network at the top of the
        ``Network.context`` stack unless the stack is empty.

    Attributes
    ----------
    connections : list
        `.Connection` instances in this network.
    ensembles : list
        `.Ensemble` instances in this network.
    label : str
        Name of this network.
    networks : list
        `.Network` instances in this network.
    nodes : list
        `.Node` instances in this network.
    probes : list
        `.Probe` instances in this network.
    seed : int
        Random seed used by this network.
    """

    context = ThreadLocalStack(maxsize=100)  # static stack of Network objects

    label = StringParam("label", optional=True, readonly=False)
    seed = IntParam("seed", optional=True, readonly=False)

    def __init__(self, label=None, seed=None, add_to_container=None):
        self.label = label
        self.seed = seed
        self._config = self.default_config()

        # Containers for each kind of child object; exposed read-only via
        # the properties below.
        self._objects = {
            Ensemble: [], Node: [], Connection: [], Network: [], Probe: []
        }
        self._ensembles = self.objects[Ensemble]
        self._nodes = self.objects[Node]
        self._connections = self.objects[Connection]
        self._networks = self.objects[Network]
        self._probes = self.objects[Probe]

        # By default, we want to add to the current context, unless there is
        # no context; i.e., we're creating a top-level network.
        if add_to_container is None:
            add_to_container = len(Network.context) > 0

        if add_to_container:
            Network.add(self)

    @staticmethod
    def add(obj):
        """Add the passed object to ``Network.context``."""
        if len(Network.context) == 0:
            raise NetworkContextError(
                "'%s' must either be created inside a ``with network:`` "
                "block, or set add_to_container=False in the object's "
                "constructor." % obj)
        network = Network.context[-1]
        if not isinstance(network, Network):
            raise NetworkContextError("Current context (%s) is not a network"
                                      % network)
        # Walk the MRO so that subclasses are stored in the container of the
        # closest registered base class.
        for cls in type(obj).__mro__:
            if cls in network.objects:
                network.objects[cls].append(obj)
                break
        else:
            raise NetworkContextError("Objects of type %r cannot be added to "
                                      "networks." % type(obj).__name__)

    @staticmethod
    def default_config():
        """Constructs a `~.Config` object for setting defaults."""
        return Config(Connection, Ensemble, Node, Probe)

    def _all_objects(self, object_type):
        """Returns a list of all objects of the specified type."""
        # Make a copy of this network's list
        objects = list(self.objects[object_type])
        for subnet in self.networks:
            objects.extend(subnet._all_objects(object_type))
        return objects

    @property
    def all_objects(self):
        """(list) All objects in this network and its subnetworks."""
        objects = []
        for object_type in self.objects:
            objects.extend(self._all_objects(object_type))
        return objects

    @property
    def all_ensembles(self):
        """(list) All ensembles in this network and its subnetworks."""
        return self._all_objects(Ensemble)

    @property
    def all_nodes(self):
        """(list) All nodes in this network and its subnetworks."""
        return self._all_objects(Node)

    @property
    def all_networks(self):
        """(list) All networks in this network and its subnetworks."""
        return self._all_objects(Network)

    @property
    def all_connections(self):
        """(list) All connections in this network and its subnetworks."""
        return self._all_objects(Connection)

    @property
    def all_probes(self):
        """(list) All probes in this network and its subnetworks."""
        return self._all_objects(Probe)

    @property
    def objects(self):
        return self._objects

    @objects.setter
    def objects(self, _):
        raise ReadonlyError(attr="objects", obj=self)

    @property
    def ensembles(self):
        return self._ensembles

    @ensembles.setter
    def ensembles(self, _):
        raise ReadonlyError(attr="ensembles", obj=self)

    @property
    def nodes(self):
        return self._nodes

    @nodes.setter
    def nodes(self, _):
        raise ReadonlyError(attr="nodes", obj=self)

    @property
    def networks(self):
        return self._networks

    @networks.setter
    def networks(self, _):
        raise ReadonlyError(attr="networks", obj=self)

    @property
    def connections(self):
        return self._connections

    @connections.setter
    def connections(self, _):
        raise ReadonlyError(attr="connections", obj=self)

    @property
    def probes(self):
        return self._probes

    @probes.setter
    def probes(self, _):
        raise ReadonlyError(attr="probes", obj=self)

    @property
    def config(self):
        """(`.Config`) Configuration for this network."""
        return self._config

    @config.setter
    def config(self, _):
        raise ReadonlyError(attr="config", obj=self)

    @property
    def n_neurons(self):
        """(int) Number of neurons in this network, including subnetworks."""
        return sum(ens.n_neurons for ens in self.all_ensembles)

    def __contains__(self, obj):
        return type(obj) in self.objects and obj in self.objects[type(obj)]

    def __enter__(self):
        # Push this network and its config so that objects created inside
        # the ``with`` block are added here.
        Network.context.append(self)
        self._config.__enter__()
        return self

    def __exit__(self, dummy_exc_type, dummy_exc_value, dummy_tb):
        # Validate that the context stacks were not corrupted inside the
        # ``with`` block before popping them.
        if len(Network.context) == 0:
            raise NetworkContextError(
                "Network.context in bad state; was empty when "
                "exiting from a 'with' block.")

        config = Config.context[-1]
        if config is not self._config:
            raise ConfigError("Config.context in bad state; was expecting "
                              "current context to be '%s' but instead got "
                              "'%s'." % (self._config, config))

        network = Network.context.pop()
        if network is not self:
            raise NetworkContextError(
                "Network.context in bad state; was expecting current context "
                "to be '%s' but instead got '%s'." % (self, network))

        self._config.__exit__(dummy_exc_type, dummy_exc_value, dummy_tb)

    def __getstate__(self):
        # label and seed live in descriptors, not __dict__, so copy them
        # into the state dict explicitly for pickling.
        state = self.__dict__.copy()
        state["label"] = self.label
        state["seed"] = self.seed
        return state

    def __setstate__(self, state):
        for k, v in state.items():
            setattr(self, k, v)
        # An unpickled network is not added to any containing network.
        if len(Network.context) > 0:
            warnings.warn(NotAddedToNetworkWarning(self))

    def __str__(self):
        return "<%s %s>" % (
            type(self).__name__,
            '"%s"' % self.label
            if self.label is not None
            else "(unlabeled) at 0x%x" % id(self),
        )

    def __repr__(self):
        return "<%s %s %s>" % (
            type(self).__name__,
            '"%s"' % self.label if self.label is not None else "(unlabeled)",
            "at 0x%x" % id(self),
        )

    def copy(self, add_to_container=None):
        with warnings.catch_warnings():
            # We warn when copying since we can't change add_to_container.
            # However, we deal with it here, so we ignore the warning.
            warnings.simplefilter("ignore", category=NotAddedToNetworkWarning)
            c = deepcopy(self)
        if add_to_container is None:
            add_to_container = len(Network.context) > 0
        if add_to_container:
            Network.add(c)
        return c
class Probe(NengoObject):
    """A probe is an object that receives data from the simulation.

    This is to be used in any situation where you wish to gather simulation
    data (spike data, represented values, neuron voltages, etc.) for analysis.

    Probes cannot directly affect the simulation.

    All Nengo objects can be probed (except Probes themselves).
    Each object has different attributes that can be probed.
    To see what is probeable for each object, print its `probeable` attribute.

    >>> with nengo.Network():
    ...     ens = nengo.Ensemble(10, 1)
    >>> print(ens.probeable)
    ['decoded_output', 'input']

    Parameters
    ----------
    target : Ensemble, Node, Connection
        The Nengo object to connect to the probe.
    attr : str, optional
        The quantity to probe. Refer to the target's ``probeable`` list for
        details. Defaults to the first element in the list.
    sample_every : float, optional
        Sampling period in seconds.
    synapse : float, optional
        Post-synaptic time constant (PSTC) to use for filtering. Default is
        no filtering.
    solver : Solver, optional
        Instance of a Solver class to compute decoders for probes that require
        them (see `nengo.solvers`). Defaults to the same solver as Connection.
    seed : int
        The seed used for random number generation in the Connection.
    label : str, optional
        A name for the probe. Used for debugging and visualization.
    """

    target = TargetParam(nonzero_size_out=True)
    attr = AttributeParam(default=None)
    sample_every = NumberParam(default=None, optional=True, low=1e-10)
    synapse = SynapseParam(default=None)
    solver = ProbeSolverParam(default=ConnectionDefault)
    seed = IntParam(default=None, optional=True)
    label = StringParam(default=None, optional=True)

    def __init__(self, target, attr=None, sample_every=Default,
                 synapse=Default, solver=Default, seed=Default, label=Default):
        # target must be set first: the default attr is looked up on it.
        self.target = target
        if attr is None:
            self.attr = self.obj.probeable[0]
        else:
            self.attr = attr
        self.sample_every = sample_every
        self.synapse = synapse
        self.solver = solver
        self.seed = seed
        self.label = label

    @property
    def obj(self):
        """The underlying Nengo object, with any ObjView unwrapped."""
        if isinstance(self.target, ObjView):
            return self.target.obj
        return self.target

    @property
    def slice(self):
        """The slice applied to the target (full slice if not a view)."""
        if isinstance(self.target, ObjView):
            return self.target.slice
        return slice(None)

    @property
    def size_in(self):
        return self.target.size_out

    @property
    def size_out(self):
        # Probes are pure sinks; they emit nothing.
        return 0

    def __str__(self):
        label = "" if self.label is None else ' "%s"' % self.label
        return "<Probe%s of '%s' of %s>" % (label, self.attr, self.target)

    def __repr__(self):
        label = "" if self.label is None else ' "%s"' % self.label
        return "<Probe%s at 0x%x of '%s' of %s>" % (
            label, id(self), self.attr, self.target)
class Connection(nengo.config.SupportDefaultsMixin):
    """Connects a pre object to a post object.

    For each flattened pre ensemble, two underlying nengo connections are
    created: an excitatory path and an inhibitory path (see ``__init__``).
    """

    label = StringParam('label', default=None, optional=True)
    seed = IntParam('seed', default=None, optional=True)

    pre = PreParam('pre')
    post = nengo.connection.PrePostParam('post', nonzero_size_in=True)

    # Separate synapses for the excitatory and inhibitory paths.
    synapse_exc = nengo.connection.SynapseParam(
        'synapse_exc', default=nengo.synapses.Lowpass(tau=0.005))
    synapse_inh = nengo.connection.SynapseParam(
        'synapse_inh', default=nengo.synapses.Lowpass(tau=0.005))

    function_info = ConnectionFunctionParam('function', default=None,
                                            optional=True)

    transform = ConnectionTransformParam('transform', default=1.0)

    solver = nengo.solvers.SolverParam('solver', default=QPSolver())
    eval_points = nengo.dists.DistOrArrayParam('eval_points', default=None,
                                               optional=True,
                                               sample_shape=('*', 'size_in'))
    scale_eval_points = BoolParam('scale_eval_points', default=True)
    n_eval_points = IntParam('n_eval_points', default=None, optional=True)

    decode_bias = BoolParam('decode_bias', default=True)

    max_n_post_synapses = IntParam('max_n_post_synapses', low=0, default=None,
                                   optional=True)
    max_n_post_synapses_exc = IntParam('max_n_post_synapses_exc', low=0,
                                       default=None, optional=True)
    max_n_post_synapses_inh = IntParam('max_n_post_synapses_inh', low=0,
                                       default=None, optional=True)

    # These parameters must be initialized in this order when copying or
    # pickling (function_info validation depends on pre/post).
    _param_init_order = [
        'pre', 'post', 'synapse_exc', 'synapse_inh', 'function_info'
    ]

    def __init__(self, pre, post,
                 synapse_exc=Default,
                 synapse_inh=Default,
                 function=Default,
                 transform=Default,
                 solver=Default,
                 eval_points=Default,
                 scale_eval_points=Default,
                 n_eval_points=Default,
                 decode_bias=Default,
                 max_n_post_synapses=Default,
                 max_n_post_synapses_exc=Default,
                 max_n_post_synapses_inh=Default,
                 label=Default,
                 seed=Default):
        super().__init__()

        # Copy the parameters
        self.label = label
        self.seed = seed
        self.pre = pre
        self.post = post
        self.synapse_exc = synapse_exc
        self.synapse_inh = synapse_inh
        self.eval_points = eval_points
        self.scale_eval_points = scale_eval_points
        self.n_eval_points = n_eval_points
        self.decode_bias = decode_bias
        self.function_info = function
        self.transform = transform
        self.solver = solver
        self.max_n_post_synapses = max_n_post_synapses
        self.max_n_post_synapses_exc = max_n_post_synapses_exc
        self.max_n_post_synapses_inh = max_n_post_synapses_inh

        # For each pre object add two actual nengo connections: an excitatory
        # path and an inhibitory path
        self.connections = []
        arr_ens, arr_ns, _ = self.pre.flatten()
        for i, (ens, ns) in enumerate(zip(arr_ens, arr_ns)):
            # mkcon closes over the loop variables (ens, i, ns) but is
            # called immediately within the iteration, so late binding is
            # not an issue here.
            def mkcon(synapse_type, synapse):
                return ConnectionPart(
                    pre=ens,
                    post=self.post,
                    # Start from a zero transform; the SolverWrapper fills
                    # in the actual weights.
                    transform=np.zeros((self.post.size_in, ens.size_out)),
                    seed=self.seed,
                    synapse=synapse,
                    solver=SolverWrapper(
                        self.solver, i, self, ns, synapse_type),
                    synapse_type=synapse_type)
            self.connections.append(
                (mkcon(Excitatory, synapse_exc),
                 mkcon(Inhibitory, synapse_inh)))

    def __str__(self):
        return "<Connection {}>".format(self._str)

    def __repr__(self):
        return "<Connection at {:06X} {}>".format(id(self), self._str)

    @property
    def _str(self):
        # Human-readable description used by __str__/__repr__.
        if self.label is not None:
            return self.label
        desc = "" if self.function is None else " computing '{}'".format(
            function_name(self.function))
        return "from {} to {}{}".format(self.pre, self.post, desc)

    @property
    def function(self):
        return self.function_info.function

    @function.setter
    def function(self, function):
        self.function_info = function

    @property
    def is_decoded(self):
        return not (self.solver.weights or (
            isinstance(self.pre_obj, Neurons)
            and isinstance(self.post_obj, Neurons)))

    @property
    def _label(self):
        if self.label is not None:
            return self.label
        return "from %s to %s%s" % (
            self.pre, self.post,
            " computing '%s'" % function_name(self.function)
            if self.function is not None else "")

    @property
    def post_obj(self):
        return self.post

    @property
    def pre_obj(self):
        return self.pre

    @property
    def pre_slice(self):
        return slice(None)

    @property
    def post_slice(self):
        return slice(None)

    @property
    def size_in(self):
        return self.pre.size_out

    @property
    def size_mid(self):
        size = self.function_info.size
        return self.size_in if size is None else size

    @property
    def size_out(self):
        return self.post.size_in
class Node(NengoObject):
    """Provides arbitrary data to Nengo objects.

    Nodes can accept input, and perform arbitrary computations
    for the purpose of controlling a Nengo simulation.
    Nodes are typically not part of a brain model per se,
    but serve to summarize the assumptions being made
    about sensory data or other environment variables
    that cannot be generated by a brain model alone.

    Nodes can also be used to test models by providing specific input signals
    to parts of the model, and can simplify the input/output interface of a
    Network when used as a relay to/from its internal Ensembles
    (see networks.EnsembleArray for an example).

    Parameters
    ----------
    output : callable or array_like
        Function that transforms the Node inputs into outputs, or
        a constant output value.
    size_in : int, optional
        The number of dimensions of the input data parameter.
    size_out : int, optional
        The size of the output signal.
        Optional; if not specified, it will be determined based on
        the values of ``output`` and ``size_in``.
    label : str, optional
        A name for the node. Used for debugging and visualization.

    Attributes
    ----------
    label : str
        The name of the node.
    output : callable or array_like
        The given output.
    size_in : int
        The number of dimensions of the input data parameter.
    size_out : int
        The number of output dimensions.
    """

    output = OutputParam(default=None)
    size_in = IntParam(default=0, low=0)
    size_out = IntParam(default=None, low=0, optional=True)
    label = StringParam(default=None, optional=True)
    probeable = ListParam(default=['output'])

    def __init__(self, output=Default, size_in=Default, size_out=Default,
                 label=Default):
        self.size_in = size_in
        self.size_out = size_out
        self.label = label
        self.output = output  # Must be set after size_out; may modify size_out
        self.probeable = Default

    def __getitem__(self, key):
        # Slicing a node yields a view on a subset of its output dimensions.
        return ObjView(self, key)

    def __len__(self):
        return self.size_out
class Probe(NengoObject):
    """A probe is an object that receives data from the simulation.

    This is to be used in any situation where you wish to gather simulation
    data (spike data, represented values, neuron voltages, etc.) for analysis.

    Probes cannot directly affect the simulation.

    TODO: Example usage for each object.

    Parameters
    ----------
    target : Ensemble, Node, Connection
        The Nengo object to connect to the probe.
    attr : str, optional
        The quantity to probe. Refer to the target's ``probeable`` list for
        details. Defaults to the first element in the list.
    sample_every : float, optional
        Sampling period in seconds.
    conn_args : dict, optional
        Optional keyword arguments to pass to the Connection created for this
        probe. For example, passing ``synapse=pstc`` will filter the data.
    """

    target = NengoObjectParam(nonzero_size_out=True)
    attr = StringParam(default=None)
    sample_every = NumberParam(default=None, optional=True, low=1e-10)
    conn_args = DictParam(default=None)
    seed = IntParam(default=None, optional=True)

    def __init__(self, target, attr=Default, sample_every=Default,
                 **conn_args):
        # Reject targets that expose nothing to probe.
        if not hasattr(target, 'probeable') or len(target.probeable) == 0:
            raise TypeError("Type '%s' is not probeable"
                            % target.__class__.__name__)

        # No filtering on the probe connection unless explicitly requested.
        conn_args.setdefault('synapse', None)

        # We'll use the first in the list as default
        if attr is Default:
            attr = target.probeable[0]
        self.attr = attr
        if self.attr not in target.probeable:
            raise ValueError("'%s' is not probeable for '%s'"
                             % (self.attr, target))

        self.target = target
        self.sample_every = sample_every
        self.conn_args = conn_args
        self.seed = conn_args.get('seed', None)

    @property
    def label(self):
        """Derived label of the form ``Probe(target.attr)``."""
        return "Probe(%s.%s)" % (self.target.label, self.attr)

    @property
    def size_in(self):
        # TODO: A bit of a hack; make less hacky.
        if isinstance(self.target, Ensemble) and self.attr != "decoded_output":
            return self.target.neurons.size_out
        return self.target.size_out

    @property
    def size_out(self):
        # Probes are pure sinks; they emit nothing.
        return 0