class StateMonitor(Group, CodeRunner):
    '''
    Record values of state variables during a run

    To extract recorded values after a run, use the ``t`` attribute for the
    array of times at which values were recorded, and the variable name
    attribute for the values. The values will have shape
    ``(len(indices), len(t))``, where ``indices`` are the array indices which
    were recorded.

    When indexing the `StateMonitor` directly, the returned object can be used
    to get the recorded values for the specified indices, i.e. the indexing
    semantics refer to the indices in ``source``, not to the relative indices
    of the recorded values. For example, when recording only neurons with even
    numbers, `mon[[0, 2]].v` will return the values for neurons 0 and 2,
    whereas `mon.v[[0, 2]]` will return the values for the first and third
    *recorded* neurons, i.e. for neurons 0 and 4.

    Parameters
    ----------
    source : `Group`
        Which object to record values from.
    variables : str, sequence of str, True
        Which variables to record, or ``True`` to record all variables
        (note that this may use a great deal of memory).
    record : bool, sequence of ints
        Which indices to record, nothing is recorded for ``False``,
        everything is recorded for ``True`` (warning: may use a great deal of
        memory), or a specified subset of indices.
    dt : `Quantity`, optional
        The time step to be used for the monitor. Cannot be combined with the
        `clock` argument.
    clock : `Clock`, optional
        The update clock to be used. If neither a clock, nor the ``dt``
        argument is specified, the clock of the `source` will be used.
    when : str, optional
        At which point during a time step the values should be recorded.
        Defaults to ``'start'``.
    order : int, optional
        The priority of this group for operations occurring at the same time
        step and in the same scheduling slot. Defaults to 0.
    name : str, optional
        A unique name for the object, otherwise will use
        ``source.name+'statemonitor_0'``, etc.
    codeobj_class : `CodeObject`, optional
        The `CodeObject` class to create.

    Examples
    --------
    Record all variables, first 5 indices::

        eqs = """
        dV/dt = (2-V)/(10*ms) : 1
        """
        threshold = 'V>1'
        reset = 'V = 0'
        G = NeuronGroup(100, eqs, threshold=threshold, reset=reset)
        G.V = rand(len(G))
        M = StateMonitor(G, True, record=range(5))
        run(100*ms)
        plot(M.t, M.V.T)
        show()

    Notes
    -----
    Since this monitor by default records in the ``'start'`` time slot,
    recordings of the membrane potential in integrate-and-fire models may look
    unexpected: the recorded membrane potential trace will never be above
    threshold in an integrate-and-fire model, because the reset statement will
    have been applied already. Set the ``when`` keyword to a different value
    if this is not what you want.

    Note that ``record=True`` only works in runtime mode for synaptic
    variables. This is because the actual array of indices has to be
    calculated and this is not possible in standalone mode, where the synapses
    have not been created yet at this stage. Consider using an explicit array
    of indices instead, i.e. something like ``record=np.arange(n_synapses)``.
    '''
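
    # A small sketch of the indexing semantics described above (illustrative
    # comment only, not executed; it assumes the usual angela2 top-level
    # namespace with NeuronGroup, StateMonitor, run and ms available):
    #
    #     G = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1')
    #     mon = StateMonitor(G, 'v', record=[0, 2, 4])  # source neurons 0, 2, 4
    #     run(10*ms)
    #     mon[2].v      # values of *source* neuron 2
    #     mon.v[1]      # values of the second *recorded* neuron (also neuron 2)
    #     mon.v.shape   # (3, len(mon.t)), i.e. (len(indices), len(t))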
    invalidates_magic_network = False
    add_to_magic_network = True

    def __init__(self, source, variables, record, dt=None, clock=None,
                 when='start', order=0, name='statemonitor*',
                 codeobj_class=None):
        self.source = source
        # Make the monitor use the explicitly defined namespace of its source
        # group (if it exists)
        self.namespace = getattr(source, 'namespace', None)
        self.codeobj_class = codeobj_class

        # run by default on source clock at the end
        if dt is None and clock is None:
            clock = source.clock

        # variables should always be a list of strings
        if variables is True:
            variables = source.equations.names
        elif isinstance(variables, str):
            variables = [variables]
        #: The variables to record
        self.record_variables = variables

        # record should always be an array of ints
        self.record_all = False
        if hasattr(record, '_indices'):
            # The ._indices method always returns absolute indices
            # If the source is already a subgroup of another group, we
            # therefore have to shift the indices to become relative to the
            # subgroup
            record = record._indices() - getattr(source, '_offset', 0)
        if record is True:
            self.record_all = True
            try:
                record = np.arange(len(source), dtype=np.int32)
            except NotImplementedError:
                # In standalone mode, this is not possible for synaptic
                # variables because the number of synapses is not defined yet
                raise NotImplementedError(('Cannot determine the actual '
                                           'indices to record for record=True. '
                                           'This can occur for example in '
                                           'standalone mode when trying to '
                                           'record a synaptic variable. '
                                           'Consider providing an explicit '
                                           'array of indices for the record '
                                           'argument.'))
        elif record is False:
            record = np.array([], dtype=np.int32)
        elif isinstance(record, numbers.Number):
            record = np.array([record], dtype=np.int32)
        else:
            record = np.asarray(record, dtype=np.int32)

        #: The array of recorded indices
        self.record = record
        self.n_indices = len(record)

        # Some dummy code so that code generation takes care of the indexing
        # and subexpressions
        code = ['_to_record_%s = _source_%s' % (v, v) for v in variables]
        code = '\n'.join(code)

        CodeRunner.__init__(self, group=self, template='statemonitor',
                            code=code, name=name,
                            clock=clock,
                            dt=dt,
                            when=when,
                            order=order,
                            check_units=False)

        self.add_dependency(source)

        # Setup variables
        self.variables = Variables(self)

        self.variables.add_dynamic_array('t', size=0, dimensions=second.dim,
                                         constant=False,
                                         dtype=self._clock.variables['t'].dtype)
        self.variables.add_array('N', dtype=np.int32, size=1, scalar=True,
                                 read_only=True)
        self.variables.add_array('_indices', size=len(self.record),
                                 dtype=self.record.dtype, constant=True,
                                 read_only=True, values=self.record)
        self.variables.create_clock_variables(self._clock, prefix='_clock_')
        for varname in variables:
            var = source.variables[varname]
            if var.scalar and len(self.record) > 1:
                logger.warn(('Variable %s is a shared variable but it will be '
                             'recorded once for every target.' % varname),
                            once=True)
            index = source.variables.indices[varname]
            self.variables.add_reference('_source_%s' % varname,
                                         source, varname, index=index)
            if index not in ('_idx', '0') and index not in variables:
                self.variables.add_reference(index, source)
            self.variables.add_dynamic_array(varname,
                                             size=(0, len(self.record)),
                                             resize_along_first=True,
                                             dimensions=var.dim,
                                             dtype=var.dtype,
                                             constant=False,
                                             read_only=True)

        for varname in variables:
            var = self.source.variables[varname]
            self.variables.add_auxiliary_variable('_to_record_' + varname,
                                                  dimensions=var.dim,
                                                  dtype=var.dtype,
                                                  scalar=var.scalar)

        self.recorded_variables = {varname: self.variables[varname]
                                   for varname in variables}
        recorded_names = [varname for varname in variables]

        self.needed_variables = recorded_names
        self.template_kwds = {'_recorded_variables': self.recorded_variables}
        self.written_readonly_vars = {self.variables[varname]
                                      for varname in self.record_variables}
        self._enable_group_attributes()

    def resize(self, new_size):
        self.variables['N'].set_value(new_size)
        self.variables['t'].resize(new_size)
        for var in self.recorded_variables.values():
            var.resize((new_size, self.n_indices))

    def reinit(self):
        raise NotImplementedError()

    def __getitem__(self, item):
        dtype = get_dtype(item)
        if np.issubdtype(dtype, np.signedinteger):
            return StateMonitorView(self, item)
        elif isinstance(item, Sequence):
            index_array = np.array(item)
            if not np.issubdtype(index_array.dtype, np.signedinteger):
                raise TypeError('Index has to be an integer or a sequence '
                                'of integers')
            return StateMonitorView(self, item)
        elif hasattr(item, '_indices'):
            # objects that support the indexing interface will return absolute
            # indices but here we need relative ones
            # TODO: How do we prevent the use of completely unrelated objects here?
            source_offset = getattr(self.source, '_offset', 0)
            return StateMonitorView(self, item._indices() - source_offset)
        else:
            raise TypeError('Cannot use object of type %s as an index'
                            % type(item))

    def __getattr__(self, item):
        # We do this because __setattr__ and __getattr__ are not active until
        # _group_attribute_access_active attribute is set, and if it is set,
        # then __getattr__ will not be called. Therefore, if getattr is called
        # with this name, it is because it hasn't been set yet and so this
        # method should raise an AttributeError to agree that it hasn't been
        # called yet.
        if item == '_group_attribute_access_active':
            raise AttributeError
        if not hasattr(self, '_group_attribute_access_active'):
            raise AttributeError
        if item in self.record_variables:
            var_dim = self.variables[item].dim
            return Quantity(self.variables[item].get_value().T,
                            dim=var_dim, copy=True)
        elif item.endswith('_') and item[:-1] in self.record_variables:
            return self.variables[item[:-1]].get_value().T
        else:
            return Group.__getattr__(self, item)

    def __repr__(self):
        description = '<{classname}, recording {variables} from {source}>'
        return description.format(classname=self.__class__.__name__,
                                  variables=repr(self.record_variables),
                                  source=self.source.name)

    def record_single_timestep(self):
        '''
        Records a single time step. Useful for recording the values at the end
        of the simulation -- otherwise a `StateMonitor` will not record the
        last simulated values since its ``when`` attribute defaults to
        ``'start'``, i.e. the last recording is at the *beginning* of the last
        time step.

        Notes
        -----
        This function will only work if the `StateMonitor` has already been
        run, but a run with a length of ``0*ms`` does suffice.

        Examples
        --------
        >>> from angela2 import *
        >>> G = NeuronGroup(1, 'dv/dt = -v/(5*ms) : 1')
        >>> G.v = 1
        >>> mon = StateMonitor(G, 'v', record=True)
        >>> run(0.5*ms)
        >>> print(np.array_str(mon.v[:], precision=3))
        [[ 1. 0.98 0.961 0.942 0.923]]
        >>> print(mon.t[:])
        [ 0. 100. 200. 300. 400.] us
        >>> print(np.array_str(G.v[:], precision=3))  # last value had not been recorded
        [ 0.905]
        >>> mon.record_single_timestep()
        >>> print(mon.t[:])
        [ 0. 100. 200. 300. 400. 500.] us
        >>> print(np.array_str(mon.v[:], precision=3))
        [[ 1. 0.98 0.961 0.942 0.923 0.905]]
        '''
        if self.codeobj is None:
            raise TypeError('Can only record a single time step after the '
                            'network has been run once.')
        self.codeobj()
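

# A minimal sketch of the 'start'-slot caveat noted in the class docstring
# above (illustrative comment only; it assumes the usual angela2 namespace and
# the default schedule in which 'thresholds' runs before 'resets'):
#
#     G = NeuronGroup(1, 'dv/dt = (2 - v)/(10*ms) : 1',
#                     threshold='v > 1', reset='v = 0')
#     # Default: records in the 'start' slot, i.e. after the reset of the
#     # previous step has been applied, so v never appears above threshold.
#     mon_default = StateMonitor(G, 'v', record=True)
#     # Record between the threshold test and the reset instead:
#     mon_peak = StateMonitor(G, 'v', record=True, when='after_thresholds')
#     run(50*ms)
#     mon_default.record_single_timestep()  # also capture the final values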


class NeuronGroup(Group, SpikeSource):
    '''
    A group of neurons.

    Parameters
    ----------
    N : int
        Number of neurons in the group.
    model : (str, `Equations`)
        The differential equations defining the group.
    method : (str, function), optional
        The numerical integration method. Either a string with the name of a
        registered method (e.g. "euler") or a function that receives an
        `Equations` object and returns the corresponding abstract code. If no
        method is specified, a suitable method will be chosen automatically.
    threshold : str, optional
        The condition which produces spikes. Should be a single line boolean
        expression.
    reset : str, optional
        The (possibly multi-line) string with the code to execute on reset.
    refractory : {str, `Quantity`}, optional
        Either the length of the refractory period (e.g. ``2*ms``), a string
        expression that evaluates to the length of the refractory period
        after each spike (e.g. ``'(1 + rand())*ms'``), or a string expression
        evaluating to a boolean value, given the condition under which the
        neuron stays refractory after a spike (e.g. ``'v > -20*mV'``).
    events : dict, optional
        User-defined events in addition to the "spike" event defined by the
        ``threshold``. Has to be a mapping of strings (the event name) to
        strings (the condition) that will be checked.
    namespace : dict, optional
        A dictionary mapping identifier names to objects. If not given, the
        namespace will be filled in at the time of the call of `Network.run`,
        with either the values from the ``namespace`` argument of the
        `Network.run` method or from the local context, if no such argument
        is given.
    dtype : (`dtype`, `dict`), optional
        The `numpy.dtype` that will be used to store the values, or a
        dictionary specifying the type for variable names. If a value is not
        provided for a variable (or no value is provided at all), the
        preference setting `core.default_float_dtype` is used.
    codeobj_class : class, optional
        The `CodeObject` class to run code with.
    dt : `Quantity`, optional
        The time step to be used for the simulation. Cannot be combined with
        the `clock` argument.
    clock : `Clock`, optional
        The update clock to be used. If neither a clock, nor the `dt` argument
        is specified, the `defaultclock` will be used.
    order : int, optional
        The priority of this group for operations occurring at the same time
        step and in the same scheduling slot. Defaults to 0.
    name : str, optional
        A unique name for the group, otherwise use ``neurongroup_0``, etc.

    Notes
    -----
    `NeuronGroup` contains a `StateUpdater`, `Thresholder` and `Resetter`, and
    these are run at the 'groups', 'thresholds' and 'resets' slots (i.e. the
    values of their `when` attribute take these values). The `order` attribute
    will be passed down to the contained objects but can be set individually
    by setting the `order` attribute of the `state_updater`, `thresholder` and
    `resetter` attributes, respectively.
    '''
    add_to_magic_network = True

    def __init__(self, N, model,
                 method=('exact', 'euler', 'heun'),
                 method_options=None,
                 threshold=None,
                 reset=None,
                 refractory=False,
                 events=None,
                 namespace=None,
                 dtype=None,
                 dt=None,
                 clock=None,
                 order=0,
                 name='neurongroup*',
                 codeobj_class=None):
        Group.__init__(self, dt=dt, clock=clock, when='start', order=order,
                       name=name)
        if dtype is None:
            dtype = {}
        if isinstance(dtype, MutableMapping):
            dtype['lastspike'] = self._clock.variables['t'].dtype

        self.codeobj_class = codeobj_class

        try:
            self._N = N = int(N)
        except ValueError:
            if isinstance(N, str):
                raise TypeError("First NeuronGroup argument should be size, "
                                "not equations.")
            raise
        if N < 1:
            raise ValueError("NeuronGroup size should be at least 1, was "
                             + str(N))

        self.start = 0
        self.stop = self._N

        ##### Prepare and validate equations
        if isinstance(model, str):
            model = Equations(model)
        if not isinstance(model, Equations):
            raise TypeError(('model has to be a string or an Equations '
                             'object, is "%s" instead.') % type(model))

        # Check flags
        model.check_flags({DIFFERENTIAL_EQUATION: ('unless refractory',),
                           PARAMETER: ('constant', 'shared', 'linked'),
                           SUBEXPRESSION: ('shared', 'constant over dt')})

        # add refractoriness
        #: The original equations as specified by the user (i.e. without
        #: the multiplied `int(not_refractory)` term for equations marked as
        #: `(unless refractory)`)
        self.user_equations = model
        if refractory is not False:
            model = add_refractoriness(model)
        uses_refractoriness = len(model) and any(
            ['unless refractory' in eq.flags
             for eq in model.values()
             if eq.type == DIFFERENTIAL_EQUATION])

        # Separate subexpressions depending on whether they are considered to
        # be constant over a time step or not
        model, constant_over_dt = extract_constant_subexpressions(model)
        self.equations = model

        self._linked_variables = set()

        logger.diagnostic("Creating NeuronGroup of size {self._N}, "
                          "equations {self.equations}.".format(self=self))

        if namespace is None:
            namespace = {}
        #: The group-specific namespace
        self.namespace = namespace

        # All of the following will be created in before_run

        #: The refractory condition or timespan
        self._refractory = refractory
        if uses_refractoriness and refractory is False:
            logger.warn('Model equations use the "unless refractory" flag but '
                        'no refractory keyword was given.', 'no_refractory')

        #: The state update method selected by the user
        self.method_choice = method

        if events is None:
            events = {}

        if threshold is not None:
            if 'spike' in events:
                raise ValueError(("The NeuronGroup defines both a threshold "
                                  "and a 'spike' event"))
            events['spike'] = threshold

        # Setup variables
        # Since we have to create _spikespace and possibly other "eventspace"
        # variables, we pass the supported events
        self._create_variables(dtype, events=list(events.keys()))

        #: Events supported by this group
        self.events = events

        #: Code that is triggered on events (e.g. reset)
        self.event_codes = {}

        #: Checks the spike threshold (or arbitrary user-defined events)
        self.thresholder = {}

        #: Reset neurons which have spiked (or perform arbitrary actions for
        #: user-defined events)
        self.resetter = {}

        for event_name in events.keys():
            if not isinstance(event_name, str):
                raise TypeError(('Keys in the "events" dictionary have to be '
                                 'strings, not type %s.') % type(event_name))
            if not _valid_event_name(event_name):
                raise TypeError(("The name '%s' cannot be used as an event "
                                 "name.") % event_name)
            # By default, user-defined events are checked after the threshold
            when = 'thresholds' if event_name == 'spike' else 'after_thresholds'
            # creating a Thresholder will take care of checking the validity
            # of the condition
            thresholder = Thresholder(self, event=event_name, when=when)
            self.thresholder[event_name] = thresholder
            self.contained_objects.append(thresholder)

        if reset is not None:
            self.run_on_event('spike', reset, when='resets')

        #: Performs numerical integration step
        self.state_updater = StateUpdater(self, method, method_options)
        self.contained_objects.append(self.state_updater)

        #: Update the "constant over a time step" subexpressions
        self.subexpression_updater = None
        if len(constant_over_dt):
            self.subexpression_updater = SubexpressionUpdater(self,
                                                              constant_over_dt)
            self.contained_objects.append(self.subexpression_updater)

        if refractory is not False:
            # Set the refractoriness information
            self.variables['lastspike'].set_value(-1e4 * second)
            self.variables['not_refractory'].set_value(True)

        # Activate name attribute access
        self._enable_group_attributes()

    @property
    def spikes(self):
        '''
        The spikes returned by the most recent thresholding operation.
        '''
        # Note that we have to directly access the ArrayVariable object here
        # instead of using the Group mechanism by accessing self._spikespace
        # Using the latter would cut _spikespace to the length of the group
        spikespace = self.variables['_spikespace'].get_value()
        return spikespace[:spikespace[-1]]

    def state(self, name, use_units=True, level=0):
        try:
            return Group.state(self, name, use_units=use_units,
                               level=level + 1)
        except KeyError as ex:
            if name in self._linked_variables:
                raise TypeError(('Link target for variable %s has not been '
                                 'set.') % name)
            else:
                raise ex

    def run_on_event(self, event, code, when='after_resets', order=None):
        '''
        Run code triggered by a custom-defined event (see `NeuronGroup`
        documentation for the specification of events). The created `Resetter`
        object will be automatically added to the group; it therefore does not
        need to be added to the network manually. However, a reference to the
        object will be returned, which can be used to later remove it from the
        group or to set it to inactive.

        Parameters
        ----------
        event : str
            The name of the event that should trigger the code
        code : str
            The code that should be executed
        when : str, optional
            The scheduling slot that should be used to execute the code.
            Defaults to `'after_resets'`.
        order : int, optional
            The order for operations in the same scheduling slot. Defaults to
            the order of the `NeuronGroup`.

        Returns
        -------
        obj : `Resetter`
            A reference to the object that will be run.
        '''
        if event not in self.events:
            error_message = "Unknown event '%s'." % event
            if event == 'spike':
                error_message += ' Did you forget to define a threshold?'
            raise ValueError(error_message)
        if event in self.resetter:
            raise ValueError(("Cannot add code for event '%s', code for this "
                              "event has already been added.") % event)
        self.event_codes[event] = code
        resetter = Resetter(self, when=when, order=order, event=event)
        self.resetter[event] = resetter
        self.contained_objects.append(resetter)
        return resetter

    def set_event_schedule(self, event, when='after_thresholds', order=None):
        '''
        Change the scheduling slot for checking the condition of an event.

        Parameters
        ----------
        event : str
            The name of the event for which the scheduling should be changed
        when : str, optional
            The scheduling slot that should be used to check the condition.
            Defaults to `'after_thresholds'`.
        order : int, optional
            The order for operations in the same scheduling slot. Defaults to
            the order of the `NeuronGroup`.
        '''
        if event not in self.thresholder:
            raise ValueError("Unknown event '%s'." % event)
        order = order if order is not None else self.order
        self.thresholder[event].when = when
        self.thresholder[event].order = order

    def __setattr__(self, key, value):
        # attribute access is switched off until this attribute is created by
        # _enable_group_attributes
        if not hasattr(self, '_group_attribute_access_active') or key in self.__dict__:
            object.__setattr__(self, key, value)
        elif key in self._linked_variables:
            if not isinstance(value, LinkedVariable):
                raise ValueError(('Cannot set a linked variable directly, '
                                  'link it to another variable using '
                                  '"linked_var".'))
            linked_var = value.variable

            if isinstance(linked_var, DynamicArrayVariable):
                raise NotImplementedError(('Linking to variable %s is not '
                                           'supported, can only link to '
                                           'state variables of fixed '
                                           'size.') % linked_var.name)

            eq = self.equations[key]
            if eq.dim is not linked_var.dim:
                raise DimensionMismatchError(('Unit of variable %s does not '
                                              'match its link target %s') %
                                             (key, linked_var.name))

            if not isinstance(linked_var, Subexpression):
                var_length = len(linked_var)
            else:
                var_length = len(linked_var.owner)

            if value.index is not None:
                try:
                    index_array = np.asarray(value.index)
                    if not np.issubdtype(index_array.dtype, np.signedinteger):
                        raise TypeError()
                except TypeError:
                    raise TypeError(('The index for a linked variable has '
                                     'to be an integer array'))
                size = len(index_array)
                source_index = value.group.variables.indices[value.name]
                if source_index not in ('_idx', '0'):
                    # we are indexing into an already indexed variable,
                    # calculate the indexing into the target variable
                    index_array = value.group.variables[source_index].get_value()[index_array]

                if index_array.ndim != 1 or size != len(self):
                    raise TypeError(('Index array for linked variable %s '
                                     'has to be a one-dimensional array of '
                                     'length %d, but has shape '
                                     '%s') % (key, len(self),
                                              str(index_array.shape)))
                if min(index_array) < 0 or max(index_array) >= var_length:
                    raise ValueError('Index array for linked variable %s '
                                     'contains values outside of the valid '
                                     'range [0, %d[' % (key, var_length))
                self.variables.add_array('_%s_indices' % key,
                                         size=size,
                                         dtype=index_array.dtype,
                                         constant=True,
                                         read_only=True,
                                         values=index_array)
                index = '_%s_indices' % key
            else:
                if linked_var.scalar or (var_length == 1 and self._N != 1):
                    index = '0'
                else:
                    index = value.group.variables.indices[value.name]
                    if index == '_idx':
                        target_length = var_length
                    else:
                        target_length = len(value.group.variables[index])
                        # we need a name for the index that does not clash
                        # with other names and a reference to the index
                        new_index = '_' + value.name + '_index_' + index
                        self.variables.add_reference(new_index,
                                                     value.group,
                                                     index)
                        index = new_index

                    if len(self) != target_length:
                        raise ValueError(('Cannot link variable %s to %s, the '
                                          'size of the target group does not '
                                          'match (%d != %d). You can provide '
                                          'an indexing scheme with the '
                                          '"index" keyword to link groups '
                                          'with different sizes') %
                                         (key, linked_var.name,
                                          len(self), target_length))

            self.variables.add_reference(key, value.group, value.name,
                                         index=index)
            log_msg = ('Setting {target}.{targetvar} as a link to '
                       '{source}.{sourcevar}').format(
                           target=self.name, targetvar=key,
                           source=value.variable.owner.name,
                           sourcevar=value.variable.name)
            if index is not None:
                log_msg += ' (using "{index}" as index variable)'.format(
                    index=index)
            logger.diagnostic(log_msg)
        else:
            if isinstance(value, LinkedVariable):
                raise TypeError(('Cannot link variable %s, it has to be '
                                 'marked as a linked variable with '
                                 '"(linked)" in the model equations.') % key)
            else:
                Group.__setattr__(self, key, value, level=1)

    def __getitem__(self, item):
        start, stop = to_start_stop(item, self._N)
        return Subgroup(self, start, stop)

    def _create_variables(self, user_dtype, events):
        '''
        Create the variables dictionary for this `NeuronGroup`, containing
        entries for the equation variables and some standard entries.
        '''
        self.variables = Variables(self)
        self.variables.add_constant('N', self._N)

        # Standard variables always present
        for event in events:
            self.variables.add_array('_{}space'.format(event),
                                     size=self._N + 1, dtype=np.int32,
                                     constant=False)
        # Add the special variable "i" which can be used to refer to the
        # neuron index
        self.variables.add_arange('i', size=self._N, constant=True,
                                  read_only=True)
        # Add the clock variables
        self.variables.create_clock_variables(self._clock)

        for eq in self.equations.values():
            dtype = get_dtype(eq, user_dtype)
            check_identifier_pre_post(eq.varname)
            if eq.type in (DIFFERENTIAL_EQUATION, PARAMETER):
                if 'linked' in eq.flags:
                    # 'linked' cannot be combined with other flags
                    if len(eq.flags) != 1:
                        raise SyntaxError(('The "linked" flag cannot be '
                                           'combined with other flags'))
                    self._linked_variables.add(eq.varname)
                else:
                    constant = 'constant' in eq.flags
                    shared = 'shared' in eq.flags
                    size = 1 if shared else self._N
                    self.variables.add_array(eq.varname, size=size,
                                             dimensions=eq.dim,
                                             dtype=dtype,
                                             constant=constant,
                                             scalar=shared)
            elif eq.type == SUBEXPRESSION:
                self.variables.add_subexpression(eq.varname,
                                                 dimensions=eq.dim,
                                                 expr=str(eq.expr),
                                                 dtype=dtype,
                                                 scalar='shared' in eq.flags)
            else:
                raise AssertionError('Unknown type of equation: ' + eq.type)

        # Add the conditional-write attribute for variables with the
        # "unless refractory" flag
        if self._refractory is not False:
            for eq in self.equations.values():
                if (eq.type == DIFFERENTIAL_EQUATION
                        and 'unless refractory' in eq.flags):
                    not_refractory_var = self.variables['not_refractory']
                    var = self.variables[eq.varname]
                    var.set_conditional_write(not_refractory_var)

        # Stochastic variables
        for xi in self.equations.stochastic_variables:
            self.variables.add_auxiliary_variable(
                xi, dimensions=(second ** -0.5).dim)

        # Check scalar subexpressions
        for eq in self.equations.values():
            if eq.type == SUBEXPRESSION and 'shared' in eq.flags:
                var = self.variables[eq.varname]
                for identifier in var.identifiers:
                    if identifier in self.variables:
                        if not self.variables[identifier].scalar:
                            raise SyntaxError(('Shared subexpression %s '
                                               'refers to non-shared '
                                               'variable %s.')
                                              % (eq.varname, identifier))

    def before_run(self, run_namespace=None):
        # Check units
        self.equations.check_units(self, run_namespace=run_namespace)
        # Check that subexpressions that refer to stateful functions are
        # labeled as "constant over dt"
        check_subexpressions(self, self.equations, run_namespace)
        super(NeuronGroup, self).before_run(run_namespace=run_namespace)

    def _repr_html_(self):
        text = [r'NeuronGroup "%s" with %d neurons.<br>' % (self.name,
                                                            self._N)]
        text.append(r'<b>Model:</b><br>')
        text.append(sympy.latex(self.equations))

        def add_event_to_text(event):
            if event == 'spike':
                event_header = 'Spiking behaviour'
                event_condition = 'Threshold condition'
                event_code = 'Reset statement(s)'
            else:
                event_header = 'Event "%s"' % event
                event_condition = 'Event condition'
                event_code = 'Executed statement(s)'
            condition = self.events[event]
            text.append(r'<b>%s:</b><ul style="list-style-type: none; '
                        r'margin-top: 0px;">' % event_header)
            text.append(r'<li><i>%s: </i>' % event_condition)
            text.append('<code>%s</code></li>' % str(condition))
            statements = self.event_codes.get(event, None)
            if statements is not None:
                text.append(r'<li><i>%s:</i>' % event_code)
                if '\n' in str(statements):
                    text.append('<br>')
                text.append(r'<code>%s</code></li>' % str(statements))
            text.append('</ul>')

        if 'spike' in self.events:
            add_event_to_text('spike')
        for event in self.events:
            if event != 'spike':
                add_event_to_text(event)

        return '\n'.join(text)
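

# A usage sketch for user-defined events and run_on_event (illustrative
# comment only; it assumes the usual angela2 namespace, and the event and
# variable names are made up for the example):
#
#     G = NeuronGroup(10, 'dv/dt = (2 - v)/(10*ms) : 1',
#                     threshold='v > 1', reset='v = 0', refractory=2*ms,
#                     events={'crossed_half': 'v > 0.5'})
#     # Execute code whenever the custom event fires; the returned Resetter
#     # is already part of the group's contained objects.
#     op = G.run_on_event('crossed_half', 'v -= 0.01')
#     # Custom event conditions are checked in the 'after_thresholds' slot by
#     # default; the slot can be changed later:
#     G.set_event_schedule('crossed_half', when='after_resets')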


class EventMonitor(Group, CodeRunner):
    '''
    Record events from a `NeuronGroup` or another event source.

    The recorded events can be accessed in various ways: the attributes
    `~EventMonitor.i` and `~EventMonitor.t` store all the indices and event
    times, respectively. Alternatively, you can get a dictionary mapping
    neuron indices to event trains, by calling the `event_trains` method.

    Parameters
    ----------
    source : `NeuronGroup`, `SpikeSource`
        The source of events to record.
    event : str
        The name of the event to record
    variables : str or sequence of str, optional
        Which variables to record at the time of the event (in addition to the
        index of the neuron). Can be the name of a variable or a list of
        names.
    record : bool, optional
        Whether or not to record each event in `i` and `t` (the `count` will
        always be recorded). Defaults to ``True``.
    when : str, optional
        When to record the events, by default records events in the same slot
        where the event is emitted.
    order : int, optional
        The priority of this group for operations occurring at the same time
        step and in the same scheduling slot. Defaults to the order where the
        event is emitted + 1, i.e. it will be recorded directly afterwards.
    name : str, optional
        A unique name for the object, otherwise will use
        ``source.name+'_eventmonitor_0'``, etc.
    codeobj_class : class, optional
        The `CodeObject` class to run code with.

    See Also
    --------
    SpikeMonitor
    '''
    invalidates_magic_network = False
    add_to_magic_network = True

    def __init__(self, source, event, variables=None, record=True,
                 when=None, order=None, name='eventmonitor*',
                 codeobj_class=None):
        if not isinstance(source, SpikeSource):
            raise TypeError(('%s can only monitor groups producing spikes '
                             '(such as NeuronGroup), but the given argument '
                             'is of type %s.') % (self.__class__.__name__,
                                                  type(source)))
        #: The source we are recording from
        self.source = source
        #: Whether to record times and indices of events
        self.record = record
        #: The array of event counts (length = size of target group)
        self.count = None
        del self.count  # this is handled by the Variable mechanism

        if when is None:
            if order is not None:
                raise ValueError('Cannot specify order if when is not '
                                 'specified.')
            if hasattr(source, 'thresholder'):
                parent_obj = source.thresholder[event]
            else:
                parent_obj = source
            when = parent_obj.when
            order = parent_obj.order + 1
        elif order is None:
            order = 0

        #: The event that we are listening to
        self.event = event

        if variables is None:
            variables = {}
        elif isinstance(variables, str):
            variables = {variables}

        #: The additional variables that will be recorded
        self.record_variables = set(variables)

        for variable in variables:
            if variable not in source.variables:
                raise ValueError(("'%s' is not a variable of the recorded "
                                  "group" % variable))

        if self.record:
            self.record_variables |= {'i', 't'}

        # Some dummy code so that code generation takes care of the indexing
        # and subexpressions
        code = ['_to_record_%s = _source_%s' % (v, v)
                for v in self.record_variables]
        code = '\n'.join(code)

        self.codeobj_class = codeobj_class

        # Since this now works for general events, not only spikes, we have to
        # pass the information about which variable to use to the template;
        # it can no longer simply refer to "_spikespace"
        eventspace_name = '_{}space'.format(event)

        # Handle subgroups correctly
        start = getattr(source, 'start', 0)
        stop = getattr(source, 'stop', len(source))
        source_N = getattr(source, '_source_N', len(source))

        Nameable.__init__(self, name=name)

        self.variables = Variables(self)
        self.variables.add_reference(eventspace_name, source)

        for variable in self.record_variables:
            source_var = source.variables[variable]
            self.variables.add_reference('_source_%s' % variable,
                                         source, variable)
            self.variables.add_auxiliary_variable('_to_record_%s' % variable,
                                                  dimensions=source_var.dim,
                                                  dtype=source_var.dtype)
            self.variables.add_dynamic_array(variable, size=0,
                                             dimensions=source_var.dim,
                                             dtype=source_var.dtype,
                                             read_only=True)
        self.variables.add_arange('_source_idx', size=len(source))
        self.variables.add_array('count', size=len(source),
                                 dtype=np.int32, read_only=True,
                                 index='_source_idx')
        self.variables.add_constant('_source_start', start)
        self.variables.add_constant('_source_stop', stop)
        self.variables.add_constant('_source_N', source_N)
        self.variables.add_array('N', size=1, dtype=np.int32,
                                 read_only=True, scalar=True)

        record_variables = {varname: self.variables[varname]
                            for varname in self.record_variables}
        template_kwds = {'eventspace_variable': source.variables[eventspace_name],
                         'record_variables': record_variables,
                         'record': self.record}
        needed_variables = {eventspace_name} | self.record_variables
        CodeRunner.__init__(self, group=self, code=code,
                            template='spikemonitor',
                            name=None,  # The name has already been initialized
                            clock=source.clock, when=when,
                            order=order, needed_variables=needed_variables,
                            template_kwds=template_kwds)

        self.variables.create_clock_variables(self._clock, prefix='_clock_')
        self.add_dependency(source)
        self.written_readonly_vars = {self.variables[varname]
                                      for varname in self.record_variables}
        self._enable_group_attributes()

    def resize(self, new_size):
        # Note that this does not set N, this has to be done in the template
        # since we use a restricted pointer to access it (which promises that
        # we only change the value through this pointer)
        for variable in self.record_variables:
            self.variables[variable].resize(new_size)

    def reinit(self):
        '''
        Clears all recorded spikes
        '''
        raise NotImplementedError()

    @property
    def it(self):
        '''
        Returns the pair (`i`, `t`).
        '''
        if not self.record:
            raise AttributeError('Indices and times have not been recorded. '
                                 'Set the record argument to True to record '
                                 'them.')
        return self.i, self.t

    @property
    def it_(self):
        '''
        Returns the pair (`i`, `t_`).
        '''
        if not self.record:
            raise AttributeError('Indices and times have not been recorded. '
                                 'Set the record argument to True to record '
                                 'them.')
        return self.i, self.t_

    def _values_dict(self, first_pos, sort_indices, used_indices, var):
        sorted_values = self.state(var, use_units=False)[sort_indices]
        dim = self.variables[var].dim
        event_values = {}
        current_pos = 0  # position in the all_indices array
        for idx in range(len(self.source)):
            if current_pos < len(used_indices) and used_indices[current_pos] == idx:
                if current_pos < len(used_indices) - 1:
                    event_values[idx] = Quantity(
                        sorted_values[first_pos[current_pos]:
                                      first_pos[current_pos + 1]],
                        dim=dim, copy=False)
                else:
                    event_values[idx] = Quantity(
                        sorted_values[first_pos[current_pos]:],
                        dim=dim, copy=False)
                current_pos += 1
            else:
                event_values[idx] = Quantity([], dim=dim)
        return event_values

    def values(self, var):
        '''
        Return a dictionary mapping neuron indices to arrays of variable
        values at the time of the events (sorted by time).

        Parameters
        ----------
        var : str
            The name of the variable.

        Returns
        -------
        values : dict
            Dictionary mapping each neuron index to an array of variable
            values at the time of the events

        Examples
        --------
        >>> from angela2 import *
        >>> G = NeuronGroup(2, """counter1 : integer
        ...                       counter2 : integer
        ...                       max_value : integer""",
        ...                 threshold='counter1 >= max_value',
        ...                 reset='counter1 = 0')
        >>> G.run_regularly('counter1 += 1; counter2 += 1')  # doctest: +ELLIPSIS
        CodeRunner(...)
        >>> G.max_value = [50, 100]
        >>> mon = EventMonitor(G, event='spike', variables='counter2')
        >>> run(10*ms)
        >>> counter2_values = mon.values('counter2')
        >>> print(counter2_values[0])
        [ 50 100]
        >>> print(counter2_values[1])
        [100]
        '''
        if not self.record:
            raise AttributeError('Indices and times have not been recorded. '
                                 'Set the record argument to True to record '
                                 'them.')
        indices = self.i[:]
        # We have to make sure that the sort is stable, otherwise our spike
        # times do not necessarily remain sorted.
        sort_indices = np.argsort(indices, kind='mergesort')
        used_indices, first_pos = np.unique(self.i[:][sort_indices],
                                            return_index=True)
        return self._values_dict(first_pos, sort_indices, used_indices, var)

    def all_values(self):
        '''
        Return a dictionary mapping recorded variable names (including ``t``)
        to a dictionary mapping neuron indices to arrays of variable values at
        the time of the events (sorted by time). This is equivalent to (but
        more efficient than) calling `values` for each variable and storing
        the result in a dictionary.

        Returns
        -------
        all_values : dict
            Dictionary mapping variable names to dictionaries which themselves
            are mapping neuron indices to arrays of variable values at the
            time of the events.

        Examples
        --------
        >>> from angela2 import *
        >>> G = NeuronGroup(2, """counter1 : integer
        ...                       counter2 : integer
        ...                       max_value : integer""",
        ...                 threshold='counter1 >= max_value',
        ...                 reset='counter1 = 0')
        >>> G.run_regularly('counter1 += 1; counter2 += 1')  # doctest: +ELLIPSIS
        CodeRunner(...)
        >>> G.max_value = [50, 100]
        >>> mon = EventMonitor(G, event='spike', variables='counter2')
        >>> run(10*ms)
        >>> all_values = mon.all_values()
        >>> print(all_values['counter2'][0])
        [ 50 100]
        >>> print(all_values['t'][1])
        [ 9.9] ms
        '''
        if not self.record:
            raise AttributeError('Indices and times have not been recorded. '
                                 'Set the record argument to True to record '
                                 'them.')
        indices = self.i[:]
        # Use a stable sort here as well, so that the values for each neuron
        # remain sorted by time.
        sort_indices = np.argsort(indices, kind='mergesort')
        used_indices, first_pos = np.unique(self.i[:][sort_indices],
                                            return_index=True)
        all_values_dict = {}
        for varname in self.record_variables - {'i'}:
            all_values_dict[varname] = self._values_dict(first_pos,
                                                         sort_indices,
                                                         used_indices,
                                                         varname)
        return all_values_dict

    def event_trains(self):
        '''
        Return a dictionary mapping event indices to arrays of event times.
        Equivalent to calling ``values('t')``.

        Returns
        -------
        event_trains : dict
            Dictionary that stores an array with the event times for each
            neuron index.

        See Also
        --------
        SpikeMonitor.spike_trains
        '''
        return self.values('t')

    @property
    def num_events(self):
        '''
        Returns the total number of recorded events.
        '''
        return self.N[:]

    def __repr__(self):
        description = '<{classname}, recording event "{event}" from {source}>'
        return description.format(classname=self.__class__.__name__,
                                  event=self.event,
                                  source=self.group.name)
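

# A short sketch tying the EventMonitor accessors together (illustrative
# comment only; it assumes the usual angela2 namespace):
#
#     G = NeuronGroup(5, 'dv/dt = (2 - v)/(10*ms) : 1',
#                     threshold='v > 1', reset='v = 0')
#     mon = EventMonitor(G, event='spike', variables='v')
#     run(100*ms)
#     mon.i, mon.t           # flat arrays of event indices and times
#     mon.count              # per-neuron event counts (always recorded)
#     mon.event_trains()     # dict: neuron index -> array of event times
#     mon.all_values()['v']  # dict: neuron index -> v at each event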


class Thresholder(CodeRunner):
    '''
    The `CodeRunner` that applies the threshold condition to the state
    variables of a `NeuronGroup` at every timestep and sets its ``spikes``
    and ``refractory_until`` attributes.
    '''
    def __init__(self, group, when='thresholds', event='spike'):
        self.event = event
        if group._refractory is False or event != 'spike':
            template_kwds = {'_uses_refractory': False}
            needed_variables = []
        else:
            template_kwds = {'_uses_refractory': True}
            needed_variables = ['t', 'not_refractory', 'lastspike']
        # Since this now works for general events, not only spikes, we have to
        # pass the information about which variable to use to the template;
        # it can no longer simply refer to "_spikespace"
        eventspace_name = '_{}space'.format(event)
        template_kwds['eventspace_variable'] = group.variables[eventspace_name]
        needed_variables.append(eventspace_name)
        self.variables = Variables(self)
        self.variables.add_auxiliary_variable('_cond', dtype=bool)
        CodeRunner.__init__(self, group,
                            'threshold',
                            code='',  # will be set in update_abstract_code
                            clock=group.clock,
                            when=when,
                            order=group.order,
                            name=group.name + '_thresholder*',
                            needed_variables=needed_variables,
                            template_kwds=template_kwds)

    def update_abstract_code(self, run_namespace):
        code = self.group.events[self.event]
        # Raise a useful error message when the user used angela1 syntax
        if not isinstance(code, str):
            if isinstance(code, Quantity):
                t = 'a quantity'
            else:
                t = '%s' % type(code)
            error_msg = 'Threshold condition has to be a string, not %s.' % t
            if self.event == 'spike':
                try:
                    vm_var = _guess_membrane_potential(self.group.equations)
                except AttributeError:
                    # not a group with equations...
                    vm_var = None
                if vm_var is not None:
                    error_msg += " Probably you intended to use '%s > ...'?" % vm_var
            raise TypeError(error_msg)

        self.user_code = '_cond = ' + code

        identifiers = get_identifiers(code)
        variables = self.group.resolve_all(identifiers,
                                           run_namespace,
                                           user_identifiers=identifiers)
        if not is_boolean_expression(code, variables):
            raise TypeError(('Threshold condition "%s" is not a boolean '
                             'expression') % code)
        if self.group._refractory is False or self.event != 'spike':
            self.abstract_code = '_cond = %s' % code
        else:
            self.abstract_code = '_cond = (%s) and not_refractory' % code
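
# For reference, the abstract code produced by Thresholder.update_abstract_code
# for a group with threshold='v > 1' is (sketch, following the two branches at
# the end of the method above):
#
#     _cond = v > 1                        # no refractoriness
#     _cond = (v > 1) and not_refractory   # with a refractory mechanism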