def __init__(self, model, input_var, input, output_var, output, dt,
             n_samples=30, method=None, reset=None, refractory=False,
             threshold=None, level=0, param_init=None, t_start=0 * second):
    """Initialize the fitter."""
    super().__init__(dt, model, input, output, input_var, output_var,
                     n_samples, threshold, reset, refractory, method,
                     param_init)
    self.output = Quantity(output)
    self.output_ = array(output)

    if output_var not in self.model.names:
        raise NameError("%s is not a model variable" % output_var)
    if output.shape != input.shape:
        raise ValueError("Input and output must have the same size")

    # Replace input variable by TimedArray
    output_traces = TimedArray(output.transpose(), dt=dt)
    output_dim = get_dimensions(output)
    squared_output_dim = ('1' if output_dim is DIMENSIONLESS
                          else repr(output_dim**2))
    error_eqs = Equations('total_error : {}'.format(squared_output_dim))
    self.model = self.model + error_eqs
    self.t_start = t_start

    if param_init:
        for param, val in param_init.items():
            if not (param in self.model.identifiers or
                    param in self.model.names):
                raise ValueError("%s is not a model variable or an "
                                 "identifier in the model" % param)
    self.param_init = param_init

    self.simulator = None
def test_get_dtype():
    '''
    Check the utility function get_dtype
    '''
    eqs = Equations('''dv/dt = -v / (10*ms) : volt
                       x : 1
                       b : boolean
                       n : integer''')

    # Test standard dtypes
    assert get_dtype(eqs['v']) == brian_prefs['core.default_float_dtype']
    assert get_dtype(eqs['x']) == brian_prefs['core.default_float_dtype']
    assert get_dtype(eqs['n']) == brian_prefs['core.default_integer_dtype']
    assert get_dtype(eqs['b']) == np.bool

    # Test a changed default (float) dtype
    assert get_dtype(eqs['v'], np.float32) == np.float32, get_dtype(eqs['v'],
                                                                    np.float32)
    assert get_dtype(eqs['x'], np.float32) == np.float32
    # integer and boolean variables should be unaffected
    assert get_dtype(eqs['n']) == brian_prefs['core.default_integer_dtype']
    assert get_dtype(eqs['b']) == np.bool

    # Explicitly provide a dtype for some variables
    dtypes = {'v': np.float32, 'x': np.float64, 'n': np.int64}
    for varname in dtypes:
        assert get_dtype(eqs[varname], dtypes) == dtypes[varname]

    # Not setting some dtypes should use the standard dtypes
    dtypes = {'n': np.int64}
    assert get_dtype(eqs['n'], dtypes) == np.int64
    assert get_dtype(eqs['v'], dtypes) == brian_prefs['core.default_float_dtype']

    # Test that incorrect types raise an error
    # incorrect general dtype
    assert_raises(TypeError, lambda: get_dtype(eqs['v'], np.int32))
    # incorrect specific types
    assert_raises(TypeError, lambda: get_dtype(eqs['v'], {'v': np.int32}))
    assert_raises(TypeError, lambda: get_dtype(eqs['n'], {'n': np.float32}))
    assert_raises(TypeError, lambda: get_dtype(eqs['b'], {'b': np.int32}))
def simulate(IXmean):
    common_params = {  # Parameters common to all neurons.
        'C': 100.*b2.pfarad,
        'tau_m': 10.*b2.ms,
        'EL': -60.*b2.mV,
        'DeltaT': 2.*b2.mV,
        'Vreset': -65.,  # *b2.mV
        'VTmean': -50.*b2.mV,
    }
    common_params['gL'] = common_params['C'] / common_params['tau_m']

    E_cell_params = dict(common_params,
                         **{'Ncells': num_E_cells,
                            'IXmean': IXmean,  # 30
                            'IXsd': 20*b2.pA})

    eqs = Equations(
        """
        Im = IX + gL * (EL - vm) + gL * DeltaT * exp((vm - VT) / DeltaT) : amp
        VT : volt
        IX : amp
        dvm/dt = Im / C : volt
        """
    )

    E_cells = b2.NeuronGroup(E_cell_params['Ncells'],
                             model=eqs,
                             method=integration_method,
                             threshold="vm > 0.*mV",
                             reset="vm={}*mV".format(E_cell_params['Vreset']),
                             refractory="vm > 0.*mV",
                             namespace=common_params)

    # Initialise random parameters.
    E_cells.VT = E_cell_params['VTmean']
    E_cells.IX = E_cell_params['IXmean']
    E_cells.vm = -60 * b2.mV

    spike_monitor_E = b2.SpikeMonitor(E_cells)
    state_monitor_E = None
    if record_voltages:
        state_monitor_E = b2.StateMonitor(E_cells, "vm", record=True, dt=dt)

    net = b2.Network(E_cells)
    if record_voltages:
        net.add(state_monitor_E)
    net.add(spike_monitor_E)

    print('Simulation running...')
    start_time = time.time()
    b2.run(sim_duration*b2.ms)
    duration = time.time() - start_time
    print('Simulation time:', duration, 'seconds')

    return spike_monitor_E, state_monitor_E
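# ---- Usage sketch (not part of the original script) -------------------------
# simulate() reads several module-level settings; the values below are
# illustrative assumptions, not the original configuration, and the script's
# existing `import brian2 as b2` is assumed.
if __name__ == '__main__':
    num_E_cells = 100              # assumed population size
    integration_method = 'euler'   # assumed integrator
    record_voltages = False        # skip the StateMonitor
    dt = 0.1 * b2.ms               # assumed monitor resolution (unused here)
    sim_duration = 500             # assumed simulated time in ms

    spike_monitor, state_monitor = simulate(IXmean=30. * b2.pA)
    print('Total E-cell spikes:', spike_monitor.num_spikes)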
def __init__(self, N, equations, method=euler, threshold=None, reset=None, dtype=None, language=None, clock=None, name=None, level=0): BrianObject.__init__(self, when=clock, name=name) ##### VALIDATE ARGUMENTS AND STORE ATTRIBUTES self.method = method self.level = level = int(level) try: self.N = N = int(N) except ValueError: if isinstance(N, str): raise TypeError("First NeuronGroup argument should be size, not equations.") raise if N<1: raise ValueError("NeuronGroup size should be at least 1, was "+str(N)) # Validate equations if isinstance(equations, basestring): equations = Equations(equations, level=level+1) if not isinstance(equations, Equations): raise ValueError(('equations has to be a string or an Equations ' 'object, is "%s" instead.') % type(equations)) # add refractoriness equations = add_refractoriness(equations) self.equations = equations logger.debug("Creating NeuronGroup of size {self.N}, " "equations {self.equations}.".format(self=self)) # Check flags equations.check_flags({DIFFERENTIAL_EQUATION: ('active'), PARAMETER: ('constant')}) # Set dtypes and units self.prepare_dtypes(dtype=dtype) self.units = dict((var, equations.units[var]) for var in equations.equations.keys()) # Allocate memory (TODO: this should be refactored somewhere at some point) self.allocate_memory() #: The array of spikes from the most recent threshold operation self.spikes = array([], dtype=int) # Set these for documentation purposes #: Performs numerical integration step self.state_updater = None #: Performs thresholding step, sets the value of `spikes` self.thresholder = None #: Resets neurons which have spiked (`spikes`) self.resetter = None # Code generation (TODO: this should be refactored and modularised) # Temporary, set default language to Python if language is None: language = PythonLanguage() self.language = language self.create_state_updater() self.create_thresholder(threshold, level=level+1) self.create_resetter(reset, level=level+1) # Creation of contained_objects that do the work self.contained_objects.append(self.state_updater) if self.thresholder is not None: self.contained_objects.append(self.thresholder) if self.resetter is not None: self.contained_objects.append(self.resetter) # Activate name attribute access Group.__init__(self)
def __init__(self, N, model, method=('exact', 'euler', 'heun'), method_options=None, threshold=None, reset=None, refractory=False, events=None, namespace=None, dtype=None, dt=None, clock=None, order=0, name='neurongroup*', codeobj_class=None): Group.__init__(self, dt=dt, clock=clock, when='start', order=order, name=name) if dtype is None: dtype = {} if isinstance(dtype, collections.MutableMapping): dtype['lastspike'] = self._clock.variables['t'].dtype self.codeobj_class = codeobj_class try: self._N = N = int(N) except ValueError: if isinstance(N, str): raise TypeError( "First NeuronGroup argument should be size, not equations." ) raise if N < 1: raise ValueError("NeuronGroup size should be at least 1, was " + str(N)) self.start = 0 self.stop = self._N ##### Prepare and validate equations if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Check flags model.check_flags({ DIFFERENTIAL_EQUATION: ('unless refractory', ), PARAMETER: ('constant', 'shared', 'linked'), SUBEXPRESSION: ('shared', 'constant over dt') }) # add refractoriness #: The original equations as specified by the user (i.e. without #: the multiplied `int(not_refractory)` term for equations marked as #: `(unless refractory)`) self.user_equations = model if refractory is not False: model = add_refractoriness(model) uses_refractoriness = len(model) and any([ 'unless refractory' in eq.flags for eq in model.values() if eq.type == DIFFERENTIAL_EQUATION ]) # Separate subexpressions depending whether they are considered to be # constant over a time step or not model, constant_over_dt = extract_constant_subexpressions(model) self.equations = model self._linked_variables = set() logger.diagnostic("Creating NeuronGroup of size {self._N}, " "equations {self.equations}.".format(self=self)) if namespace is None: namespace = {} #: The group-specific namespace self.namespace = namespace # All of the following will be created in before_run #: The refractory condition or timespan self._refractory = refractory if uses_refractoriness and refractory is False: logger.warn( 'Model equations use the "unless refractory" flag but ' 'no refractory keyword was given.', 'no_refractory') #: The state update method selected by the user self.method_choice = method if events is None: events = {} if threshold is not None: if 'spike' in events: raise ValueError(("The NeuronGroup defines both a threshold " "and a 'spike' event")) events['spike'] = threshold # Setup variables # Since we have to create _spikespace and possibly other "eventspace" # variables, we pass the supported events self._create_variables(dtype, events=list(events.keys())) #: Events supported by this group self.events = events #: Code that is triggered on events (e.g. 
reset) self.event_codes = {} #: Checks the spike threshold (or abitrary user-defined events) self.thresholder = {} #: Reset neurons which have spiked (or perform arbitrary actions for #: user-defined events) self.resetter = {} for event_name in events.keys(): if not isinstance(event_name, basestring): raise TypeError(('Keys in the "events" dictionary have to be ' 'strings, not type %s.') % type(event_name)) if not _valid_event_name(event_name): raise TypeError(("The name '%s' cannot be used as an event " "name.") % event_name) # By default, user-defined events are checked after the threshold when = 'thresholds' if event_name == 'spike' else 'after_thresholds' # creating a Thresholder will take care of checking the validity # of the condition thresholder = Thresholder(self, event=event_name, when=when) self.thresholder[event_name] = thresholder self.contained_objects.append(thresholder) if reset is not None: self.run_on_event('spike', reset, when='resets') #: Performs numerical integration step self.state_updater = StateUpdater(self, method, method_options) self.contained_objects.append(self.state_updater) #: Update the "constant over a time step" subexpressions self.subexpression_updater = None if len(constant_over_dt): self.subexpression_updater = SubexpressionUpdater( self, constant_over_dt) self.contained_objects.append(self.subexpression_updater) if refractory is not False: # Set the refractoriness information self.variables['lastspike'].set_value(-1e4 * second) self.variables['not_refractory'].set_value(True) # Activate name attribute access self._enable_group_attributes()
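# ---- Illustrative usage of the custom-events machinery set up above --------
# User-level sketch (not part of this module) using the public Brian 2 API:
# a user-defined event gets its own Thresholder and can trigger code via
# run_on_event(), in addition to the ordinary 'spike' threshold/reset pair.
from brian2 import NeuronGroup, Network, mV, ms

group = NeuronGroup(5, '''dv/dt = (1*mV - v) / (10*ms) : volt
                          n_high : integer''',
                    threshold='v > 0.8*mV', reset='v = 0*mV',
                    events={'v_high': 'v > 0.5*mV'})
group.run_on_event('v_high', 'n_high += 1')  # arbitrary action for the event
Network(group).run(50*ms)
print(group.n_high[:])  # time steps each neuron spent above 0.5 mV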
class Synapses(Group): ''' Class representing synaptic connections. Creating a new `Synapses` object does by default not create any synapses -- you either have to provide the `connect` argument or call the `Synapses.connect` method for that. Parameters ---------- source : `SpikeSource` The source of spikes, e.g. a `NeuronGroup`. target : `Group`, optional The target of the spikes, typically a `NeuronGroup`. If none is given, the same as `source` model : {`str`, `Equations`}, optional The model equations for the synapses. pre : {str, dict}, optional The code that will be executed after every pre-synaptic spike. Can be either a single (possibly multi-line) string, or a dictionary mapping pathway names to code strings. In the first case, the pathway will be called ``pre`` and made available as an attribute of the same name. In the latter case, the given names will be used as the pathway/attribute names. Each pathway has its own code and its own delays. post : {str, dict}, optional The code that will be executed after every post-synaptic spike. Same conventions as for `pre`, the default name for the pathway is ``post``. connect : {str, bool}. optional Determines whether any actual synapses are created. ``False`` (the default) means not to create any synapses, ``True`` means to create synapses between all source/target pairs. Also accepts a string expression that evaluates to ``True`` for every synapse that should be created, e.g. ``'i == j'`` for a one-to-one connectivity. See `Synapses.connect` for more details. delay : {`Quantity`, dict}, optional The delay for the "pre" pathway (same for all synapses) or a dictionary mapping pathway names to delays. If a delay is specified in this way for a pathway, it is stored as a single scalar value. It can still be changed afterwards, but only to a single scalar value. If you want to have delays that vary across synapses, do not use the keyword argument, but instead set the delays via the attribute of the pathway, e.g. ``S.pre.delay = ...`` (or ``S.delay = ...`` as an abbreviation), ``S.post.delay = ...``, etc. namespace : dict, optional A dictionary mapping identifier names to objects. If not given, the namespace will be filled in at the time of the call of `Network.run`, with either the values from the ``network`` argument of the `Network.run` method or from the local context, if no such argument is given. dtype : `dtype`, optional The standard datatype for all state variables. Defaults to `core.default_scalar_type`. codeobj_class : class, optional The `CodeObject` class to use to run code. clock : `Clock`, optional The clock to use. method : {str, `StateUpdateMethod`}, optional The numerical integration method to use. If none is given, an appropriate one is automatically determined. name : str, optional The name for this object. If none is given, a unique name of the form ``synapses``, ``synapses_1``, etc. will be automatically chosen. 
''' def __init__(self, source, target=None, model=None, pre=None, post=None, connect=False, delay=None, namespace=None, dtype=None, codeobj_class=None, clock=None, method=None, name='synapses*'): self._N = 0 Group.__init__(self, when=clock, name=name) self.codeobj_class = codeobj_class self.source = weakref.proxy(source) if target is None: self.target = self.source else: self.target = weakref.proxy(target) ##### Prepare and validate equations if model is None: model = '' if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Check flags model.check_flags({DIFFERENTIAL_EQUATION: ['event-driven'], STATIC_EQUATION: ['summed'], PARAMETER: ['constant']}) # Separate the equations into event-driven and continuously updated # equations event_driven = [] continuous = [] for single_equation in model.itervalues(): if 'event-driven' in single_equation.flags: event_driven.append(single_equation) else: continuous.append(single_equation) # Add the lastupdate variable, used by event-driven equations continuous.append(SingleEquation(PARAMETER, 'lastupdate', second)) if len(event_driven): self.event_driven = Equations(event_driven) else: self.event_driven = None self.equations = Equations(continuous) # Setup the namespace self._given_namespace = namespace self.namespace = create_namespace(namespace) self._queues = {} self._delays = {} # Setup variables self._create_variables() #: Set of `Variable` objects that should be resized when the #: number of synapses changes self._registered_variables = set() for varname, var in self.variables.iteritems(): if isinstance(var, DynamicArrayVariable): # Register the array with the `SynapticItemMapping` object so # it gets automatically resized self.register_variable(var) #: List of names of all updaters, e.g. ['pre', 'post'] self._synaptic_updaters = [] #: List of all `SynapticPathway` objects self._pathways = [] for prepost, argument in zip(('pre', 'post'), (pre, post)): if not argument: continue if isinstance(argument, basestring): self._add_updater(argument, prepost) elif isinstance(argument, collections.Mapping): for key, value in argument.iteritems(): if not isinstance(key, basestring): err_msg = ('Keys for the "{}" argument' 'have to be strings, got ' '{} instead.').format(prepost, type(key)) raise TypeError(err_msg) self._add_updater(value, prepost, objname=key) # If we have a pathway called "pre" (the most common use case), provide # direct access to its delay via a delay attribute (instead of having # to use pre.delay) if 'pre' in self._synaptic_updaters: self.variables.add_reference('delay', self.pre.variables['delay']) if delay is not None: if isinstance(delay, Quantity): if not 'pre' in self._synaptic_updaters: raise ValueError(('Cannot set delay, no "pre" pathway exists.' 'Use a dictionary if you want to set the ' 'delay for a pathway with a different name.')) delay = {'pre': delay} if not isinstance(delay, collections.Mapping): raise TypeError('Delay argument has to be a quantity or a ' 'dictionary, is type %s instead.' 
% type(delay)) for pathway, pathway_delay in delay.iteritems(): if not pathway in self._synaptic_updaters: raise ValueError(('Cannot set the delay for pathway ' '"%s": unknown pathway.') % pathway) if not isinstance(pathway_delay, Quantity): raise TypeError(('Cannot set the delay for pathway "%s": ' 'expected a quantity, got %s instead.') % (pathway, type(pathway_delay))) if pathway_delay.size != 1: raise TypeError(('Cannot set the delay for pathway "%s": ' 'expected a scalar quantity, got a ' 'quantity with shape %s instead.') % str(pathway_delay.shape)) fail_for_dimension_mismatch(pathway_delay, second, ('Delay has to be ' 'specified in units ' 'of seconds')) updater = getattr(self, pathway) # For simplicity, store the delay as a one-element array # so that for example updater._delays[:] works. updater._delays.resize(1) updater._delays.set_value(float(pathway_delay)) updater._delays.scalar = True # Do not resize the scalar delay variable when adding synapses self.unregister_variable(updater._delays) #: Performs numerical integration step self.state_updater = StateUpdater(self, method) self.contained_objects.append(self.state_updater) #: "Summed variable" mechanism -- sum over all synapses of a #: pre-/postsynaptic target self.summed_updaters = {} # We want to raise an error if the same variable is updated twice # using this mechanism. This could happen if the Synapses object # connected a NeuronGroup to itself since then all variables are # accessible as var_pre and var_post. summed_targets = set() for single_equation in self.equations.itervalues(): if 'summed' in single_equation.flags: varname = single_equation.varname if not (varname.endswith('_pre') or varname.endswith('_post')): raise ValueError(('The summed variable "%s" does not end ' 'in "_pre" or "_post".') % varname) if not varname in self.variables: raise ValueError(('The summed variable "%s" does not refer' 'do any known variable in the ' 'target group.') % varname) if varname.endswith('_pre'): summed_target = self.source orig_varname = varname[:-4] else: summed_target = self.target orig_varname = varname[:-5] target_eq = getattr(summed_target, 'equations', {}).get(orig_varname, None) if target_eq is None or target_eq.type != PARAMETER: raise ValueError(('The summed variable "%s" needs a ' 'corresponding parameter "%s" in the ' 'target group.') % (varname, orig_varname)) fail_for_dimension_mismatch(self.variables['_summed_'+varname].unit, self.variables[varname].unit, ('Summed variables need to have ' 'the same units in Synapses ' 'and the target group')) if self.variables[varname] in summed_targets: raise ValueError(('The target variable "%s" is already ' 'updated by another summed ' 'variable') % orig_varname) summed_targets.add(self.variables[varname]) updater = SummedVariableUpdater(single_equation.expr, varname, self, summed_target) self.summed_updaters[varname] = updater self.contained_objects.append(updater) # Do an initial connect, if requested if not isinstance(connect, (bool, basestring)): raise TypeError(('"connect" keyword has to be a boolean value or a ' 'string, is type %s instead.' % type(connect))) self._initial_connect = connect if not connect is False: self.connect(connect, level=1) # Activate name attribute access self._enable_group_attributes() def __len__(self): return self._N def before_run(self, namespace): self.lastupdate = self.clock.t super(Synapses, self).before_run(namespace) def _add_updater(self, code, prepost, objname=None): ''' Add a new target updater. 
Users should call `add_pre` or `add_post` instead. Parameters ---------- code : str The abstract code that should be executed on pre-/postsynaptic spikes. prepost : {'pre', 'post'} Whether the code is triggered by presynaptic or postsynaptic spikes objname : str, optional A name for the object, see `SynapticPathway` for more details. Returns ------- objname : str The final name for the object. Equals `objname` if it was explicitly given (and did not end in a wildcard character). ''' if prepost == 'pre': spike_group, group_name = self.source, 'Source' elif prepost == 'post': spike_group, group_name = self.target, 'Target' else: raise ValueError(('"prepost" argument has to be "pre" or "post", ' 'is "%s".') % prepost) if not isinstance(spike_group, SpikeSource) or not hasattr(spike_group, 'clock'): raise TypeError(('%s has to be a SpikeSource with spikes and' ' clock attribute. Is type %r instead') % (group_name, type(spike_group))) updater = SynapticPathway(self, code, prepost, objname) objname = updater.objname if hasattr(self, objname): raise ValueError(('Cannot add updater with name "{name}", synapses ' 'object already has an attribute with this ' 'name.').format(name=objname)) setattr(self, objname, updater) self._synaptic_updaters.append(objname) self._pathways.append(updater) self.contained_objects.append(updater) return objname def _create_variables(self, dtype=None): ''' Create the variables dictionary for this `Synapses`, containing entries for the equation variables and some standard entries. ''' if dtype is None: dtype = defaultdict(lambda: brian_prefs['core.default_scalar_dtype']) elif isinstance(dtype, np.dtype): dtype = defaultdict(lambda: dtype) elif not hasattr(dtype, '__getitem__'): raise TypeError(('Cannot use type %s as dtype ' 'specification') % type(dtype)) self.variables = Variables(self) # Standard variables always present self.variables.add_dynamic_array('_synaptic_pre', size=0, unit=Unit(1), dtype=np.int32, constant_size=True) self.variables.add_dynamic_array('_synaptic_post', size=0, unit=Unit(1), dtype=np.int32, constant_size=True) self.variables.add_reference('i', self.source.variables['i'], index='_presynaptic_idx') self.variables.add_reference('j', self.target.variables['i'], index='_postsynaptic_idx') # We have to make a distinction here between the indices # and the arrays (even though they refer to the same object) # the synaptic propagation template would otherwise overwrite # synaptic_post in its namespace with the value of the # postsynaptic index, leading to errors for the next # propagation. 
self.variables.add_reference('_presynaptic_idx', self.variables['_synaptic_pre']) self.variables.add_reference('_postsynaptic_idx', self.variables['_synaptic_post']) # Add the standard variables self.variables.add_clock_variables(self.clock) self.variables.add_attribute_variable('N', Unit(1), self, '_N', constant=True) for eq in itertools.chain(self.equations.itervalues(), self.event_driven.itervalues() if self.event_driven is not None else []): if eq.type in (DIFFERENTIAL_EQUATION, PARAMETER): constant = ('constant' in eq.flags) # We are dealing with dynamic arrays here, code generation # shouldn't directly access the specifier.array attribute but # use specifier.get_value() to get a reference to the underlying # array self.variables.add_dynamic_array(eq.varname, size=0, unit=eq.unit, dtype=dtype[eq.varname], constant=constant, is_bool=eq.is_bool) elif eq.type == STATIC_EQUATION: if 'summed' in eq.flags: # Give a special name to the subexpression for summed # variables to avoid confusion with the pre/postsynaptic # target variable varname = '_summed_'+eq.varname else: varname = eq.varname self.variables.add_subexpression(varname, unit=eq.unit, expr=str(eq.expr), is_bool=eq.is_bool) else: raise AssertionError('Unknown type of equation: ' + eq.eq_type) # Stochastic variables for xi in self.equations.stochastic_variables: self.variables.add_auxiliary_variable(xi, unit=second**-0.5) # Add all the pre and post variables with _pre and _post suffixes for name, var in getattr(self.source, 'variables', {}).iteritems(): self.variables.add_reference(name + '_pre', var, index='_presynaptic_idx') for name, var in getattr(self.target, 'variables', {}).iteritems(): self.variables.add_reference(name + '_post', var, index='_postsynaptic_idx') # Also add all the post variables without a suffix -- note that a # reference will never overwrite the name of an existing name self.variables.add_reference(name, var, index='_postsynaptic_idx') def connect(self, pre_or_cond, post=None, p=1., n=1, level=0): ''' Add synapses. The first argument can be either a presynaptic index (int or array) or a condition for synapse creation in the form of a string that evaluates to a boolean value (or directly a boolean value). If it is given as an index, also `post` has to be present. A string condition will be evaluated for all pre-/postsynaptic indices, which can be referred to as `i` and `j`. Parameters ---------- pre_or_cond : {int, ndarray of int, bool, str} The presynaptic neurons (in the form of an index or an array of indices) or a boolean value or a string that evaluates to a boolean value. If it is an index, then also `post` has to be given. post_neurons : {int, ndarray of int), optional GroupIndices of neurons from the target group. Non-optional if one or more presynaptic indices have been given. p : float, optional The probability to create `n` synapses wherever the condition given as `pre_or_cond` evaluates to true or for the given pre/post indices. n : int, optional The number of synapses to create per pre/post connection pair. Defaults to 1. 
Examples -------- >>> from brian2 import * >>> import numpy as np >>> G = NeuronGroup(10, 'dv/dt = -v / tau : 1', threshold='v>1', reset='v=0') >>> S = Synapses(G, G, 'w:1', pre='v+=w') >>> S.connect('i != j') # all-to-all but no self-connections >>> S.connect(0, 0) # connect neuron 0 to itself >>> S.connect(np.array([1, 2]), np.array([2, 1])) # connect 1->2 and 2->1 >>> S.connect(True) # connect all-to-all >>> S.connect('i != j', p=0.1) # Connect neurons with 10% probability, exclude self-connections >>> S.connect('i == j', n=2) # Connect all neurons to themselves with 2 synapses ''' if not isinstance(pre_or_cond, (bool, basestring)): pre_or_cond = np.asarray(pre_or_cond) if not np.issubdtype(pre_or_cond.dtype, np.int): raise TypeError(('Presynaptic indices have to be given as ' 'integers, are type %s instead.') % pre_or_cond.dtype) post = np.asarray(post) if not np.issubdtype(post.dtype, np.int): raise TypeError(('Presynaptic indices can only be combined ' 'with postsynaptic integer indices))')) if isinstance(n, basestring): raise TypeError(('Indices cannot be combined with a string' 'expression for n. Either use an array/scalar ' 'for n, or a string expression for the ' 'connections')) i, j, n = np.broadcast_arrays(pre_or_cond, post, n) if i.ndim > 1: raise ValueError('Can only use 1-dimensional indices') self._add_synapses(i, j, n, p, level=level+1) elif isinstance(pre_or_cond, (basestring, bool)): if pre_or_cond is False: return # nothing to do... elif pre_or_cond is True: # TODO: This should not be handled with the general mechanism pre_or_cond = 'True' if post is not None: raise ValueError('Cannot give a postsynaptic index when ' 'using a string expression') if not isinstance(n, (int, basestring)): raise TypeError('n has to be an integer or a string evaluating ' 'to an integer, is type %s instead.' % type(n)) if not isinstance(p, (float, basestring)): raise TypeError('p has to be a float or a string evaluating ' 'to an float, is type %s instead.' % type(n)) self._add_synapses(None, None, n, p, condition=pre_or_cond, level=level+1) else: raise TypeError(('First argument has to be an index or a ' 'string, is %s instead.') % type(pre_or_cond)) def _resize(self, number): if not isinstance(number, int): raise TypeError(('Expected an integer number got {} ' 'instead').format(type(number))) if number < self._N: raise ValueError(('Cannot reduce number of synapses, ' '{} < {}').format(number, len(self))) for variable in self._registered_variables: variable.resize(number) self._N = number def register_variable(self, variable): ''' Register a `DynamicArray` to be automatically resized when the size of the indices change. Called automatically when a `SynapticArrayVariable` specifier is created. ''' if not hasattr(variable, 'resize'): raise TypeError(('Variable of type {} does not have a resize ' 'method, cannot register it with the synaptic ' 'indices.').format(type(variable))) self._registered_variables.add(variable) def unregister_variable(self, variable): ''' Unregister a `DynamicArray` from the automatic resizing mechanism. 
''' self._registered_variables.remove(variable) def _add_synapses(self, sources, targets, n, p, condition=None, level=0): if condition is None: sources = np.atleast_1d(sources).astype(np.int32) targets = np.atleast_1d(targets).astype(np.int32) n = np.atleast_1d(n) p = np.atleast_1d(p) if not len(p) == 1 or p != 1: use_connections = np.random.rand(len(sources)) < p sources = sources[use_connections] targets = targets[use_connections] n = n[use_connections] sources = sources.repeat(n) targets = targets.repeat(n) new_synapses = len(sources) old_N = len(self) new_N = old_N + new_synapses self._resize(new_N) # Deal with subgroups if '_sub_idx' in self.source.variables: real_sources = self.source.variables['_sub_idx'].get_value()[sources] else: real_sources = sources if '_sub_idx' in self.target.variables: real_targets = self.target.variables['_sub_idx'].get_value()[targets] else: real_targets = targets self.variables['_synaptic_pre'].get_value()[old_N:new_N] = real_sources self.variables['_synaptic_post'].get_value()[old_N:new_N] = real_targets else: abstract_code = '_pre_idx = _all_pre \n' abstract_code += '_post_idx = _all_post \n' abstract_code += '_cond = ' + condition + '\n' abstract_code += '_n = ' + str(n) + '\n' abstract_code += '_p = ' + str(p) namespace = get_local_namespace(level + 1) additional_namespace = ('implicit-namespace', namespace) # This overwrites 'i' and 'j' in the synapses' variables dictionary # This is necessary because in the context of synapse creation, i # and j do not correspond to the sources/targets of the existing # synapses but to all the possible sources/targets variables = Variables(None) # Will be set in the template variables.add_auxiliary_variable('i', unit=Unit(1)) variables.add_auxiliary_variable('j', unit=Unit(1)) if '_sub_idx' in self.source.variables: variables.add_reference('_all_pre', self.source.variables['_sub_idx']) else: variables.add_reference('_all_pre', self.source.variables['i']) if '_sub_idx' in self.target.variables: variables.add_reference('_all_post', self.target.variables['_sub_idx']) else: variables.add_reference('_all_post', self.target.variables['i']) variable_indices = defaultdict(lambda: '_idx') for varname in self.variables: if self.variables.indices[varname] == '_presynaptic_idx': variable_indices[varname] = '_all_pre' elif self.variables.indices[varname] == '_postsynaptic_idx': variable_indices[varname] = '_all_post' variable_indices['_all_pre'] = 'i' variable_indices['_all_post'] = 'j' codeobj = create_runner_codeobj(self, abstract_code, 'synapses_create', variable_indices=variable_indices, additional_variables=variables, additional_namespace=additional_namespace, check_units=False ) codeobj() def calc_indices(self, index): ''' Returns synaptic indices for `index`, which can be a tuple of indices (including arrays and slices), a single index or a string. ''' if (not isinstance(index, (tuple, basestring)) and isinstance(index, (int, np.ndarray, slice, collections.Sequence))): index = (index, slice(None), slice(None)) if isinstance(index, tuple): if len(index) == 2: # two indices (pre- and postsynaptic cell) index = (index[0], index[1], slice(None)) elif len(index) > 3: raise IndexError('Need 1, 2 or 3 indices, got %d.' 
% len(index)) I, J, K = index pre_synapses = find_synapses(I, self.variables['_synaptic_pre'].get_value() - self.source.start) post_synapses = find_synapses(J, self.variables['_synaptic_post'].get_value() - self.target.start) matching_synapses = np.intersect1d(pre_synapses, post_synapses, assume_unique=True) if isinstance(K, slice) and K == slice(None): return matching_synapses elif isinstance(K, (int, slice)): test_k = slice_to_test(K) else: raise NotImplementedError(('Indexing synapses with arrays not' 'implemented yet')) # We want to access the raw arrays here, not go through the Variable pre_neurons = self.variables['_synaptic_pre'].get_value()[pre_synapses] post_neurons = self.variables['_synaptic_post'].get_value()[post_synapses] synapse_numbers = _synapse_numbers(pre_neurons, post_neurons) return np.intersect1d(matching_synapses, np.flatnonzero(test_k(synapse_numbers)), assume_unique=True) else: raise IndexError('Unsupported index type {itype}'.format(itype=type(index)))
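# ---- Illustrative usage of the "summed variable" mechanism above -----------
# User-level sketch with the current public API (not part of this module):
# a synaptic subexpression flagged (summed) is accumulated on every time step
# into the parameter of the same name (minus the _post suffix) in the target
# group, via the SummedVariableUpdater created in __init__.
from brian2 import NeuronGroup, Synapses, Network, ms

source = NeuronGroup(6, 'rate : 1')
target = NeuronGroup(3, 'g_total : 1')
syn = Synapses(source, target,
               '''w : 1
                  g_total_post = w * rate_pre : 1 (summed)''')
syn.connect()        # all-to-all
syn.w = 0.5
source.rate = 1.0
Network(source, target, syn).run(1*ms)
print(target.g_total[:])   # 6 synapses * 0.5 * 1.0 = 3.0 per target neuron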
def simulate(to_file=True): common_params = { # Parameters common to all neurons. 'C': 100*b2.pF, 'tau_m': 10*b2.ms, 'EL': -60*b2.mV, 'DeltaT': 2*b2.mV, 'Vreset': -65, # *b2.mV 'VTmean': -50*b2.mV, 'VTsd': 2*b2.mV, 'delay': 0.*b2.ms, } common_params['gL'] = common_params['C'] / common_params['tau_m'] E_cell_params = dict( common_params, **{ 'Ncells': num_E_cells, 'IXmean': 30. * b2.pA, # 30 'IXsd': 20. * b2.pA }) I_cell_params = dict( common_params, **{ 'Ncells': num_I_cells, 'IXmean': 30. * b2.pA, 'IXsd': 80. * b2.pA, 'p_rate': 400.0 * b2.Hz }) param_I_syn = { "Erev_i": -80.0 * b2.mV, "Erev_x": 0.0 * b2.mV, "Erev_e": 0.0 * b2.mV, "Tau_i": 3.0 * b2.ms, "Tau_e": 4.0 * b2.ms, "Tau_x": 4.0 * b2.ms, "w_i": 1.5, # *b2.nsiemens, # Peak conductance "w_x": 1.1, # *b2.nsiemens, # (0.8 in paper) "w_e": 0.2, # *b2.nsiemens, "p_i": 0.05, "p_e": 0.05, } param_E_syn = { "Erev_i": -80.0 * b2.mV, "Erev_x": 0.0 * b2.mV, "Erev_e": 0.0 * b2.mV, "Tau_i": 3.5 * b2.ms, "Tau_e": 4.0 * b2.ms, "Tau_x": 4.0 * b2.ms, "w_i": 0.6 * b2.nsiemens, # *b2.nsiemens, # Peak conductance "w_x": 1.4 * b2.nsiemens, "w_e": 0.1 * b2.nsiemens, "p_i": 0.1, "p_e": 0.05, } if state == "gamma": print('Gamma oscillation state.') param_I_syn['w_x'] = 0.3 * b2.nS param_I_syn['w_e'] = 0.4 * b2.nS elif state == "beta": param_I_syn['w_x'] = 0.5 * b2.nS param_I_syn['Tau_x'] = 12. * b2.ms param_E_syn['w_x'] = 0.55 * b2.nS param_E_syn['Tau_x'] = 12. * b2.ms param_E_syn['w_e'] = 0.05 * b2.nS param_E_syn['Tau_e'] = 12. * b2.ms param_I_syn['w_e'] = 0.1 * b2.nS param_E_syn['w_i'] = 0.1 * b2.nS param_E_syn['Tau_i'] = 15. * b2.ms param_I_syn['w_i'] = 0.2 * b2.nS param_I_syn['Tau_i'] = 15. * b2.ms eqs = Equations(""" VT : volt IX : amp I_syn_e = g_syn_e * (Erev_e - vm): amp I_syn_i = g_syn_i * (Erev_i - vm): amp I_syn_x = g_syn_x * (Erev_x - vm): amp Im = IX + gL * (EL - vm) + gL * DeltaT * exp((vm - VT) / DeltaT) : amp ds_e/dt = -s_e / Tau_e : siemens dg_syn_e/dt = (s_e - g_syn_e) / Tau_e : siemens ds_i/dt = -s_i / Tau_i : siemens dg_syn_i/dt = (s_i - g_syn_i) / Tau_i : siemens ds_x/dt = -s_x / Tau_x : siemens dg_syn_x/dt = (s_x - g_syn_x) / Tau_x : siemens dvm/dt = (Im + I_syn_e + I_syn_i + I_syn_x) / C : volt """) I_cells = b2.NeuronGroup(I_cell_params['Ncells'], model=eqs, dt=dt0, method=integration_method, threshold="vm > 0.*mV", refractory="vm > 0.*mV", reset="vm={}*mV".format(common_params['Vreset']), namespace={ **common_params, **param_I_syn }) E_cells = b2.NeuronGroup(E_cell_params['Ncells'], model=eqs, dt=dt0, method=integration_method, threshold="vm > 0.*mV", refractory="vm > 0.*mV", reset="vm={}*mV".format(common_params['Vreset']), namespace={ **common_params, **param_E_syn }) # rates = '400.0*(1 + 0.35 * cos(2*pi*sin(2*pi*t/(100*ms)) + pi + 2*pi/N + (1.0*i/N)*2*pi))*Hz' Poisson_to_E = b2.PoissonGroup( E_cell_params['Ncells'], rates='400.0*(1+0.35*cos(2*pi*sin(2*pi*t/({:d}*ms))+pi+' '2*pi/{:d} + (1.0*i/{:d})*2*pi))*Hz'.format(sim_duration, E_cell_params['Ncells'], E_cell_params['Ncells'])) Poisson_to_I = b2.PoissonGroup(I_cell_params['Ncells'], rates=I_cell_params["p_rate"]) # --------------------------------------------------------------- cEE = b2.Synapses(E_cells, E_cells, dt=dt0, delay=common_params['delay'], on_pre='s_e+= {}*nS'.format(param_E_syn['w_e']), namespace={ **common_params, **param_E_syn }) cEE.connect(p="{:g}".format(param_E_syn["p_e"])) #, condition='i!=j' cII = b2.Synapses(I_cells, I_cells, dt=dt0, delay=common_params['delay'], method=integration_method, on_pre='s_i+= {}*nS'.format(param_I_syn['w_i']), namespace={ 
**common_params, **param_I_syn }) cII.connect(p="{:g}".format(param_I_syn["p_e"])) #, condition='i!=j' cIE = b2.Synapses(E_cells, I_cells, dt=dt0, method=integration_method, on_pre='s_e+={}*nsiemens'.format(param_I_syn["w_e"])) cIE.connect(p=param_I_syn["p_e"]) cEI = b2.Synapses(I_cells, E_cells, dt=dt0, delay=common_params['delay'], method=integration_method, on_pre='s_i+={}*nsiemens'.format(param_I_syn["w_i"])) cEI.connect(p=param_I_syn["p_i"]) cEX = b2.Synapses(Poisson_to_E, E_cells, dt=dt0, delay=common_params['delay'], method=integration_method, on_pre="s_x += {}*nS".format(param_E_syn["w_x"])) cEX.connect(j='i') cIX = b2.Synapses(Poisson_to_I, I_cells, dt=dt0, delay=common_params['delay'], method=integration_method, on_pre="s_x += {}*nS".format(param_I_syn["w_x"])) cIX.connect(j='i') # Initialise random parameters.---------------------------------- E_cells.VT = (randn(len(E_cells)) * common_params['VTsd'] + common_params['VTmean']) I_cells.VT = (randn(len(I_cells)) * common_params['VTsd'] + common_params['VTmean']) E_cells.IX = (randn(len(E_cells)) * E_cell_params['IXsd'] + E_cell_params['IXmean']) I_cells.IX = (randn(len(I_cells)) * I_cell_params['IXsd'] + I_cell_params['IXmean']) I_cells.vm = randn(len(I_cells)) * 10 * b2.mV - 60 * b2.mV E_cells.vm = randn(len(E_cells)) * 10 * b2.mV - 60 * b2.mV spike_mon_E = b2.SpikeMonitor(E_cells) spike_mon_I = b2.SpikeMonitor(I_cells) LFP_E = b2.PopulationRateMonitor(E_cells) LFP_I = b2.PopulationRateMonitor(I_cells) state_monitor_E = state_monitor_I = None if rocord_voltages: state_monitor_E = b2.StateMonitor(E_cells, "vm", record=True, dt=dt0) state_monitor_I = b2.StateMonitor(I_cells, "vm", record=True, dt=dt0) net = b2.Network(E_cells) net.add(I_cells) net.add(spike_mon_E) net.add(spike_mon_I) net.add(LFP_E) net.add(LFP_I) net.add(cEE) net.add(cII) net.add(cEI) net.add(cIE) net.add(cIX) net.add(cEX) if rocord_voltages: net.add(state_monitor_E) net.add(state_monitor_I) # ---------------------------------------------------------------- print('Simulation running...') start_time = time.time() b2.run(sim_duration * b2.ms) duration = time.time() - start_time print('Simulation time:', duration, 'seconds') # ---------------------------------------------------------------- if to_file: to_npz(spike_mon_E, LFP_E, "data/E") to_npz(spike_mon_I, LFP_I, "data/I")
def __init__(self, morphology=None, model=None, threshold=None, refractory=False, reset=None, events=None, threshold_location=None, dt=None, clock=None, order=0, Cm=0.9 * uF / cm ** 2, Ri=150 * ohm * cm, name='spatialneuron*', dtype=None, namespace=None, method=('linear', 'exponential_euler', 'rk2', 'heun')): # #### Prepare and validate equations if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Insert the threshold mechanism at the specified location if threshold_location is not None: if hasattr(threshold_location, '_indices'): # assuming this is a method threshold_location = threshold_location._indices() # for now, only a single compartment allowed if len(threshold_location) == 1: threshold_location = threshold_location[0] else: raise AttributeError(('Threshold can only be applied on a ' 'single location')) threshold = '(' + threshold + ') and (i == ' + str(threshold_location) + ')' # Check flags (we have point currents) model.check_flags({DIFFERENTIAL_EQUATION: ('point current',), PARAMETER: ('constant', 'shared', 'linked', 'point current'), SUBEXPRESSION: ('shared', 'point current')}) # Add the membrane potential model += Equations(''' v:volt # membrane potential ''') # Extract membrane equation if 'Im' in model: membrane_eq = model['Im'] # the membrane equation else: raise TypeError('The transmembrane current Im must be defined') # Insert point currents in the membrane equation for eq in model.itervalues(): if 'point current' in eq.flags: fail_for_dimension_mismatch(eq.unit, amp, "Point current " + eq.varname + " should be in amp") eq.flags.remove('point current') membrane_eq.expr = Expression( str(membrane_eq.expr.code) + '+' + eq.varname + '/area') ###### Process model equations (Im) to extract total conductance and the remaining current # Check conditional linearity with respect to v # Match to _A*v+_B var = sp.Symbol('v', real=True) wildcard = sp.Wild('_A', exclude=[var]) constant_wildcard = sp.Wild('_B', exclude=[var]) pattern = wildcard * var + constant_wildcard # Expand expressions in the membrane equation membrane_eq.type = DIFFERENTIAL_EQUATION for var, expr in model.get_substituted_expressions(): if var == 'Im': Im_expr = expr membrane_eq.type = SUBEXPRESSION # Factor out the variable s_expr = sp.collect(str_to_sympy(Im_expr.code).expand(), var) matches = s_expr.match(pattern) if matches is None: raise TypeError, "The membrane current must be linear with respect to v" a, b = (matches[wildcard], matches[constant_wildcard]) # Extracts the total conductance from Im, and the remaining current minusa_str, b_str = sympy_to_str(-a), sympy_to_str(b) # Add correct units if necessary if minusa_str == '0': minusa_str += '*siemens/meter**2' if b_str == '0': b_str += '*amp/meter**2' gtot_str = "gtot__private=" + minusa_str + ": siemens/meter**2" I0_str = "I0__private=" + b_str + ": amp/meter**2" model += Equations(gtot_str + "\n" + I0_str) # Insert morphology (store a copy) self.morphology = copy.deepcopy(morphology) # Flatten the morphology self.flat_morphology = FlatMorphology(morphology) # Equations for morphology # TODO: check whether Cm and Ri are already in the equations # no: should be shared instead of constant # yes: should be constant (check) eqs_constants = Equations(""" length : meter (constant) distance : meter (constant) area : meter**2 (constant) volume : meter**3 diameter : meter (constant) Cm : farad/meter**2 (constant) 
Ri : ohm*meter (constant, shared) r_length_1 : meter (constant) r_length_2 : meter (constant) time_constant = Cm/gtot__private : second space_constant = (2/pi)**(1.0/3.0) * (area/(1/r_length_1 + 1/r_length_2))**(1.0/6.0) / (2*(Ri*gtot__private)**(1.0/2.0)) : meter """) if self.flat_morphology.has_coordinates: eqs_constants += Equations(''' x : meter (constant) y : meter (constant) z : meter (constant) ''') NeuronGroup.__init__(self, morphology.total_compartments, model=model + eqs_constants, threshold=threshold, refractory=refractory, reset=reset, events=events, method=method, dt=dt, clock=clock, order=order, namespace=namespace, dtype=dtype, name=name) # Parameters and intermediate variables for solving the cable equations # Note that some of these variables could have meaningful physical # units (e.g. _v_star is in volt, _I0_all is in amp/meter**2 etc.) but # since these variables should never be used in user code, we don't # assign them any units self.variables.add_arrays(['_ab_star0', '_ab_star1', '_ab_star2', '_a_minus0', '_a_minus1', '_a_minus2', '_a_plus0', '_a_plus1', '_a_plus2', '_b_plus', '_b_minus', '_v_star', '_u_plus', '_u_minus', # The following three are for solving the # three tridiag systems in parallel '_c1', '_c2', '_c3', # The following two are only necessary for # C code where we cannot deal with scalars # and arrays interchangeably: '_I0_all', '_gtot_all'], unit=1, size=self.N, read_only=True) self.Cm = Cm self.Ri = Ri # These explict assignments will load the morphology values from disk # in standalone mode self.distance_ = self.flat_morphology.distance self.length_ = self.flat_morphology.length self.area_ = self.flat_morphology.area self.diameter_ = self.flat_morphology.diameter self.r_length_1_ = self.flat_morphology.r_length_1 self.r_length_2_ = self.flat_morphology.r_length_2 if self.flat_morphology.has_coordinates: self.x_ = self.flat_morphology.x self.y_ = self.flat_morphology.y self.z_ = self.flat_morphology.z # Performs numerical integration step self.add_attribute('diffusion_state_updater') self.diffusion_state_updater = SpatialStateUpdater(self, method, clock=self.clock, order=order) # Creation of contained_objects that do the work self.contained_objects.extend([self.diffusion_state_updater])
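# ---- Illustrative usage (user-level sketch, not part of this module) -------
# A passive cable driven by a point current at one end, following the pattern
# in the Brian 2 multicompartmental documentation; all values are made up.
from brian2 import (SpatialNeuron, Cylinder, mV, uF, cm, ohm, um,
                    siemens, nA, ms, run)

morpho = Cylinder(length=1000*um, diameter=2*um, n=100)
El = -70*mV
gL = 1e-4*siemens/cm**2
eqs = '''
Im = gL * (El - v) : amp/meter**2
I : amp (point current)
'''
neuron = SpatialNeuron(morphology=morpho, model=eqs,
                       Cm=1*uF/cm**2, Ri=100*ohm*cm,
                       method='exponential_euler')
neuron.v = El
neuron.I[0] = 0.02*nA   # inject current into the first compartment
run(10*ms)
print(neuron.v[:5])     # depolarization near the injection site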
def __init__(self, source, target=None, model=None, pre=None, post=None, connect=False, delay=None, namespace=None, dtype=None, codeobj_class=None, clock=None, method=None, name='synapses*'): self._N = 0 Group.__init__(self, when=clock, name=name) self.codeobj_class = codeobj_class self.source = weakref.proxy(source) if target is None: self.target = self.source else: self.target = weakref.proxy(target) ##### Prepare and validate equations if model is None: model = '' if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Check flags model.check_flags({ DIFFERENTIAL_EQUATION: ['event-driven'], SUBEXPRESSION: ['summed', 'scalar'], PARAMETER: ['constant', 'scalar'] }) # Add the lastupdate variable, needed for event-driven updates if 'lastupdate' in model._equations: raise SyntaxError('lastupdate is a reserved name.') model._equations['lastupdate'] = SingleEquation( PARAMETER, 'lastupdate', second) self._create_variables(model) # Separate the equations into event-driven equations, # continuously updated equations and summed variable updates event_driven = [] continuous = [] summed_updates = [] for single_equation in model.itervalues(): if 'event-driven' in single_equation.flags: event_driven.append(single_equation) elif 'summed' in single_equation.flags: summed_updates.append(single_equation) else: continuous.append(single_equation) if len(event_driven): self.event_driven = Equations(event_driven) else: self.event_driven = None self.equations = Equations(continuous) if namespace is None: namespace = {} #: The group-specific namespace self.namespace = namespace #: Set of `Variable` objects that should be resized when the #: number of synapses changes self._registered_variables = set() for varname, var in self.variables.iteritems(): if isinstance(var, DynamicArrayVariable): # Register the array with the `SynapticItemMapping` object so # it gets automatically resized self.register_variable(var) if delay is None: delay = {} if isinstance(delay, Quantity): delay = {'pre': delay} elif not isinstance(delay, collections.Mapping): raise TypeError('Delay argument has to be a quantity or a ' 'dictionary, is type %s instead.' % type(delay)) #: List of names of all updaters, e.g. 
['pre', 'post'] self._synaptic_updaters = [] #: List of all `SynapticPathway` objects self._pathways = [] for prepost, argument in zip(('pre', 'post'), (pre, post)): if not argument: continue if isinstance(argument, basestring): pathway_delay = delay.get(prepost, None) self._add_updater(argument, prepost, delay=pathway_delay) elif isinstance(argument, collections.Mapping): for key, value in argument.iteritems(): if not isinstance(key, basestring): err_msg = ('Keys for the "{}" argument' 'have to be strings, got ' '{} instead.').format(prepost, type(key)) raise TypeError(err_msg) pathway_delay = delay.get(key, None) self._add_updater(value, prepost, objname=key, delay=pathway_delay) # Check whether any delays were specified for pathways that don't exist for pathway in delay: if not pathway in self._synaptic_updaters: raise ValueError(('Cannot set the delay for pathway ' '"%s": unknown pathway.') % pathway) # If we have a pathway called "pre" (the most common use case), provide # direct access to its delay via a delay attribute (instead of having # to use pre.delay) if 'pre' in self._synaptic_updaters: self.variables.add_reference('delay', self.pre.variables['delay']) #: Performs numerical integration step self.state_updater = None # We only need a state update if we have differential equations if len(self.equations.diff_eq_names): self.state_updater = StateUpdater(self, method) self.contained_objects.append(self.state_updater) #: "Summed variable" mechanism -- sum over all synapses of a #: pre-/postsynaptic target self.summed_updaters = {} # We want to raise an error if the same variable is updated twice # using this mechanism. This could happen if the Synapses object # connected a NeuronGroup to itself since then all variables are # accessible as var_pre and var_post. summed_targets = set() for single_equation in summed_updates: varname = single_equation.varname if not (varname.endswith('_pre') or varname.endswith('_post')): raise ValueError(('The summed variable "%s" does not end ' 'in "_pre" or "_post".') % varname) if not varname in self.variables: raise ValueError(('The summed variable "%s" does not refer' 'do any known variable in the ' 'target group.') % varname) if varname.endswith('_pre'): summed_target = self.source orig_varname = varname[:-4] else: summed_target = self.target orig_varname = varname[:-5] target_eq = getattr(summed_target, 'equations', {}).get(orig_varname, None) if target_eq is None or target_eq.type != PARAMETER: raise ValueError(('The summed variable "%s" needs a ' 'corresponding parameter "%s" in the ' 'target group.') % (varname, orig_varname)) fail_for_dimension_mismatch( self.variables['_summed_' + varname].unit, self.variables[varname].unit, ('Summed variables need to have ' 'the same units in Synapses ' 'and the target group')) if self.variables[varname] in summed_targets: raise ValueError(('The target variable "%s" is already ' 'updated by another summed ' 'variable') % orig_varname) summed_targets.add(self.variables[varname]) updater = SummedVariableUpdater(single_equation.expr, varname, self, summed_target) self.summed_updaters[varname] = updater self.contained_objects.append(updater) # Do an initial connect, if requested if not isinstance(connect, (bool, basestring)): raise TypeError( ('"connect" keyword has to be a boolean value or a ' 'string, is type %s instead.' % type(connect))) self._initial_connect = connect if not connect is False: self.connect(connect, level=1) # Activate name attribute access self._enable_group_attributes()
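# ---- Illustrative usage of (event-driven) synaptic equations ---------------
# User-level sketch with the current public API (which names the pathway
# arguments on_pre/on_post; the constructor above still calls them pre/post).
# The (event-driven) flag makes the STDP traces apre/apost integrate only at
# spike times, via the event-driven equations separated out above.
from brian2 import NeuronGroup, PoissonGroup, Synapses, Network, ms, Hz

taupre = taupost = 20*ms
Apre = 0.01
Apost = -Apre * 1.05
wmax = 0.01

inputs = PoissonGroup(100, rates=15*Hz)
neuron = NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1', threshold='v > 1',
                     reset='v = 0')
S = Synapses(inputs, neuron,
             '''w : 1
                dapre/dt = -apre / taupre : 1 (event-driven)
                dapost/dt = -apost / taupost : 1 (event-driven)''',
             on_pre='''v_post += w
                       apre += Apre
                       w = clip(w + apost, 0, wmax)''',
             on_post='''apost += Apost
                        w = clip(w + apre, 0, wmax)''')
S.connect()
S.w = 'rand() * wmax'
Network(inputs, neuron, S).run(100*ms)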
def get_sensitivity_equations(group, parameters, namespace=None, level=1,
                              optimize=True):
    """
    Get equations for sensitivity variables.

    Parameters
    ----------
    group : `NeuronGroup`
        The group of neurons that will be simulated.
    parameters : list of str
        Names of the parameters that are fit.
    namespace : dict, optional
        The namespace to use.
    level : `int`, optional
        How much farther to go down in the stack to find the namespace.
    optimize : bool, optional
        Whether to remove sensitivity variables from the equations that do
        not evolve if initialized to zero (e.g. ``dS_x_y/dt = -S_x_y/tau``
        would be removed). This avoids unnecessary computation but will fail
        in the rare case that such a sensitivity variable needs to be
        initialized to a non-zero value. Defaults to ``True``.

    Returns
    -------
    sensitivity_eqs : `Equations`
        The equations for the sensitivity variables.
    """
    if namespace is None:
        namespace = get_local_namespace(level)
        namespace.update(group.namespace)

    eqs = group.equations
    diff_eqs = eqs.get_substituted_expressions(group.variables)
    diff_eq_names = [name for name, _ in diff_eqs]

    system = sympy.Matrix([str_to_sympy(diff_eq[1].code)
                           for diff_eq in diff_eqs])
    J = system.jacobian([str_to_sympy(d) for d in diff_eq_names])

    sensitivity = []
    sensitivity_names = []
    for parameter in parameters:
        F = system.jacobian([str_to_sympy(parameter)])
        names = [str_to_sympy(f'S_{diff_eq_name}_{parameter}')
                 for diff_eq_name in diff_eq_names]
        sensitivity.append(J * sympy.Matrix(names) + F)
        sensitivity_names.append(names)

    new_eqs = []
    for names, sensitivity_eqs, param in zip(sensitivity_names, sensitivity,
                                             parameters):
        for name, eq, orig_var in zip(names, sensitivity_eqs, diff_eq_names):
            if param in namespace:
                unit = eqs[orig_var].dim / namespace[param].dim
            elif param in group.variables:
                unit = eqs[orig_var].dim / group.variables[param].dim
            else:
                raise AssertionError(f'Parameter {param} neither in namespace '
                                     f'nor variables')
            unit = repr(unit) if not unit.is_dimensionless else '1'
            if optimize:
                # Check if the equation stays at zero if initialized at zero
                zeroed = eq.subs(name, sympy.S.Zero)
                if zeroed == sympy.S.Zero:
                    # No need to include equation as differential equation
                    if unit == '1':
                        new_eqs.append(f'{sympy_to_str(name)} = 0 : {unit}')
                    else:
                        new_eqs.append(f'{sympy_to_str(name)} = 0*{unit} : {unit}')
                    continue
            rhs = sympy_to_str(eq)
            if rhs == '0':  # avoid unit mismatch
                rhs = f'0*{unit}/second'
            new_eqs.append('d{lhs}/dt = {rhs} : {unit}'.format(
                lhs=sympy_to_str(name), rhs=rhs, unit=unit))
    new_eqs = Equations('\n'.join(new_eqs))
    return new_eqs
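# ---- Illustrative usage (user-level sketch, values made up) ----------------
# Sensitivity equations for a leaky integrator whose conductance gL is a
# fitted parameter: the result contains dS_v_gL/dt, the derivative of v with
# respect to gL, with dimensions volt/siemens. An explicit (empty) namespace
# is passed so that no stack inspection is needed; gL is looked up as a
# group variable.
from brian2 import NeuronGroup, mV, nA, pF

group = NeuronGroup(1, '''dv/dt = (gL*(EL - v) + I_inj) / C : volt
                          gL : siemens (constant)''',
                    namespace={'EL': -70*mV, 'I_inj': 0.5*nA, 'C': 200*pF})
sensitivity_eqs = get_sensitivity_equations(group, parameters=['gL'],
                                            namespace={})
print(sensitivity_eqs)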
def __init__(self, dt, model, input, output, input_var, output_var,
             n_samples, threshold, reset, refractory, method, param_init,
             use_units=True):
    """Initialize the fitter."""
    if dt is None:
        raise ValueError("dt-sampling frequency of the input must be set")

    if isinstance(model, str):
        model = Equations(model)
    if input_var not in model.identifiers:
        raise NameError("%s is not an identifier in the model" % input_var)

    self.dt = dt

    self.simulator = None

    self.parameter_names = model.parameter_names
    self.n_traces, n_steps = input.shape
    self.duration = n_steps * dt
    self.n_neurons = self.n_traces * n_samples

    self.n_samples = n_samples
    self.method = method
    self.threshold = threshold
    self.reset = reset
    self.refractory = refractory

    self.input = input
    self.output_var = output_var
    if output_var == 'spikes':
        self.output_dim = DIMENSIONLESS
    else:
        self.output_dim = model[output_var].dim
    self.model = model

    self.use_units = use_units

    input_dim = get_dimensions(input)
    input_dim = '1' if input_dim is DIMENSIONLESS else repr(input_dim)
    input_eqs = "{} = input_var(t, i % n_traces) : {}".format(input_var,
                                                              input_dim)
    self.model += input_eqs

    input_traces = TimedArray(input.transpose(), dt=dt)
    self.input_traces = input_traces

    # initialization of attributes used later
    self._best_params = None
    self._best_error = None
    self.optimizer = None
    self.metric = None

    if not param_init:
        param_init = {}
    for param, val in param_init.items():
        if not (param in self.model.diff_eq_names or
                param in self.model.parameter_names):
            raise ValueError("%s is not a model variable or a "
                             "parameter in the model" % param)
    self.param_init = param_init
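# ---- Illustrative usage (user-level sketch, not part of this module) -------
# Assumes the brian2modelfitting package with its TraceFitter subclass of
# this base class, the NevergradOptimizer/MSEMetric helpers, and the
# nevergrad dependency; the traces below are placeholder zeros, not real
# recordings.
import numpy as np
from brian2 import nA, mV, ms, nS, pF, volt
from brian2modelfitting import TraceFitter, NevergradOptimizer, MSEMetric

dt = 0.1*ms
inp_traces = np.zeros((1, 1000)) * nA    # shape (n_traces, n_steps)
out_traces = np.zeros((1, 1000)) * volt  # recorded voltage for each trace

model = '''
dv/dt = (gL*(EL - v) + I_inj) / C : volt
gL : siemens (constant)
C  : farad (constant)
EL : volt (constant)
'''
fitter = TraceFitter(model=model, input_var='I_inj', input=inp_traces,
                     output_var='v', output=out_traces, dt=dt,
                     n_samples=30, param_init={'v': -70*mV})
params, error = fitter.fit(n_rounds=2,
                           optimizer=NevergradOptimizer(),
                           metric=MSEMetric(),
                           gL=[1*nS, 100*nS], C=[50*pF, 500*pF],
                           EL=[-90*mV, -50*mV])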
class Synapses(BrianObject, Group): ''' Class representing synaptic connections. Creating a new `Synapses` object does by default not create any synapses -- you either have to provide the `connect` argument or call the `Synapses.connect` method for that. Parameters ---------- source : `SpikeSource` The source of spikes, e.g. a `NeuronGroup`. target : `Group`, optional The target of the spikes, typically a `NeuronGroup`. If none is given, the same as `source` model : {`str`, `Equations`}, optional The model equations for the synapses. pre : {str, dict}, optional The code that will be executed after every pre-synaptic spike. Can be either a single (possibly multi-line) string, or a dictionary mapping pathway names to code strings. In the first case, the pathway will be called ``pre`` and made available as an attribute of the same name. In the latter case, the given names will be used as the pathway/attribute names. Each pathway has its own code and its own delays. post : {str, dict}, optional The code that will be executed after every post-synaptic spike. Same conventions as for `pre`, the default name for the pathway is ``post``. connect : {str, bool}. optional Determines whether any actual synapses are created. ``False`` (the default) means not to create any synapses, ``True`` means to create synapses between all source/target pairs. Also accepts a string expression that evaluates to ``True`` for every synapse that should be created, e.g. ``'i == j'`` for a one-to-one connectivity. See `Synapses.connect` for more details. delay : {`Quantity`, dict}, optional The delay for the "pre" pathway (same for all synapses) or a dictionary mapping pathway names to delays. If a delay is specified in this way for a pathway, it is stored as a single scalar value. It can still be changed afterwards, but only to a single scalar value. If you want to have delays that vary across synapses, do not use the keyword argument, but instead set the delays via the attribute of the pathway, e.g. ``S.pre.delay = ...`` (or ``S.delay = ...`` as an abbreviation), ``S.post.delay = ...``, etc. namespace : dict, optional A dictionary mapping identifier names to objects. If not given, the namespace will be filled in at the time of the call of `Network.run`, with either the values from the ``network`` argument of the `Network.run` method or from the local context, if no such argument is given. dtype : `dtype`, optional The standard datatype for all state variables. Defaults to `core.default_scalar_type`. codeobj_class : class, optional The `CodeObject` class to use to run code. clock : `Clock`, optional The clock to use. method : {str, `StateUpdateMethod`}, optional The numerical integration method to use. If none is given, an appropriate one is automatically determined. name : str, optional The name for this object. If none is given, a unique name of the form ``synapses``, ``synapses_1``, etc. will be automatically chosen. 
''' def __init__(self, source, target=None, model=None, pre=None, post=None, connect=False, delay=None, namespace=None, dtype=None, codeobj_class=None, clock=None, method=None, name='synapses*'): BrianObject.__init__(self, when=clock, name=name) self.codeobj_class = codeobj_class self.source = weakref.proxy(source) if target is None: self.target = self.source else: self.target = weakref.proxy(target) ##### Prepare and validate equations if model is None: model = '' if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Check flags model.check_flags({ DIFFERENTIAL_EQUATION: ['event-driven', 'lumped'], STATIC_EQUATION: ['lumped'], PARAMETER: ['constant', 'lumped'] }) # Separate the equations into event-driven and continuously updated # equations event_driven = [] continuous = [] for single_equation in model.itervalues(): if 'event-driven' in single_equation.flags: if 'lumped' in single_equation.flags: raise ValueError( ('Event-driven variable %s cannot be ' 'a lumped variable.') % single_equation.varname) event_driven.append(single_equation) else: continuous.append(single_equation) # Add the lastupdate variable, used by event-driven equations continuous.append(SingleEquation(PARAMETER, 'lastupdate', second)) if len(event_driven): self.event_driven = Equations(event_driven) else: self.event_driven = None self.equations = Equations(continuous) ##### Setup the memory self.arrays = self._allocate_memory(dtype=dtype) # Setup the namespace self._given_namespace = namespace self.namespace = create_namespace(namespace) self._queues = {} self._delays = {} self.item_mapping = SynapticItemMapping(self) self.indices = { '_idx': self.item_mapping, '_presynaptic_idx': self.item_mapping.synaptic_pre, '_postsynaptic_idx': self.item_mapping.synaptic_post } # Allow S.i instead of S.indices.i, etc. self.i = self.item_mapping.i self.j = self.item_mapping.j self.k = self.item_mapping.k # Setup variables self.variables = self._create_variables() #: List of names of all updaters, e.g. ['pre', 'post'] self._updaters = [] for prepost, argument in zip(('pre', 'post'), (pre, post)): if not argument: continue if isinstance(argument, basestring): self._add_updater(argument, prepost) elif isinstance(argument, collections.Mapping): for key, value in argument.iteritems(): if not isinstance(key, basestring): err_msg = ('Keys for the "{}" argument' 'have to be strings, got ' '{} instead.').format(prepost, type(key)) raise TypeError(err_msg) self._add_updater(value, prepost, objname=key) # If we have a pathway called "pre" (the most common use case), provide # direct access to its delay via a delay attribute (instead of having # to use pre.delay) if 'pre' in self._updaters: self.variables['delay'] = self.pre.variables['delay'] if delay is not None: if isinstance(delay, Quantity): if not 'pre' in self._updaters: raise ValueError( ('Cannot set delay, no "pre" pathway exists.' 'Use a dictionary if you want to set the ' 'delay for a pathway with a different name.')) delay = {'pre': delay} if not isinstance(delay, collections.Mapping): raise TypeError('Delay argument has to be a quantity or a ' 'dictionary, is type %s instead.' 
% type(delay)) for pathway, pathway_delay in delay.iteritems(): if not pathway in self._updaters: raise ValueError(('Cannot set the delay for pathway ' '"%s": unknown pathway.') % pathway) if not isinstance(pathway_delay, Quantity): raise TypeError(('Cannot set the delay for pathway "%s": ' 'expected a quantity, got %s instead.') % (pathway, type(pathway_delay))) if pathway_delay.size != 1: raise TypeError(('Cannot set the delay for pathway "%s": ' 'expected a scalar quantity, got a ' 'quantity with shape %s instead.') % str(pathway_delay.shape)) fail_for_dimension_mismatch(pathway_delay, second, ('Delay has to be ' 'specified in units ' 'of seconds')) updater = getattr(self, pathway) self.item_mapping.unregister_variable(updater._delays) del updater._delays # For simplicity, store the delay as a one-element array # so that for example updater._delays[:] works. updater._delays = np.array([float(pathway_delay)]) variable = ArrayVariable('delay', second, updater._delays, group_name=self.name, scalar=True) updater.variables['delay'] = variable if pathway == 'pre': self.variables['delay'] = variable #: Performs numerical integration step self.state_updater = StateUpdater(self, method) self.contained_objects.append(self.state_updater) #: "Lumped variable" mechanism -- sum over all synapses of a #: postsynaptic target self.lumped_updaters = {} for single_equation in self.equations.itervalues(): if 'lumped' in single_equation.flags: varname = single_equation.varname # For a lumped variable, we need an equivalent parameter in the # target group if not varname in self.target.variables: raise ValueError( ('The lumped variable %s needs a variable ' 'of the same name in the target ' 'group ') % single_equation.varname) fail_for_dimension_mismatch(self.variables[varname].unit, self.target.variables[varname], ('Lumped variables need to have ' 'the same units in Synapses ' 'and the target group')) # TODO: Add some more stringent check about the type of # variable in the target group updater = LumpedUpdater(varname, self, self.target) self.lumped_updaters[varname] = updater self.contained_objects.append(updater) # Do an initial connect, if requested if not isinstance(connect, (bool, basestring)): raise TypeError( ('"connect" keyword has to be a boolean value or a ' 'string, is type %s instead.' % type(connect))) self._initial_connect = connect if not connect is False: self.connect(connect, level=1) # Activate name attribute access Group.__init__(self) N = property(fget=lambda self: self.item_mapping.N, doc='Total number of synapses') def __len__(self): return self.N def pre_run(self, namespace): self.lastupdate = self.clock.t super(Synapses, self).pre_run(namespace) def _add_updater(self, code, prepost, objname=None): ''' Add a new target updater. Users should call `add_pre` or `add_post` instead. Parameters ---------- code : str The abstract code that should be executed on pre-/postsynaptic spikes. prepost : {'pre', 'post'} Whether the code is triggered by presynaptic or postsynaptic spikes objname : str, optional A name for the object, see `SynapticPathway` for more details. Returns ------- objname : str The final name for the object. Equals `objname` if it was explicitly given (and did not end in a wildcard character). 
''' if prepost == 'pre': spike_group, group_name = self.source, 'Source' elif prepost == 'post': spike_group = self.target, 'Target' else: raise ValueError(('"prepost" argument has to be "pre" or "post", ' 'is "%s".') % prepost) if not hasattr(spike_group, 'spikes') and hasattr( spike_group, 'clock'): raise TypeError(('%s has to be a SpikeSource with spikes and' ' clock attribute. Is type %r instead') % (group_name, type(spike_group))) updater = SynapticPathway(self, code, prepost, objname) objname = updater.objname if hasattr(self, objname): raise ValueError( ('Cannot add updater with name "{name}", synapses ' 'object already has an attribute with this ' 'name.').format(name=objname)) setattr(self, objname, updater) self._updaters.append(objname) self.contained_objects.append(updater) return objname def _create_variables(self): ''' Create the variables dictionary for this `Synapses`, containing entries for the equation variables and some standard entries. ''' # Add all the pre and post variables with _pre and _post suffixes v = {} self.variable_indices = defaultdict(lambda: '_idx') for name, var in getattr(self.source, 'variables', {}).iteritems(): if isinstance(var, (ArrayVariable, Subexpression)): v[name + '_pre'] = var self.variable_indices[name + '_pre'] = '_presynaptic_idx' for name, var in getattr(self.target, 'variables', {}).iteritems(): if isinstance(var, (ArrayVariable, Subexpression)): v[name + '_post'] = var self.variable_indices[name + '_post'] = '_postsynaptic_idx' # Also add all the post variables without a suffix -- if this # clashes with the name of a state variable defined in this # Synapses group, the latter will overwrite the entry later and # take precedence v[name] = var self.variable_indices[name] = '_postsynaptic_idx' # Standard variables always present v.update({ 't': AttributeVariable(second, self.clock, 't_', constant=False), 'dt': AttributeVariable(second, self.clock, 'dt_', constant=True), '_num_source_neurons': Variable(Unit(1), len(self.source), constant=True), '_num_target_neurons': Variable(Unit(1), len(self.target), constant=True), '_synaptic_pre': DynamicArrayVariable('_synaptic_pre', Unit(1), self.item_mapping.synaptic_pre), '_synaptic_post': DynamicArrayVariable('_synaptic_pre', Unit(1), self.item_mapping.synaptic_post), # We don't need "proper" specifier for these -- they go # back to Python code currently '_pre_synaptic': Variable(Unit(1), self.item_mapping.pre_synaptic), '_post_synaptic': Variable(Unit(1), self.item_mapping.post_synaptic) }) for eq in itertools.chain( self.equations.itervalues(), self.event_driven.itervalues() if self.event_driven is not None else []): if eq.type in (DIFFERENTIAL_EQUATION, PARAMETER): array = self.arrays[eq.varname] constant = ('constant' in eq.flags) # We are dealing with dynamic arrays here, code generation # shouldn't directly access the specifier.array attribute but # use specifier.get_value() to get a reference to the underlying # array v[eq.varname] = DynamicArrayVariable(eq.varname, eq.unit, array, group_name=self.name, constant=constant, is_bool=eq.is_bool) if eq.varname in self.variable_indices: # we are overwriting a postsynaptic variable of the same # name, delete the reference to the postsynaptic index del self.variable_indices[eq.varname] # Register the array with the `SynapticItemMapping` object so # it gets automatically resized self.item_mapping.register_variable(array) elif eq.type == STATIC_EQUATION: v.update({ eq.varname: Subexpression(eq.unit, brian_prefs['core.default_scalar_dtype'], 
str(eq.expr), variables=v, namespace=self.namespace, is_bool=eq.is_bool) }) else: raise AssertionError('Unknown type of equation: ' + eq.eq_type) # Stochastic variables for xi in self.equations.stochastic_variables: v.update({xi: StochasticVariable()}) return v def _allocate_memory(self, dtype=None): # Allocate memory (TODO: this should be refactored somewhere at some point) arrayvarnames = set(eq.varname for eq in self.equations.itervalues() if eq.type in (DIFFERENTIAL_EQUATION, PARAMETER)) if self.event_driven is not None: # Only differential equations are event-driven arrayvarnames |= set(eq.varname for eq in self.event_driven.itervalues()) arrays = {} for name in arrayvarnames: if isinstance(dtype, dict): curdtype = dtype[name] else: curdtype = dtype if curdtype is None: curdtype = brian_prefs['core.default_scalar_dtype'] arrays[name] = DynamicArray1D(0) logger.debug("NeuronGroup memory allocated successfully.") return arrays def connect_one_to_one(self): ''' Manually create a one to one connectivity pattern ''' if len(self.source) != len(self.target): raise TypeError( 'Can only create synapses between groups of same size') self.connect(np.arange(len(self.source)), np.arange(len(self.target))) def connect_full(self): ''' Connect all neurons in the source group to all neurons in the target group. ''' sources, targets = np.meshgrid(np.arange(len(self.source)), np.arange(len(self.target))) self.connect(sources.flat(), targets.flat()) def connect(self, pre_or_cond, post=None, p=1., n=1, level=0): ''' Add synapses. The first argument can be either a presynaptic index (int or array) or a condition for synapse creation in the form of a string that evaluates to a boolean value (or directly a boolean value). If it is given as an index, also `post` has to be present. A string condition will be evaluated for all pre-/postsynaptic indices, which can be referred to as `i` and `j`. Parameters ---------- pre_or_cond : {int, ndarray of int, bool, str} The presynaptic neurons (in the form of an index or an array of indices) or a boolean value or a string that evaluates to a boolean value. If it is an index, then also `post` has to be given. post_neurons : {int, ndarray of int), optional GroupIndices of neurons from the target group. Non-optional if one or more presynaptic indices have been given. p : float, optional The probability to create `n` synapses wherever the condition given as `pre_or_cond` evaluates to true or for the given pre/post indices. n : int, optional The number of synapses to create per pre/post connection pair. Defaults to 1. 
Examples -------- >>> from brian2 import * >>> import numpy as np >>> G = NeuronGroup(10, 'dv/dt = -v / tau : 1', threshold='v>1', reset='v=0') >>> S = Synapses(G, G, 'w:1', pre='v+=w') >>> S.connect('i != j') # all-to-all but no self-connections >>> S.connect(0, 0) # connect neuron 0 to itself >>> S.connect(np.array([1, 2]), np.array([2, 1])) # connect 1->2 and 2->1 >>> S.connect(True) # connect all-to-all >>> S.connect('i != j', p=0.1) # Connect neurons with 10% probability, exclude self-connections >>> S.connect('i == j', n=2) # Connect all neurons to themselves with 2 synapses ''' if not isinstance(pre_or_cond, (bool, basestring)): pre_or_cond = np.asarray(pre_or_cond) if not np.issubdtype(pre_or_cond.dtype, np.int): raise TypeError( ('Presynaptic indices have to be given as ' 'integers, are type %s instead.') % pre_or_cond.dtype) post = np.asarray(post) if not np.issubdtype(post.dtype, np.int): raise TypeError(('Presynaptic indices can only be combined ' 'with postsynaptic integer indices))')) if isinstance(n, basestring): raise TypeError( ('GroupIndices cannot be combined with a string' 'expression for n. Either use an array/scalar ' 'for n, or a string expression for the ' 'connections')) i, j, n = np.broadcast_arrays(pre_or_cond, post, n) if i.ndim > 1: raise ValueError('Can only use 1-dimensional indices') self.item_mapping._add_synapses(i, j, n, p, level=level + 1) elif isinstance(pre_or_cond, (basestring, bool)): if pre_or_cond is False: return # nothing to do... elif pre_or_cond is True: # TODO: This should not be handled with the general mechanism pre_or_cond = 'True' if post is not None: raise ValueError('Cannot give a postsynaptic index when ' 'using a string expression') if not isinstance(n, (int, basestring)): raise TypeError( 'n has to be an integer or a string evaluating ' 'to an integer, is type %s instead.' % type(n)) if not isinstance(p, (float, basestring)): raise TypeError('p has to be a float or a string evaluating ' 'to an float, is type %s instead.' % type(n)) self.item_mapping._add_synapses(None, None, n, p, condition=pre_or_cond, level=level + 1) else: raise TypeError(('First argument has to be an index or a ' 'string, is %s instead.') % type(pre_or_cond))
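# --- Illustrative usage sketch, not part of the class above ---
# A hypothetical use of this (older, Python-2-era) Synapses API: a weight plus an
# event-driven trace, a "pre" pathway, a scalar delay and a string connect
# condition.  G, tau_pre and all numbers are example assumptions.
from brian2 import *

G = NeuronGroup(100, 'dv/dt = -v/(10*ms) : 1', threshold='v > 1', reset='v = 0')
tau_pre = 20*ms
S = Synapses(G, G,
             model='''w : 1
                      dapre/dt = -apre/tau_pre : 1 (event-driven)''',
             pre='''v_post += w
                    apre += 0.01
                    w += apre''',
             connect='i != j',          # all-to-all, but no self-connections
             delay=2*ms)                # scalar delay for the "pre" pathway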
def simulate(IXmean=30. * b2.pA, p_rate=100 * b2.Hz): common_params = { # Parameters common to all neurons. 'C': 100 * b2.pfarad, 'tau_m': 10 * b2.ms, 'EL': -60 * b2.mV, 'DeltaT': 2 * b2.mV, 'Vreset': -65, # *b2.mV 'VTmean': -50 * b2.mV, 'VTsd': 2 * b2.mV } common_params['gL'] = common_params['C'] / common_params['tau_m'] E_cell_params = dict( common_params, **{ 'Ncells': num_E_cells, 'IXmean': IXmean, # 30 'IXsd': 20 * b2.pA }) eqs = Equations(""" Im = IX + gL * (EL - vm) + gL * DeltaT * exp((vm - VT) / DeltaT) - gx * (vm - Erev_x) : amp dgx/dt = -gx/Tau_x : siemens VT : volt IX : amp dvm/dt = Im / C : volt """) param_E_syn = { "Erev_x": 0.0 * b2.mV, "Tau_x": 4.0 * b2.ms, "w_x": 1.4, # *b2.nsiemens, # Peak conductance } if state == "beta": param_E_syn['w_x'] = 0.55 * b2.nS param_E_syn['Tau_x'] = 12 * b2.ms E_cells = b2.NeuronGroup(E_cell_params['Ncells'], model=eqs, method=integration_method, threshold="vm > 0.*mV", reset="vm={}*mV".format(E_cell_params['Vreset']), refractory="vm > 0.*mV", namespace={ **common_params, **param_E_syn, }) Poisson_to_E = b2.PoissonGroup(E_cell_params['Ncells'], rates=p_rate) cEX = b2.Synapses(Poisson_to_E, E_cells, method=integration_method, on_pre="gx += {}*nsiemens".format(param_E_syn["w_x"])) cEX.connect(j='i') # Initialise random parameters. E_cells.VT = E_cell_params['VTmean'] E_cells.IX = E_cell_params['IXmean'] spike_monitor_E = b2.SpikeMonitor(E_cells) state_monitor_E = None if record_voltages: state_monitor_E = b2.StateMonitor(E_cells, "vm", record=True, dt=dt0) net = b2.Network(E_cells) if record_voltages: net.add(state_monitor_E) net.add(spike_monitor_E) net.add(cEX) # Randomise initial membrane potentials. E_cells.vm = -60 * b2.mV print('Simulation running...') start_time = time.time() b2.run(sim_duration) duration = time.time() - start_time print('Simulation time:', duration, 'seconds') return spike_monitor_E, state_monitor_E
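# --- Illustrative usage sketch, not part of the function above ---
# simulate() reads several module-level settings; the values below are example
# assumptions rather than the original script's configuration.
import brian2 as b2
import matplotlib.pyplot as plt

num_E_cells = 800
state = 'gamma'                  # anything other than 'beta' keeps the default synapse values
integration_method = 'euler'
record_voltages = False
dt0 = 0.1 * b2.ms
sim_duration = 500 * b2.ms

spikes, voltages = simulate(IXmean=30.*b2.pA, p_rate=100*b2.Hz)
plt.plot(spikes.t / b2.ms, spikes.i, '.k', markersize=1)   # raster plot of E-cell spikes
plt.xlabel('time (ms)')
plt.ylabel('neuron index')
plt.show()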
def __init__(self, morphology=None, model=None, threshold=None, refractory=False, reset=None, events=None, threshold_location=None, dt=None, clock=None, order=0, Cm=0.9 * uF / cm**2, Ri=150 * ohm * cm, name='spatialneuron*', dtype=None, namespace=None, method=('linear', 'exponential_euler', 'rk2', 'heun')): # #### Prepare and validate equations if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Insert the threshold mechanism at the specified location if threshold_location is not None: if hasattr(threshold_location, '_indices'): # assuming this is a method threshold_location = threshold_location._indices() # for now, only a single compartment allowed if len(threshold_location) == 1: threshold_location = threshold_location[0] else: raise AttributeError(('Threshold can only be applied on a ' 'single location')) threshold = '(' + threshold + ') and (i == ' + str( threshold_location) + ')' # Check flags (we have point currents) model.check_flags({ DIFFERENTIAL_EQUATION: ('point current', ), PARAMETER: ('constant', 'shared', 'linked', 'point current'), SUBEXPRESSION: ('shared', 'point current', 'constant over dt') }) #: The original equations as specified by the user (i.e. before #: inserting point-currents into the membrane equation, before adding #: all the internally used variables and constants, etc.). self.user_equations = model # Separate subexpressions depending whether they are considered to be # constant over a time step or not (this would also be done by the # NeuronGroup initializer later, but this would give incorrect results # for the linearity check) model, constant_over_dt = extract_constant_subexpressions(model) # Extract membrane equation if 'Im' in model: if len(model['Im'].flags): raise TypeError( 'Cannot specify any flags for the transmembrane ' 'current Im.') membrane_expr = model['Im'].expr # the membrane equation else: raise TypeError('The transmembrane current Im must be defined') model_equations = [] # Insert point currents in the membrane equation for eq in model.itervalues(): if eq.varname == 'Im': continue # ignore -- handled separately if 'point current' in eq.flags: fail_for_dimension_mismatch( eq.dim, amp, "Point current " + eq.varname + " should be in amp") membrane_expr = Expression( str(membrane_expr.code) + '+' + eq.varname + '/area') eq = SingleEquation( eq.type, eq.varname, eq.dim, expr=eq.expr, flags=list(set(eq.flags) - set(['point current']))) model_equations.append(eq) model_equations.append( SingleEquation(SUBEXPRESSION, 'Im', dimensions=(amp / meter**2).dim, expr=membrane_expr)) model_equations.append(SingleEquation(PARAMETER, 'v', volt.dim)) model = Equations(model_equations) ###### Process model equations (Im) to extract total conductance and the remaining current # Expand expressions in the membrane equation for var, expr in model.get_substituted_expressions( include_subexpressions=True): if var == 'Im': Im_expr = expr break else: raise AssertionError('Model equations did not contain Im!') # Differentiate Im with respect to v Im_sympy_exp = str_to_sympy(Im_expr.code) v_sympy = sp.Symbol('v', real=True) diffed = sp.diff(Im_sympy_exp, v_sympy) unevaled_derivatives = diffed.atoms(sp.Derivative) if len(unevaled_derivatives): raise TypeError( 'Cannot take the derivative of "{Im}" with respect ' 'to v.'.format(Im=Im_expr.code)) gtot_str = sympy_to_str(sp.simplify(-diffed)) I0_str = 
sympy_to_str(sp.simplify(Im_sympy_exp - diffed * v_sympy)) if gtot_str == '0': gtot_str += '*siemens/meter**2' if I0_str == '0': I0_str += '*amp/meter**2' gtot_str = "gtot__private=" + gtot_str + ": siemens/meter**2" I0_str = "I0__private=" + I0_str + ": amp/meter**2" model += Equations(gtot_str + "\n" + I0_str) # Insert morphology (store a copy) self.morphology = copy.deepcopy(morphology) # Flatten the morphology self.flat_morphology = FlatMorphology(morphology) # Equations for morphology # TODO: check whether Cm and Ri are already in the equations # no: should be shared instead of constant # yes: should be constant (check) eqs_constants = Equations(""" length : meter (constant) distance : meter (constant) area : meter**2 (constant) volume : meter**3 Ic : amp/meter**2 diameter : meter (constant) Cm : farad/meter**2 (constant) Ri : ohm*meter (constant, shared) r_length_1 : meter (constant) r_length_2 : meter (constant) time_constant = Cm/gtot__private : second space_constant = (2/pi)**(1.0/3.0) * (area/(1/r_length_1 + 1/r_length_2))**(1.0/6.0) / (2*(Ri*gtot__private)**(1.0/2.0)) : meter """) if self.flat_morphology.has_coordinates: eqs_constants += Equations(''' x : meter (constant) y : meter (constant) z : meter (constant) ''') NeuronGroup.__init__(self, morphology.total_compartments, model=model + eqs_constants, threshold=threshold, refractory=refractory, reset=reset, events=events, method=method, dt=dt, clock=clock, order=order, namespace=namespace, dtype=dtype, name=name) # Parameters and intermediate variables for solving the cable equations # Note that some of these variables could have meaningful physical # units (e.g. _v_star is in volt, _I0_all is in amp/meter**2 etc.) but # since these variables should never be used in user code, we don't # assign them any units self.variables.add_arrays( [ '_ab_star0', '_ab_star1', '_ab_star2', '_a_minus0', '_a_minus1', '_a_minus2', '_a_plus0', '_a_plus1', '_a_plus2', '_b_plus', '_b_minus', '_v_star', '_u_plus', '_u_minus', '_v_previous', # The following three are for solving the # three tridiag systems in parallel '_c1', '_c2', '_c3', # The following two are only necessary for # C code where we cannot deal with scalars # and arrays interchangeably: '_I0_all', '_gtot_all' ], size=self.N, read_only=True) self.Cm = Cm self.Ri = Ri # These explict assignments will load the morphology values from disk # in standalone mode self.distance_ = self.flat_morphology.distance self.length_ = self.flat_morphology.length self.area_ = self.flat_morphology.area self.diameter_ = self.flat_morphology.diameter self.r_length_1_ = self.flat_morphology.r_length_1 self.r_length_2_ = self.flat_morphology.r_length_2 if self.flat_morphology.has_coordinates: self.x_ = self.flat_morphology.x self.y_ = self.flat_morphology.y self.z_ = self.flat_morphology.z # Performs numerical integration step self.add_attribute('diffusion_state_updater') self.diffusion_state_updater = SpatialStateUpdater(self, method, clock=self.clock, order=order) # Update v after the gating variables to obtain consistent Ic and Im self.diffusion_state_updater.order = 1 # Creation of contained_objects that do the work self.contained_objects.extend([self.diffusion_state_updater]) if len(constant_over_dt): self.subexpression_updater = SubexpressionUpdater( self, constant_over_dt) self.contained_objects.append(self.subexpression_updater)
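# --- Illustrative sketch, not part of the class above ---
# The code above derives the total conductance and the residual current from Im
# by differentiating with respect to v:  gtot = -dIm/dv  and  I0 = Im - (dIm/dv)*v,
# so that Im == -gtot*v + I0 whenever Im is linear in v.  The symbols gl, EL and
# I_ext below are example assumptions.
import sympy as sp

v, gl, EL, I_ext = sp.symbols('v gl EL I_ext', real=True)
Im = gl*(EL - v) + I_ext            # an example membrane current, linear in v

dIm_dv = sp.diff(Im, v)
gtot = sp.simplify(-dIm_dv)         # -> gl
I0 = sp.simplify(Im - dIm_dv*v)     # -> gl*EL + I_ext

assert sp.simplify(Im - (-gtot*v + I0)) == 0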
def __init__(self, morphology=None, model=None, threshold=None, refractory=False, reset=None, threshold_location=None, dt=None, clock=None, order=0, Cm=0.9 * uF / cm**2, Ri=150 * ohm * cm, name='spatialneuron*', dtype=None, namespace=None, method=('linear', 'exponential_euler', 'rk2', 'heun')): # #### Prepare and validate equations if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Insert the threshold mechanism at the specified location if threshold_location is not None: if hasattr(threshold_location, '_indices'): # assuming this is a method threshold_location = threshold_location._indices() # for now, only a single compartment allowed if len(threshold_location) == 1: threshold_location = threshold_location[0] else: raise AttributeError(('Threshold can only be applied on a ' 'single location')) threshold = '(' + threshold + ') and (i == ' + str( threshold_location) + ')' # Check flags (we have point currents) model.check_flags({ DIFFERENTIAL_EQUATION: ('point current', ), PARAMETER: ('constant', 'shared', 'linked', 'point current'), SUBEXPRESSION: ('shared', 'point current') }) # Add the membrane potential model += Equations(''' v:volt # membrane potential ''') # Extract membrane equation if 'Im' in model: membrane_eq = model['Im'] # the membrane equation else: raise TypeError('The transmembrane current Im must be defined') # Insert point currents in the membrane equation for eq in model.itervalues(): if 'point current' in eq.flags: fail_for_dimension_mismatch( eq.unit, amp, "Point current " + eq.varname + " should be in amp") eq.flags.remove('point current') membrane_eq.expr = Expression( str(membrane_eq.expr.code) + '+' + eq.varname + '/area') ###### Process model equations (Im) to extract total conductance and the remaining current # Check conditional linearity with respect to v # Match to _A*v+_B var = sp.Symbol('v', real=True) wildcard = sp.Wild('_A', exclude=[var]) constant_wildcard = sp.Wild('_B', exclude=[var]) pattern = wildcard * var + constant_wildcard # Expand expressions in the membrane equation membrane_eq.type = DIFFERENTIAL_EQUATION for var, expr in model._get_substituted_expressions( ): # this returns substituted expressions for diff eqs if var == 'Im': Im_expr = expr membrane_eq.type = SUBEXPRESSION # Factor out the variable s_expr = sp.collect(Im_expr.sympy_expr.expand(), var) matches = s_expr.match(pattern) if matches is None: raise TypeError, "The membrane current must be linear with respect to v" a, b = (matches[wildcard], matches[constant_wildcard]) # Extracts the total conductance from Im, and the remaining current minusa_str, b_str = sympy_to_str(-a), sympy_to_str(b) # Add correct units if necessary if minusa_str == '0': minusa_str += '*siemens/meter**2' if b_str == '0': b_str += '*amp/meter**2' gtot_str = "gtot__private=" + minusa_str + ": siemens/meter**2" I0_str = "I0__private=" + b_str + ": amp/meter**2" model += Equations(gtot_str + "\n" + I0_str) # Equations for morphology # TODO: check whether Cm and Ri are already in the equations # no: should be shared instead of constant # yes: should be constant (check) eqs_constants = Equations(""" diameter : meter (constant) length : meter (constant) x : meter (constant) y : meter (constant) z : meter (constant) distance : meter (constant) area : meter**2 (constant) Cm : farad/meter**2 (constant) Ri : ohm*meter (constant, shared) space_constant = 
(diameter/(4*Ri*gtot__private))**.5 : meter # Not so sure about the name ### Parameters and intermediate variables for solving the cable equation ab_star0 : siemens/meter**2 ab_plus0 : siemens/meter**2 ab_minus0 : siemens/meter**2 ab_star1 : siemens/meter**2 ab_plus1 : siemens/meter**2 ab_minus1 : siemens/meter**2 ab_star2 : siemens/meter**2 ab_plus2 : siemens/meter**2 ab_minus2 : siemens/meter**2 b_plus : siemens/meter**2 b_minus : siemens/meter**2 v_star : volt u_plus : 1 u_minus : 1 # The following two are only necessary for C code where we cannot deal # with scalars and arrays interchangeably gtot_all : siemens/meter**2 I0_all : amp/meter**2 """) # Possibilities for the name: characteristic_length, electrotonic_length, length_constant, space_constant # Insert morphology self.morphology = morphology # Link morphology variables to neuron's state variables self.morphology_data = MorphologyData(len(morphology)) self.morphology.compress(self.morphology_data) NeuronGroup.__init__(self, len(morphology), model=model + eqs_constants, threshold=threshold, refractory=refractory, reset=reset, method=method, dt=dt, clock=clock, order=order, namespace=namespace, dtype=dtype, name=name) self.Cm = Cm self.Ri = Ri # TODO: View instead of copy for runtime? self.diameter_ = self.morphology_data.diameter self.distance_ = self.morphology_data.distance self.length_ = self.morphology_data.length self.area_ = self.morphology_data.area self.x_ = self.morphology_data.x self.y_ = self.morphology_data.y self.z_ = self.morphology_data.z # Performs numerical integration step self.add_attribute('diffusion_state_updater') self.diffusion_state_updater = SpatialStateUpdater(self, method, clock=self.clock, order=order) # Creation of contained_objects that do the work self.contained_objects.extend([self.diffusion_state_updater])
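# --- Illustrative usage sketch, not part of the class above ---
# Hypothetical construction of a SpatialNeuron: the model must define Im, the
# transmembrane current per unit area.  The morphology, parameter names and
# values below are example assumptions.
from brian2 import *

morpho = Cylinder(diameter=1*um, length=300*um, n=100)    # simple unbranched cable
gl = 1e-4*siemens/cm**2
EL = -70*mV
eqs = '''
Im = gl*(EL - v) : amp/meter**2
I_inj : amp (point current)         # current injected into selected compartments
'''
neuron = SpatialNeuron(morphology=morpho, model=eqs,
                       Cm=0.9*uF/cm**2, Ri=150*ohm*cm,
                       method='exponential_euler')
neuron.v = EL
neuron.I_inj[0] = 0.05*nA           # inject into the first compartment
run(50*ms)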
def __init__(self, N, model, method=None, threshold=None, reset=None, refractory=False, namespace=None, dtype=None, clock=None, name='neurongroup*', codeobj_class=None): BrianObject.__init__(self, when=clock, name=name) self.codeobj_class = codeobj_class try: self.N = N = int(N) except ValueError: if isinstance(N, str): raise TypeError( "First NeuronGroup argument should be size, not equations." ) raise if N < 1: raise ValueError("NeuronGroup size should be at least 1, was " + str(N)) ##### Prepare and validate equations if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Check flags model.check_flags({ DIFFERENTIAL_EQUATION: ('unless-refractory'), PARAMETER: ('constant') }) # add refractoriness model = add_refractoriness(model) self.equations = model uses_refractoriness = len(model) and any([ 'unless-refractory' in eq.flags for eq in model.itervalues() if eq.type == DIFFERENTIAL_EQUATION ]) logger.debug("Creating NeuronGroup of size {self.N}, " "equations {self.equations}.".format(self=self)) ##### Setup the memory self.arrays = self._allocate_memory(dtype=dtype) self._spikespace = np.zeros(N + 1, dtype=np.int32) # Setup the namespace self.namespace = create_namespace(namespace) # Setup variables self.variables = self._create_variables() # All of the following will be created in pre_run #: The threshold condition self.threshold = threshold #: The reset statement(s) self.reset = reset #: The refractory condition or timespan self._refractory = refractory if uses_refractoriness and refractory is False: logger.warn( 'Model equations use the "unless-refractory" flag but ' 'no refractory keyword was given.', 'no_refractory') #: The state update method selected by the user self.method_choice = method #: Performs thresholding step, sets the value of `spikes` self.thresholder = None if self.threshold is not None: self.thresholder = Thresholder(self) #: Resets neurons which have spiked (`spikes`) self.resetter = None if self.reset is not None: self.resetter = Resetter(self) # We try to run a pre_run already now. This might fail because of an # incomplete namespace but if the namespace is already complete we # can spot unit or syntax errors already here, at creation time. try: self.pre_run(None) except KeyError: pass #: Performs numerical integration step self.state_updater = StateUpdater(self, method) # Creation of contained_objects that do the work self.contained_objects.append(self.state_updater) if self.thresholder is not None: self.contained_objects.append(self.thresholder) if self.resetter is not None: self.contained_objects.append(self.resetter) # Activate name attribute access Group.__init__(self) # Set the refractoriness information self.lastspike = -np.inf * second self.not_refractory = True
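# --- Illustrative usage sketch, not part of the class above ---
# Hypothetical use of this (older) NeuronGroup API; note the hyphenated
# "unless-refractory" flag that this particular version checks for.  All names
# and values are example assumptions.
from brian2 import *

tau = 10*ms
eqs = '''
dv/dt = (v_rest - v)/tau : volt (unless-refractory)
v_rest : volt (constant)
'''
G = NeuronGroup(10, eqs,
                threshold='v > -50*mV',
                reset='v = -60*mV',
                refractory=5*ms)
G.v_rest = -70*mV
G.v = -70*mV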
def __init__(self, source, target=None, model=None, pre=None, post=None, connect=False, delay=None, namespace=None, dtype=None, codeobj_class=None, clock=None, method=None, name='synapses*'): BrianObject.__init__(self, when=clock, name=name) self.codeobj_class = codeobj_class self.source = weakref.proxy(source) if target is None: self.target = self.source else: self.target = weakref.proxy(target) ##### Prepare and validate equations if model is None: model = '' if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Check flags model.check_flags({DIFFERENTIAL_EQUATION: ['event-driven', 'lumped'], STATIC_EQUATION: ['lumped'], PARAMETER: ['constant', 'lumped']}) # Separate the equations into event-driven and continuously updated # equations event_driven = [] continuous = [] for single_equation in model.itervalues(): if 'event-driven' in single_equation.flags: if 'lumped' in single_equation.flags: raise ValueError(('Event-driven variable %s cannot be ' 'a lumped variable.') % single_equation.varname) event_driven.append(single_equation) else: continuous.append(single_equation) # Add the lastupdate variable, used by event-driven equations continuous.append(SingleEquation(PARAMETER, 'lastupdate', second)) if len(event_driven): self.event_driven = Equations(event_driven) else: self.event_driven = None self.equations = Equations(continuous) ##### Setup the memory self.arrays = self._allocate_memory(dtype=dtype) # Setup the namespace self._given_namespace = namespace self.namespace = create_namespace(namespace) self._queues = {} self._delays = {} self.item_mapping = SynapticItemMapping(self) self.indices = {'_idx': self.item_mapping, '_presynaptic_idx': self.item_mapping.synaptic_pre, '_postsynaptic_idx': self.item_mapping.synaptic_post} # Allow S.i instead of S.indices.i, etc. self.i = self.item_mapping.i self.j = self.item_mapping.j self.k = self.item_mapping.k # Setup variables self.variables = self._create_variables() #: List of names of all updaters, e.g. ['pre', 'post'] self._updaters = [] for prepost, argument in zip(('pre', 'post'), (pre, post)): if not argument: continue if isinstance(argument, basestring): self._add_updater(argument, prepost) elif isinstance(argument, collections.Mapping): for key, value in argument.iteritems(): if not isinstance(key, basestring): err_msg = ('Keys for the "{}" argument' 'have to be strings, got ' '{} instead.').format(prepost, type(key)) raise TypeError(err_msg) self._add_updater(value, prepost, objname=key) # If we have a pathway called "pre" (the most common use case), provide # direct access to its delay via a delay attribute (instead of having # to use pre.delay) if 'pre' in self._updaters: self.variables['delay'] = self.pre.variables['delay'] if delay is not None: if isinstance(delay, Quantity): if not 'pre' in self._updaters: raise ValueError(('Cannot set delay, no "pre" pathway exists.' 'Use a dictionary if you want to set the ' 'delay for a pathway with a different name.')) delay = {'pre': delay} if not isinstance(delay, collections.Mapping): raise TypeError('Delay argument has to be a quantity or a ' 'dictionary, is type %s instead.' 
% type(delay)) for pathway, pathway_delay in delay.iteritems(): if not pathway in self._updaters: raise ValueError(('Cannot set the delay for pathway ' '"%s": unknown pathway.') % pathway) if not isinstance(pathway_delay, Quantity): raise TypeError(('Cannot set the delay for pathway "%s": ' 'expected a quantity, got %s instead.') % (pathway, type(pathway_delay))) if pathway_delay.size != 1: raise TypeError(('Cannot set the delay for pathway "%s": ' 'expected a scalar quantity, got a ' 'quantity with shape %s instead.') % (pathway, str(pathway_delay.shape))) fail_for_dimension_mismatch(pathway_delay, second, ('Delay has to be ' 'specified in units ' 'of seconds')) updater = getattr(self, pathway) self.item_mapping.unregister_variable(updater._delays) del updater._delays # For simplicity, store the delay as a one-element array # so that for example updater._delays[:] works. updater._delays = np.array([float(pathway_delay)]) variable = ArrayVariable('delay', second, updater._delays, group_name=self.name, scalar=True) updater.variables['delay'] = variable if pathway == 'pre': self.variables['delay'] = variable #: Performs numerical integration step self.state_updater = StateUpdater(self, method) self.contained_objects.append(self.state_updater) #: "Lumped variable" mechanism -- sum over all synapses of a #: postsynaptic target self.lumped_updaters = {} for single_equation in self.equations.itervalues(): if 'lumped' in single_equation.flags: varname = single_equation.varname # For a lumped variable, we need an equivalent parameter in the # target group if not varname in self.target.variables: raise ValueError(('The lumped variable %s needs a variable ' 'of the same name in the target ' 'group') % single_equation.varname) fail_for_dimension_mismatch(self.variables[varname].unit, self.target.variables[varname], ('Lumped variables need to have ' 'the same units in Synapses ' 'and the target group')) # TODO: Add some more stringent check about the type of # variable in the target group updater = LumpedUpdater(varname, self, self.target) self.lumped_updaters[varname] = updater self.contained_objects.append(updater) # Do an initial connect, if requested if not isinstance(connect, (bool, basestring)): raise TypeError(('"connect" keyword has to be a boolean value or a ' 'string, is type %s instead.' % type(connect))) self._initial_connect = connect if not connect is False: self.connect(connect, level=1) # Activate name attribute access Group.__init__(self)
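A usage sketch of the pathway and delay handling above, written against the released brian2 API (where the pre/post keywords shown here became on_pre/on_post); the dictionary form of delay is the case parsed in the loop above, and all values are illustrative.

from brian2 import NeuronGroup, Synapses, ms, mV, run

source = NeuronGroup(10, 'dv/dt = -v/(10*ms) : volt',
                     threshold='v > -50*mV', reset='v = -70*mV')
target = NeuronGroup(10, 'dv/dt = -v/(10*ms) : volt')

syn = Synapses(source, target,
               model='w : volt',
               on_pre='v_post += w',   # the "pre" pathway
               delay={'pre': 2*ms})    # scalar delay for the pre pathway
syn.connect(p=0.2)
syn.w = 0.5*mV
run(50*ms)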
def __init__(self, N, model, method=('linear', 'euler', 'heun'), threshold=None, reset=None, refractory=False, events=None, namespace=None, dtype=None, dt=None, clock=None, order=0, name='neurongroup*', codeobj_class=None): Group.__init__(self, dt=dt, clock=clock, when='start', order=order, name=name) self.codeobj_class = codeobj_class try: self._N = N = int(N) except ValueError: if isinstance(N, str): raise TypeError("First NeuronGroup argument should be size, not equations.") raise if N < 1: raise ValueError("NeuronGroup size should be at least 1, was " + str(N)) self.start = 0 self.stop = self._N ##### Prepare and validate equations if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Check flags model.check_flags({DIFFERENTIAL_EQUATION: ('unless refractory',), PARAMETER: ('constant', 'shared', 'linked'), SUBEXPRESSION: ('shared',)}) # add refractoriness if refractory is not False: model = add_refractoriness(model) self.equations = model uses_refractoriness = len(model) and any(['unless refractory' in eq.flags for eq in model.itervalues() if eq.type == DIFFERENTIAL_EQUATION]) self._linked_variables = set() logger.debug("Creating NeuronGroup of size {self._N}, " "equations {self.equations}.".format(self=self)) if namespace is None: namespace = {} #: The group-specific namespace self.namespace = namespace # All of the following will be created in before_run #: The refractory condition or timespan self._refractory = refractory if uses_refractoriness and refractory is False: logger.warn('Model equations use the "unless refractory" flag but ' 'no refractory keyword was given.', 'no_refractory') #: The state update method selected by the user self.method_choice = method if events is None: events = {} if threshold is not None: if 'spike' in events: raise ValueError(("The NeuronGroup defines both a threshold " "and a 'spike' event")) events['spike'] = threshold # Setup variables # Since we have to create _spikespace and possibly other "eventspace" # variables, we pass the supported events self._create_variables(dtype, events=events.keys()) #: Events supported by this group self.events = events #: Code that is triggered on events (e.g. reset) self.event_codes = {} #: Checks the spike threshold (or abitrary user-defined events) self.thresholder = {} #: Reset neurons which have spiked (or perform arbitrary actions for #: user-defined events) self.resetter = {} for event_name in events.iterkeys(): if not isinstance(event_name, basestring): raise TypeError(('Keys in the "events" dictionary have to be ' 'strings, not type %s.') % type(event_name)) if not _valid_event_name(event_name): raise TypeError(("The name '%s' cannot be used as an event " "name.") % event_name) # By default, user-defined events are checked after the threshold when = 'thresholds' if event_name == 'spike' else 'after_thresholds' # creating a Thresholder will take care of checking the validity # of the condition thresholder = Thresholder(self, event=event_name, when=when) self.thresholder[event_name] = thresholder self.contained_objects.append(thresholder) if reset is not None: self.run_on_event('spike', reset, when='resets') # We try to run a before_run already now. This might fail because of an # incomplete namespace but if the namespace is already complete we # can spot unit errors in the equation already here. 
try: self.before_run(None) except KeyError: pass #: Performs numerical integration step self.state_updater = StateUpdater(self, method) # Creation of contained_objects that do the work self.contained_objects.append(self.state_updater) if refractory is not False: # Set the refractoriness information self.variables['lastspike'].set_value(-np.inf*second) self.variables['not_refractory'].set_value(True) # Activate name attribute access self._enable_group_attributes()
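A sketch of the custom-event machinery set up above, using the released brian2 API; the event name, condition and actions are made up for illustration.

from brian2 import NeuronGroup, EventMonitor, ms, mV, run

G = NeuronGroup(5, 'dv/dt = (v_rest - v)/(10*ms) : volt\nv_rest : volt',
                threshold='v > -50*mV', reset='v = -80*mV',
                events={'deep_hyperpol': 'v < -75*mV'})   # user-defined event
# Attach code to the custom event, analogous to the reset for the 'spike' event
G.run_on_event('deep_hyperpol', 'v_rest = -65*mV')
mon = EventMonitor(G, 'deep_hyperpol')
G.v_rest = -60*mV
G.v = -40*mV   # starts above threshold, spikes, and the reset triggers the event
run(100*ms)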
def simulate():
    common_params = {  # Parameters common to all neurons.
        'C': 100 * b2.pfarad,
        'tau_m': 10 * b2.ms,
        'EL': -60 * b2.mV,
        'DeltaT': 2 * b2.mV,
        'Vreset': -65,  # *b2.mV
        'VTmean': -50 * b2.mV,
        'VTsd': 2 * b2.mV
    }
    common_params['gL'] = common_params['C'] / common_params['tau_m']

    E_cell_params = dict(common_params,
                         **{'Ncells': num_E_cells,
                            'IXmean': 30 * b2.pA,
                            'IXsd': 20 * b2.pA})

    eqs = Equations("""
        Im = IX + gL * (EL - vm) + gL * DeltaT * exp((vm - VT) / DeltaT) - ge * (vm - Erev_e) - gi * (vm - Erev_i) - gx * (vm - Erev_x) : amp
        dgi/dt = (1*nsiemens-gi)/Tau_i - gi/Tau_i : siemens
        dgx/dt = (1*nsiemens-gx)/Tau_x - gx/Tau_x : siemens
        dge/dt = (1*nsiemens-ge)/Tau_e - ge/Tau_e : siemens
        VT : volt
        IX : amp
        dvm/dt = Im / C : volt
        """)

    param_E_syn = {
        "Erev_i": 0.0 * b2.mV,
        "Erev_x": 0.0 * b2.mV,
        "Erev_e": -80.0 * b2.mV,
        "Tau_i": 3.0 * b2.ms,
        "Tau_e": 4.0 * b2.ms,
        "Tau_x": 4.0 * b2.ms,
        "w_i": 0.6,   # *b2.nsiemens, peak conductance
        "w_x": 1.4,   # *b2.nsiemens, peak conductance (1 in paper)
        "w_e": 0.1,   # *b2.nsiemens, peak conductance
        "p_i": 0.1,   # ./I_cell_params['Ncells'],  # ! 200
        "p_e": 0.05,  # /E_cell_params['Ncells'],   # ! 400
    }

    if state == "beta":
        param_E_syn['w_x'] = 0.55 * b2.nS
        param_E_syn['Tau_x'] = 12 * b2.ms
        param_E_syn['w_e'] = 0.05 * b2.nS
        param_E_syn['Tau_e'] = 12 * b2.ms
        param_E_syn['w_i'] = 0.1 * b2.nS
        param_E_syn['Tau_i'] = 15 * b2.ms

    E_cells = b2.NeuronGroup(E_cell_params['Ncells'],
                             model=eqs,
                             method=integration_method,
                             threshold="vm > 0.*mV",
                             reset="vm={}*mV".format(E_cell_params['Vreset']),
                             refractory="vm > 0.*mV",
                             namespace={**common_params, **param_E_syn})

    # Poisson_to_E = b2.PoissonGroup(
    #     E_cell_params['Ncells'], rates=input_rates())  # ! input_rates

    cEE = b2.Synapses(E_cells, E_cells,
                      on_pre='ge+={}*nsiemens'.format(param_E_syn["w_e"]))
    # Recurrent E->E connections with probability p_e
    cEE.connect(p=param_E_syn["p_e"])

    # cEX = b2.Synapses(Poisson_to_E,
    #                   E_cells,
    #                   method=integration_method,
    #                   on_pre="gx += {}*nsiemens".format(param_E_syn["w_x"]))
    # cEX.connect(j='i')

    # Initialise random parameters.
    E_cells.VT = (randn(len(E_cells)) * E_cell_params['VTsd']
                  + E_cell_params['VTmean'])
    E_cells.IX = (randn(len(E_cells)) * E_cell_params['IXsd']
                  + E_cell_params['IXmean'])

    spike_monitor_E = b2.SpikeMonitor(E_cells)
    rate_monitor_E = b2.PopulationRateMonitor(E_cells)
    state_monitor_E = None
    if record_voltages:
        state_monitor_E = b2.StateMonitor(E_cells, "vm", record=True)

    net = b2.Network(E_cells)
    net.add(cEE)
    if record_voltages:
        net.add(state_monitor_E)
    net.add(spike_monitor_E)
    net.add(rate_monitor_E)
    # net.add(cEX)

    # Randomise initial membrane potentials.
    E_cells.vm = randn(len(E_cells)) * 10 * b2.mV - 60 * b2.mV

    print('Simulation running...')
    start_time = time.time()
    b2.run(sim_duration * b2.ms)
    duration = time.time() - start_time
    print('Simulation time:', duration, 'seconds')

    return spike_monitor_E, rate_monitor_E, state_monitor_E
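The external drive in simulate() is commented out and relies on an undefined input_rates() helper; a self-contained sketch of the same one-to-one Poisson wiring, with an assumed constant rate, synaptic weight and time constant, could look like this.

import brian2 as b2

target = b2.NeuronGroup(400, 'dgx/dt = -gx/tau_x : siemens',
                        namespace={'tau_x': 4 * b2.ms}, method='exact')
drive = b2.PoissonGroup(400, rates=200 * b2.Hz)        # rate is an assumption
cEX = b2.Synapses(drive, target, on_pre='gx += 1.4*nsiemens')
cEX.connect(j='i')   # one-to-one, as in the commented-out code
b2.run(100 * b2.ms)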
def __init__(self, N, model, method=None, threshold=None, reset=None, refractory=False, namespace=None, dtype=None, clock=None, name='neurongroup*', codeobj_class=None): Group.__init__(self, when=clock, name=name) self.codeobj_class = codeobj_class try: self._N = N = int(N) except ValueError: if isinstance(N, str): raise TypeError("First NeuronGroup argument should be size, not equations.") raise if N < 1: raise ValueError("NeuronGroup size should be at least 1, was " + str(N)) self.start = 0 self.stop = self._N ##### Prepare and validate equations if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Check flags model.check_flags({DIFFERENTIAL_EQUATION: ('unless refractory'), PARAMETER: ('constant')}) # add refractoriness if refractory is not False: model = add_refractoriness(model) self.equations = model uses_refractoriness = len(model) and any(['unless refractory' in eq.flags for eq in model.itervalues() if eq.type == DIFFERENTIAL_EQUATION]) logger.debug("Creating NeuronGroup of size {self._N}, " "equations {self.equations}.".format(self=self)) # Setup the namespace self.namespace = create_namespace(namespace) # Setup variables self._create_variables(dtype) # All of the following will be created in before_run #: The threshold condition self.threshold = threshold #: The reset statement(s) self.reset = reset #: The refractory condition or timespan self._refractory = refractory if uses_refractoriness and refractory is False: logger.warn('Model equations use the "unless refractory" flag but ' 'no refractory keyword was given.', 'no_refractory') #: The state update method selected by the user self.method_choice = method #: Performs thresholding step, sets the value of `spikes` self.thresholder = None if self.threshold is not None: self.thresholder = Thresholder(self) #: Resets neurons which have spiked (`spikes`) self.resetter = None if self.reset is not None: self.resetter = Resetter(self) # We try to run a before_run already now. This might fail because of an # incomplete namespace but if the namespace is already complete we # can spot unit errors in the equation already here. try: self.before_run(None) except KeyError: pass #: Performs numerical integration step self.state_updater = StateUpdater(self, method) # Creation of contained_objects that do the work self.contained_objects.append(self.state_updater) if self.thresholder is not None: self.contained_objects.append(self.thresholder) if self.resetter is not None: self.contained_objects.append(self.resetter) if refractory is not False: # Set the refractoriness information self.variables['lastspike'].set_value(-np.inf*second) self.variables['not_refractory'].set_value(True) # Activate name attribute access self._enable_group_attributes()
def __init__(self, morphology=None, model=None, threshold=None, refractory=False, reset=None, events=None, threshold_location=None, dt=None, clock=None, order=0, Cm=0.9 * uF / cm ** 2, Ri=150 * ohm * cm, name='spatialneuron*', dtype=None, namespace=None, method=('exact', 'exponential_euler', 'rk2', 'heun'), method_options=None): # #### Prepare and validate equations if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Insert the threshold mechanism at the specified location if threshold_location is not None: if hasattr(threshold_location, '_indices'): # assuming this is a method threshold_location = threshold_location._indices() # for now, only a single compartment allowed if len(threshold_location) == 1: threshold_location = threshold_location[0] else: raise AttributeError(('Threshold can only be applied on a ' 'single location')) threshold = '(' + threshold + ') and (i == ' + str(threshold_location) + ')' # Check flags (we have point currents) model.check_flags({DIFFERENTIAL_EQUATION: ('point current',), PARAMETER: ('constant', 'shared', 'linked', 'point current'), SUBEXPRESSION: ('shared', 'point current', 'constant over dt')}) #: The original equations as specified by the user (i.e. before #: inserting point-currents into the membrane equation, before adding #: all the internally used variables and constants, etc.). self.user_equations = model # Separate subexpressions depending whether they are considered to be # constant over a time step or not (this would also be done by the # NeuronGroup initializer later, but this would give incorrect results # for the linearity check) model, constant_over_dt = extract_constant_subexpressions(model) # Extract membrane equation if 'Im' in model: if len(model['Im'].flags): raise TypeError('Cannot specify any flags for the transmembrane ' 'current Im.') membrane_expr = model['Im'].expr # the membrane equation else: raise TypeError('The transmembrane current Im must be defined') model_equations = [] # Insert point currents in the membrane equation for eq in model.itervalues(): if eq.varname == 'Im': continue # ignore -- handled separately if 'point current' in eq.flags: fail_for_dimension_mismatch(eq.dim, amp, "Point current " + eq.varname + " should be in amp") membrane_expr = Expression( str(membrane_expr.code) + '+' + eq.varname + '/area') eq = SingleEquation(eq.type, eq.varname, eq.dim, expr=eq.expr, flags=list(set(eq.flags)-set(['point current']))) model_equations.append(eq) model_equations.append(SingleEquation(SUBEXPRESSION, 'Im', dimensions=(amp/meter**2).dim, expr=membrane_expr)) model_equations.append(SingleEquation(PARAMETER, 'v', volt.dim)) model = Equations(model_equations) ###### Process model equations (Im) to extract total conductance and the remaining current # Expand expressions in the membrane equation for var, expr in model.get_substituted_expressions(include_subexpressions=True): if var == 'Im': Im_expr = expr break else: raise AssertionError('Model equations did not contain Im!') # Differentiate Im with respect to v Im_sympy_exp = str_to_sympy(Im_expr.code) v_sympy = sp.Symbol('v', real=True) diffed = sp.diff(Im_sympy_exp, v_sympy) unevaled_derivatives = diffed.atoms(sp.Derivative) if len(unevaled_derivatives): raise TypeError('Cannot take the derivative of "{Im}" with respect ' 'to v.'.format(Im=Im_expr.code)) gtot_str = sympy_to_str(sp.simplify(-diffed)) I0_str = 
sympy_to_str(sp.simplify(Im_sympy_exp - diffed*v_sympy)) if gtot_str == '0': gtot_str += '*siemens/meter**2' if I0_str == '0': I0_str += '*amp/meter**2' gtot_str = "gtot__private=" + gtot_str + ": siemens/meter**2" I0_str = "I0__private=" + I0_str + ": amp/meter**2" model += Equations(gtot_str + "\n" + I0_str) # Insert morphology (store a copy) self.morphology = copy.deepcopy(morphology) # Flatten the morphology self.flat_morphology = FlatMorphology(morphology) # Equations for morphology # TODO: check whether Cm and Ri are already in the equations # no: should be shared instead of constant # yes: should be constant (check) eqs_constants = Equations(""" length : meter (constant) distance : meter (constant) area : meter**2 (constant) volume : meter**3 Ic : amp/meter**2 diameter : meter (constant) Cm : farad/meter**2 (constant) Ri : ohm*meter (constant, shared) r_length_1 : meter (constant) r_length_2 : meter (constant) time_constant = Cm/gtot__private : second space_constant = (2/pi)**(1.0/3.0) * (area/(1/r_length_1 + 1/r_length_2))**(1.0/6.0) / (2*(Ri*gtot__private)**(1.0/2.0)) : meter """) if self.flat_morphology.has_coordinates: eqs_constants += Equations(''' x : meter (constant) y : meter (constant) z : meter (constant) ''') NeuronGroup.__init__(self, morphology.total_compartments, model=model + eqs_constants, method_options=method_options, threshold=threshold, refractory=refractory, reset=reset, events=events, method=method, dt=dt, clock=clock, order=order, namespace=namespace, dtype=dtype, name=name) # Parameters and intermediate variables for solving the cable equations # Note that some of these variables could have meaningful physical # units (e.g. _v_star is in volt, _I0_all is in amp/meter**2 etc.) but # since these variables should never be used in user code, we don't # assign them any units self.variables.add_arrays(['_ab_star0', '_ab_star1', '_ab_star2', '_b_plus', '_b_minus', '_v_star', '_u_plus', '_u_minus', '_v_previous', '_c', # The following two are only necessary for # C code where we cannot deal with scalars # and arrays interchangeably: '_I0_all', '_gtot_all'], size=self.N, read_only=True) self.Cm = Cm self.Ri = Ri # These explict assignments will load the morphology values from disk # in standalone mode self.distance_ = self.flat_morphology.distance self.length_ = self.flat_morphology.length self.area_ = self.flat_morphology.area self.diameter_ = self.flat_morphology.diameter self.r_length_1_ = self.flat_morphology.r_length_1 self.r_length_2_ = self.flat_morphology.r_length_2 if self.flat_morphology.has_coordinates: self.x_ = self.flat_morphology.x self.y_ = self.flat_morphology.y self.z_ = self.flat_morphology.z # Performs numerical integration step self.add_attribute('diffusion_state_updater') self.diffusion_state_updater = SpatialStateUpdater(self, method, clock=self.clock, order=order) # Update v after the gating variables to obtain consistent Ic and Im self.diffusion_state_updater.order = 1 # Creation of contained_objects that do the work self.contained_objects.extend([self.diffusion_state_updater]) if len(constant_over_dt): self.subexpression_updater = SubexpressionUpdater(self, constant_over_dt) self.contained_objects.append(self.subexpression_updater)
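In conventional notation, the symbolic step above splits the membrane current into a total conductance and a residual current:

g_{\mathrm{tot}} = -\frac{\partial I_m}{\partial v},
\qquad
I_0 = I_m - \frac{\partial I_m}{\partial v}\, v,
\qquad\text{so that}\qquad
I_m = -g_{\mathrm{tot}}\, v + I_0 .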
def __init__(self, source, target=None, model=None, pre=None, post=None, connect=False, delay=None, namespace=None, dtype=None, codeobj_class=None, clock=None, method=None, name='synapses*'): self._N = 0 Group.__init__(self, when=clock, name=name) self.codeobj_class = codeobj_class self.source = weakref.proxy(source) if target is None: self.target = self.source else: self.target = weakref.proxy(target) ##### Prepare and validate equations if model is None: model = '' if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Check flags model.check_flags({DIFFERENTIAL_EQUATION: ['event-driven'], STATIC_EQUATION: ['summed'], PARAMETER: ['constant']}) # Separate the equations into event-driven and continuously updated # equations event_driven = [] continuous = [] for single_equation in model.itervalues(): if 'event-driven' in single_equation.flags: event_driven.append(single_equation) else: continuous.append(single_equation) # Add the lastupdate variable, used by event-driven equations continuous.append(SingleEquation(PARAMETER, 'lastupdate', second)) if len(event_driven): self.event_driven = Equations(event_driven) else: self.event_driven = None self.equations = Equations(continuous) # Setup the namespace self._given_namespace = namespace self.namespace = create_namespace(namespace) self._queues = {} self._delays = {} # Setup variables self._create_variables() #: Set of `Variable` objects that should be resized when the #: number of synapses changes self._registered_variables = set() for varname, var in self.variables.iteritems(): if isinstance(var, DynamicArrayVariable): # Register the array with the `SynapticItemMapping` object so # it gets automatically resized self.register_variable(var) #: List of names of all updaters, e.g. ['pre', 'post'] self._synaptic_updaters = [] #: List of all `SynapticPathway` objects self._pathways = [] for prepost, argument in zip(('pre', 'post'), (pre, post)): if not argument: continue if isinstance(argument, basestring): self._add_updater(argument, prepost) elif isinstance(argument, collections.Mapping): for key, value in argument.iteritems(): if not isinstance(key, basestring): err_msg = ('Keys for the "{}" argument' 'have to be strings, got ' '{} instead.').format(prepost, type(key)) raise TypeError(err_msg) self._add_updater(value, prepost, objname=key) # If we have a pathway called "pre" (the most common use case), provide # direct access to its delay via a delay attribute (instead of having # to use pre.delay) if 'pre' in self._synaptic_updaters: self.variables.add_reference('delay', self.pre.variables['delay']) if delay is not None: if isinstance(delay, Quantity): if not 'pre' in self._synaptic_updaters: raise ValueError(('Cannot set delay, no "pre" pathway exists.' 'Use a dictionary if you want to set the ' 'delay for a pathway with a different name.')) delay = {'pre': delay} if not isinstance(delay, collections.Mapping): raise TypeError('Delay argument has to be a quantity or a ' 'dictionary, is type %s instead.' 
% type(delay)) for pathway, pathway_delay in delay.iteritems(): if not pathway in self._synaptic_updaters: raise ValueError(('Cannot set the delay for pathway ' '"%s": unknown pathway.') % pathway) if not isinstance(pathway_delay, Quantity): raise TypeError(('Cannot set the delay for pathway "%s": ' 'expected a quantity, got %s instead.') % (pathway, type(pathway_delay))) if pathway_delay.size != 1: raise TypeError(('Cannot set the delay for pathway "%s": ' 'expected a scalar quantity, got a ' 'quantity with shape %s instead.') % (pathway, str(pathway_delay.shape))) fail_for_dimension_mismatch(pathway_delay, second, ('Delay has to be ' 'specified in units ' 'of seconds')) updater = getattr(self, pathway) # For simplicity, store the delay as a one-element array # so that for example updater._delays[:] works. updater._delays.resize(1) updater._delays.set_value(float(pathway_delay)) updater._delays.scalar = True # Do not resize the scalar delay variable when adding synapses self.unregister_variable(updater._delays) #: Performs numerical integration step self.state_updater = StateUpdater(self, method) self.contained_objects.append(self.state_updater) #: "Summed variable" mechanism -- sum over all synapses of a #: pre-/postsynaptic target self.summed_updaters = {} # We want to raise an error if the same variable is updated twice # using this mechanism. This could happen if the Synapses object # connected a NeuronGroup to itself since then all variables are # accessible as var_pre and var_post. summed_targets = set() for single_equation in self.equations.itervalues(): if 'summed' in single_equation.flags: varname = single_equation.varname if not (varname.endswith('_pre') or varname.endswith('_post')): raise ValueError(('The summed variable "%s" does not end ' 'in "_pre" or "_post".') % varname) if not varname in self.variables: raise ValueError(('The summed variable "%s" does not refer ' 'to any known variable in the ' 'target group.') % varname) if varname.endswith('_pre'): summed_target = self.source orig_varname = varname[:-4] else: summed_target = self.target orig_varname = varname[:-5] target_eq = getattr(summed_target, 'equations', {}).get(orig_varname, None) if target_eq is None or target_eq.type != PARAMETER: raise ValueError(('The summed variable "%s" needs a ' 'corresponding parameter "%s" in the ' 'target group.') % (varname, orig_varname)) fail_for_dimension_mismatch(self.variables['_summed_'+varname].unit, self.variables[varname].unit, ('Summed variables need to have ' 'the same units in Synapses ' 'and the target group')) if self.variables[varname] in summed_targets: raise ValueError(('The target variable "%s" is already ' 'updated by another summed ' 'variable') % orig_varname) summed_targets.add(self.variables[varname]) updater = SummedVariableUpdater(single_equation.expr, varname, self, summed_target) self.summed_updaters[varname] = updater self.contained_objects.append(updater) # Do an initial connect, if requested if not isinstance(connect, (bool, basestring)): raise TypeError(('"connect" keyword has to be a boolean value or a ' 'string, is type %s instead.' % type(connect))) self._initial_connect = connect if not connect is False: self.connect(connect, level=1) # Activate name attribute access self._enable_group_attributes()
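A usage sketch of the summed-variable mechanism validated above, against the released brian2 API; variable names and values are illustrative. The summed subexpression must end in _pre or _post and the target group must define the corresponding parameter.

from brian2 import NeuronGroup, Synapses, nS, ms, run

neurons = NeuronGroup(10, '''dv/dt = -v / (10*ms) : 1
                             g_total : siemens''')
S = Synapses(neurons, neurons,
             model='''w : siemens
                      g_total_post = w : siemens (summed)''')  # summed over all synapses per target
S.connect(p=0.5)
S.w = 1*nS
run(10*ms)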
def __init__(self, N, model, method=('linear', 'euler', 'milstein'), threshold=None, reset=None, refractory=False, namespace=None, dtype=None, dt=None, clock=None, order=0, name='neurongroup*', codeobj_class=None): Group.__init__(self, dt=dt, clock=clock, when='start', order=order, name=name) self.codeobj_class = codeobj_class try: self._N = N = int(N) except ValueError: if isinstance(N, str): raise TypeError( "First NeuronGroup argument should be size, not equations." ) raise if N < 1: raise ValueError("NeuronGroup size should be at least 1, was " + str(N)) self.start = 0 self.stop = self._N ##### Prepare and validate equations if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Check flags model.check_flags({ DIFFERENTIAL_EQUATION: ('unless refractory', ), PARAMETER: ('constant', 'shared', 'linked'), SUBEXPRESSION: ('shared', ) }) # add refractoriness if refractory is not False: model = add_refractoriness(model) self.equations = model uses_refractoriness = len(model) and any([ 'unless refractory' in eq.flags for eq in model.itervalues() if eq.type == DIFFERENTIAL_EQUATION ]) self._linked_variables = set() logger.debug("Creating NeuronGroup of size {self._N}, " "equations {self.equations}.".format(self=self)) if namespace is None: namespace = {} #: The group-specific namespace self.namespace = namespace # Setup variables self._create_variables(dtype) # All of the following will be created in before_run #: The threshold condition self.threshold = threshold #: The reset statement(s) self.reset = reset #: The refractory condition or timespan self._refractory = refractory if uses_refractoriness and refractory is False: logger.warn( 'Model equations use the "unless refractory" flag but ' 'no refractory keyword was given.', 'no_refractory') #: The state update method selected by the user self.method_choice = method #: Performs thresholding step, sets the value of `spikes` self.thresholder = None if self.threshold is not None: self.thresholder = Thresholder(self) #: Resets neurons which have spiked (`spikes`) self.resetter = None if self.reset is not None: self.resetter = Resetter(self) # We try to run a before_run already now. This might fail because of an # incomplete namespace but if the namespace is already complete we # can spot unit errors in the equation already here. try: self.before_run(None) except KeyError: pass #: Performs numerical integration step self.state_updater = StateUpdater(self, method) # Creation of contained_objects that do the work self.contained_objects.append(self.state_updater) if self.thresholder is not None: self.contained_objects.append(self.thresholder) if self.resetter is not None: self.contained_objects.append(self.resetter) if refractory is not False: # Set the refractoriness information self.variables['lastspike'].set_value(-np.inf * second) self.variables['not_refractory'].set_value(True) # Activate name attribute access self._enable_group_attributes()
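The 'milstein' entry in the default method tuple above is only reachable for stochastic equations; a minimal sketch of such a model (values are illustrative):

from brian2 import NeuronGroup, ms, mV, run

tau = 10 * ms
sigma = 5 * mV
G = NeuronGroup(100, 'dv/dt = -v/tau + sigma*xi*tau**-0.5 : volt',
                method='milstein')   # 'heun' or 'euler' also handle this additive noise
run(20 * ms)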
def __init__(self, morphology=None, model=None, threshold=None, refractory=False, reset=None, threshold_location=None, dt=None, clock=None, order=0, Cm=0.9 * uF / cm ** 2, Ri=150 * ohm * cm, name='spatialneuron*', dtype=None, namespace=None, method=('linear', 'exponential_euler', 'rk2', 'milstein')): # #### Prepare and validate equations if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Insert the threshold mechanism at the specified location if threshold_location is not None: if hasattr(threshold_location, '_indices'): # assuming this is a method threshold_location = threshold_location._indices() # for now, only a single compartment allowed if len(threshold_location) == 1: threshold_location = threshold_location[0] else: raise AttributeError(('Threshold can only be applied on a ' 'single location')) threshold = '(' + threshold + ') and (i == ' + str(threshold_location) + ')' # Check flags (we have point currents) model.check_flags({DIFFERENTIAL_EQUATION: ('point current',), PARAMETER: ('constant', 'shared', 'linked', 'point current'), SUBEXPRESSION: ('shared', 'point current')}) # Add the membrane potential model += Equations(''' v:volt # membrane potential ''') # Extract membrane equation if 'Im' in model: membrane_eq = model['Im'] # the membrane equation else: raise TypeError('The transmembrane current Im must be defined') # Insert point currents in the membrane equation for eq in model.itervalues(): if 'point current' in eq.flags: fail_for_dimension_mismatch(eq.unit, amp, "Point current " + eq.varname + " should be in amp") eq.flags.remove('point current') membrane_eq.expr = Expression( str(membrane_eq.expr.code) + '+' + eq.varname + '/area') ###### Process model equations (Im) to extract total conductance and the remaining current # Check conditional linearity with respect to v # Match to _A*v+_B var = sp.Symbol('v', real=True) wildcard = sp.Wild('_A', exclude=[var]) constant_wildcard = sp.Wild('_B', exclude=[var]) pattern = wildcard * var + constant_wildcard # Expand expressions in the membrane equation membrane_eq.type = DIFFERENTIAL_EQUATION for var, expr in model._get_substituted_expressions(): # this returns substituted expressions for diff eqs if var == 'Im': Im_expr = expr membrane_eq.type = SUBEXPRESSION # Factor out the variable s_expr = sp.collect(Im_expr.sympy_expr.expand(), var) matches = s_expr.match(pattern) if matches is None: raise TypeError, "The membrane current must be linear with respect to v" a, b = (matches[wildcard], matches[constant_wildcard]) # Extracts the total conductance from Im, and the remaining current minusa_str, b_str = sympy_to_str(-a), sympy_to_str(b) # Add correct units if necessary if minusa_str == '0': minusa_str += '*siemens/meter**2' if b_str == '0': b_str += '*amp/meter**2' gtot_str = "gtot__private=" + minusa_str + ": siemens/meter**2" I0_str = "I0__private=" + b_str + ": amp/meter**2" model += Equations(gtot_str + "\n" + I0_str) # Equations for morphology # TODO: check whether Cm and Ri are already in the equations # no: should be shared instead of constant # yes: should be constant (check) eqs_constants = Equations(""" diameter : meter (constant) length : meter (constant) x : meter (constant) y : meter (constant) z : meter (constant) distance : meter (constant) area : meter**2 (constant) Cm : farad/meter**2 (constant) Ri : ohm*meter (constant, shared) space_constant = 
(diameter/(4*Ri*gtot__private))**.5 : meter # Not so sure about the name ### Parameters and intermediate variables for solving the cable equation ab_star0 : siemens/meter**2 ab_plus0 : siemens/meter**2 ab_minus0 : siemens/meter**2 ab_star1 : siemens/meter**2 ab_plus1 : siemens/meter**2 ab_minus1 : siemens/meter**2 ab_star2 : siemens/meter**2 ab_plus2 : siemens/meter**2 ab_minus2 : siemens/meter**2 b_plus : siemens/meter**2 b_minus : siemens/meter**2 v_star : volt u_plus : 1 u_minus : 1 """) # Possibilities for the name: characteristic_length, electrotonic_length, length_constant, space_constant # Insert morphology self.morphology = morphology # Link morphology variables to neuron's state variables self.morphology_data = MorphologyData(len(morphology)) self.morphology.compress(self.morphology_data) NeuronGroup.__init__(self, len(morphology), model=model + eqs_constants, threshold=threshold, refractory=refractory, reset=reset, method=method, dt=dt, clock=clock, order=order, namespace=namespace, dtype=dtype, name=name) self.Cm = Cm self.Ri = Ri # TODO: View instead of copy for runtime? self.diameter_ = self.morphology_data.diameter self.distance_ = self.morphology_data.distance self.length_ = self.morphology_data.length self.area_ = self.morphology_data.area self.x_ = self.morphology_data.x self.y_ = self.morphology_data.y self.z_ = self.morphology_data.z # Performs numerical integration step self.add_attribute('diffusion_state_updater') self.diffusion_state_updater = SpatialStateUpdater(self, method, clock=self.clock, order=order) # Creation of contained_objects that do the work self.contained_objects.extend([self.diffusion_state_updater])
def __init__(self, morphology=None, model=None, threshold=None, refractory=False, reset=None, events=None, threshold_location=None, dt=None, clock=None, order=0, Cm=0.9 * uF / cm**2, Ri=150 * ohm * cm, name='spatialneuron*', dtype=None, namespace=None, method=('linear', 'exponential_euler', 'rk2', 'heun')): # #### Prepare and validate equations if isinstance(model, basestring): model = Equations(model) if not isinstance(model, Equations): raise TypeError(('model has to be a string or an Equations ' 'object, is "%s" instead.') % type(model)) # Insert the threshold mechanism at the specified location if threshold_location is not None: if hasattr(threshold_location, '_indices'): # assuming this is a method threshold_location = threshold_location._indices() # for now, only a single compartment allowed if len(threshold_location) == 1: threshold_location = threshold_location[0] else: raise AttributeError(('Threshold can only be applied on a ' 'single location')) threshold = '(' + threshold + ') and (i == ' + str( threshold_location) + ')' # Check flags (we have point currents) model.check_flags({ DIFFERENTIAL_EQUATION: ('point current', ), PARAMETER: ('constant', 'shared', 'linked', 'point current'), SUBEXPRESSION: ('shared', 'point current') }) # Add the membrane potential model += Equations(''' v:volt # membrane potential ''') # Extract membrane equation if 'Im' in model: membrane_eq = model['Im'] # the membrane equation else: raise TypeError('The transmembrane current Im must be defined') # Insert point currents in the membrane equation for eq in model.itervalues(): if 'point current' in eq.flags: fail_for_dimension_mismatch( eq.unit, amp, "Point current " + eq.varname + " should be in amp") eq.flags.remove('point current') membrane_eq.expr = Expression( str(membrane_eq.expr.code) + '+' + eq.varname + '/area') ###### Process model equations (Im) to extract total conductance and the remaining current # Check conditional linearity with respect to v # Match to _A*v+_B var = sp.Symbol('v', real=True) wildcard = sp.Wild('_A', exclude=[var]) constant_wildcard = sp.Wild('_B', exclude=[var]) pattern = wildcard * var + constant_wildcard # Expand expressions in the membrane equation membrane_eq.type = DIFFERENTIAL_EQUATION for var, expr in model.get_substituted_expressions(): if var == 'Im': Im_expr = expr membrane_eq.type = SUBEXPRESSION # Factor out the variable s_expr = sp.collect(str_to_sympy(Im_expr.code).expand(), var) matches = s_expr.match(pattern) if matches is None: raise TypeError, "The membrane current must be linear with respect to v" a, b = (matches[wildcard], matches[constant_wildcard]) # Extracts the total conductance from Im, and the remaining current minusa_str, b_str = sympy_to_str(-a), sympy_to_str(b) # Add correct units if necessary if minusa_str == '0': minusa_str += '*siemens/meter**2' if b_str == '0': b_str += '*amp/meter**2' gtot_str = "gtot__private=" + minusa_str + ": siemens/meter**2" I0_str = "I0__private=" + b_str + ": amp/meter**2" model += Equations(gtot_str + "\n" + I0_str) # Insert morphology (store a copy) self.morphology = copy.deepcopy(morphology) # Flatten the morphology self.flat_morphology = FlatMorphology(morphology) # Equations for morphology # TODO: check whether Cm and Ri are already in the equations # no: should be shared instead of constant # yes: should be constant (check) eqs_constants = Equations(""" length : meter (constant) distance : meter (constant) area : meter**2 (constant) volume : meter**3 diameter : meter (constant) Cm : farad/meter**2 
(constant) Ri : ohm*meter (constant, shared) r_length_1 : meter (constant) r_length_2 : meter (constant) time_constant = Cm/gtot__private : second space_constant = (2/pi)**(1.0/3.0) * (area/(1/r_length_1 + 1/r_length_2))**(1.0/6.0) / (2*(Ri*gtot__private)**(1.0/2.0)) : meter """) if self.flat_morphology.has_coordinates: eqs_constants += Equations(''' x : meter (constant) y : meter (constant) z : meter (constant) ''') NeuronGroup.__init__(self, morphology.total_compartments, model=model + eqs_constants, threshold=threshold, refractory=refractory, reset=reset, events=events, method=method, dt=dt, clock=clock, order=order, namespace=namespace, dtype=dtype, name=name) # Parameters and intermediate variables for solving the cable equations # Note that some of these variables could have meaningful physical # units (e.g. _v_star is in volt, _I0_all is in amp/meter**2 etc.) but # since these variables should never be used in user code, we don't # assign them any units self.variables.add_arrays( [ '_ab_star0', '_ab_star1', '_ab_star2', '_a_minus0', '_a_minus1', '_a_minus2', '_a_plus0', '_a_plus1', '_a_plus2', '_b_plus', '_b_minus', '_v_star', '_u_plus', '_u_minus', # The following three are for solving the # three tridiag systems in parallel '_c1', '_c2', '_c3', # The following two are only necessary for # C code where we cannot deal with scalars # and arrays interchangeably: '_I0_all', '_gtot_all' ], unit=1, size=self.N, read_only=True) self.Cm = Cm self.Ri = Ri # These explict assignments will load the morphology values from disk # in standalone mode self.distance_ = self.flat_morphology.distance self.length_ = self.flat_morphology.length self.area_ = self.flat_morphology.area self.diameter_ = self.flat_morphology.diameter self.r_length_1_ = self.flat_morphology.r_length_1 self.r_length_2_ = self.flat_morphology.r_length_2 if self.flat_morphology.has_coordinates: self.x_ = self.flat_morphology.x self.y_ = self.flat_morphology.y self.z_ = self.flat_morphology.z # Performs numerical integration step self.add_attribute('diffusion_state_updater') self.diffusion_state_updater = SpatialStateUpdater(self, method, clock=self.clock, order=order) # Creation of contained_objects that do the work self.contained_objects.extend([self.diffusion_state_updater])
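Written out, the per-compartment constants defined in eqs_constants above correspond to:

\tau_m = \frac{C_m}{g_{\mathrm{tot}}},
\qquad
\lambda = \left(\frac{2}{\pi}\right)^{1/3}
\frac{\left(\dfrac{\mathrm{area}}{1/r_{\mathrm{length},1} + 1/r_{\mathrm{length},2}}\right)^{1/6}}{2\,\sqrt{R_i\, g_{\mathrm{tot}}}} .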