class SpikeSourceInhGamma(StandardCellType):
    """
    Spike source, generating realizations of an inhomogeneous gamma process,
    employing the thinning method.

    See: Muller et al (2007) Spike-frequency adapting neural ensembles: Beyond
    mean-adaptation and renewal theories. Neural Computation 19: 2958-3010.
    """

    default_parameters = {
        'a': Sequence([1.0]),      # time histogram of parameter a of a gamma distribution (dimensionless)
        'b': Sequence([1.0]),      # time histogram of parameter b of a gamma distribution (seconds)
        'tbins': Sequence([0.0]),  # time bins of the time histogram of a, b in units of ms
        'start': 0.0,              # start time (ms)
        'duration': 1e10           # duration of spike sequence (ms)
    }
    recordable = ['spikes']
    injectable = False
    receptor_types = ()
    units = {
        'a': 'dimensionless',
        'b': 's',
        'tbins': 'ms',
        'start': 'ms',
        'duration': 'ms',
    }
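A minimal usage sketch for the cell type above. The parameter values are illustrative, and the example assumes a backend that implements this standard model (e.g. pyNN.nest in versions that ship it):

# Usage sketch (illustrative values; assumes backend support for this model)
import pyNN.nest as sim
from pyNN.parameters import Sequence

sim.setup(timestep=0.1)
source = sim.Population(10, sim.SpikeSourceInhGamma(
    a=Sequence([3.0, 5.0]),        # gamma shape per time bin (dimensionless)
    b=Sequence([0.1, 0.05]),       # gamma scale per time bin (s)
    tbins=Sequence([0.0, 500.0]),  # left edges of the time bins (ms)
    start=0.0,
    duration=1000.0))
source.record('spikes')
sim.run(1000.0)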
def poisson(rate, T, rng=NumpyRNG(), rounding=False, t_min=1.0, min_isi=10.0):
    """
    Generate Poisson-distributed spikes on [t_min, T), with a minimum
    inter-spike separation and optional rounding.

    Parameters
    ----------
    rate : float
        Firing rate (Hz)
    T : float
        Time interval (ms)
    t_min : float
        Lower bound on generated time values (ms)
    min_isi : float
        Minimum inter-spike separation (ms)

    Returns
    -------
    spike_times : pyNN.parameters.Sequence (float64)
    """
    spike_times = t_min + rng.exponential(1000.0 / rate, 1)
    while spike_times[-1] < T - min_isi:
        timing = spike_times[-1] + (min_isi + rng.exponential(1000.0 / rate))
        spike_times = np.append(spike_times, timing)
    spike_times = spike_times[spike_times < T]
    if rounding:
        return Sequence(np.floor(spike_times))
    else:
        return Sequence(spike_times)
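A usage sketch for the generator above, assuming numpy is imported as np and NumpyRNG comes from pyNN.random as in the snippet's own imports:

from pyNN.random import NumpyRNG

rng = NumpyRNG(seed=42)
train = poisson(rate=20.0, T=500.0, rng=rng)  # ~20 Hz train over 500 ms
# Times are strictly increasing and separated by at least min_isi (default 10 ms)
print(train.value)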
def prepare_stimulation(self, duration, offset):
    if not self.sheet.parameters.mpi_safe:
        self.np_exc[0].set_parameters(rate=self.parameters.exc_firing_rate)
        self.np_inh[0].set_parameters(rate=self.parameters.inh_firing_rate)
    else:
        if (self.parameters.exc_firing_rate != 0
                or self.parameters.exc_weight != 0):
            for j, i in enumerate(numpy.nonzero(self.sheet.pop._mask_local)[0]):
                pp = self.stgene[j].poisson_generator(
                    rate=self.parameters.exc_firing_rate,
                    t_start=0,
                    t_stop=duration).spike_times
                self.ssae[i].set_parameters(
                    spike_times=Sequence(offset + numpy.array(pp)))
        if (self.parameters.inh_firing_rate != 0
                or self.parameters.inh_weight != 0):
            for j, i in enumerate(numpy.nonzero(self.sheet.pop._mask_local)[0]):
                pp = self.stgene[j].poisson_generator(
                    rate=self.parameters.inh_firing_rate,
                    t_start=0,
                    t_stop=duration).spike_times
                self.ssai[i].set_parameters(
                    spike_times=Sequence(offset + numpy.array(pp)))
def generate_poisson_spike_times(pop_size, start_time, duration, fr, timestep,
                                 random_seed):
    """
    generate_poisson_spike_times generates (N = pop_size) Poisson-distributed
    spike trains with firing rate fr.

    Example inputs:
        pop_size = 10
        start_time = 0.0   # ms
        duration = 6000.0  # ms
        timestep = 1       # ms
        fr = 1             # Hz
    """
    # Convert to seconds for building the spike matrix
    dt = float(timestep) / 1000.0    # sec
    tSim = float(duration) / 1000.0  # sec
    nBins = int(np.floor(tSim / dt))
    # A bin contains a spike when a uniform draw falls below fr * dt
    spikeMat = np.where(np.random.uniform(0, 1, (pop_size, nBins)) < fr * dt)

    # Create time vector - ms
    tVec = np.arange(start_time, start_time + duration, timestep)

    # Make array of spike times, one Sequence per neuron
    for neuron_index in np.arange(pop_size):
        neuron_spike_times = tVec[spikeMat[1][np.where(
            spikeMat[0][:] == neuron_index)]]
        if neuron_index == 0:
            spike_times = Sequence(neuron_spike_times)
        else:
            spike_times = np.vstack(
                (spike_times, Sequence(neuron_spike_times)))
    return spike_times
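A usage sketch under the docstring's example values. Note that random_seed is accepted but never applied inside this excerpt, so seeding np.random beforehand is assumed here:

np.random.seed(42)  # the random_seed argument itself is unused in the excerpt
trains = generate_poisson_spike_times(pop_size=10, start_time=0.0,
                                      duration=6000.0, fr=1, timestep=1,
                                      random_seed=42)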
def test_get_sequence_param(self):
    p = sim.Population(3, sim.SpikeSourceArray(spike_times=[Sequence([1, 2, 3, 4]),
                                                            Sequence([2, 3, 4, 5]),
                                                            Sequence([3, 4, 5, 6])]))
    spike_times = p.get('spike_times')
    self.assertEqual(spike_times.size, 3)
    assert_array_equal(spike_times[1], Sequence([2, 3, 4, 5]))
def test_create_with_list_of_lists(self):
    schema = {'a': Sequence}
    ps = ParameterSpace({'a': [[1, 2, 3], [4, 5, 6]]}, schema, shape=(2,))
    ps.evaluate()
    assert_array_equal(ps['a'], np.array([Sequence([1, 2, 3]), Sequence([4, 5, 6])],
                                         dtype=Sequence))
def set(self, **parameters):
    # Loop through all parameters
    parent_params = self.parent._parameters
    for n, v in iteritems(parameters):
        # Expand parent parameters
        param_vals = parent_params[n].evaluate(simplify=False)

        # If parameter is a sequence and value has a length
        # **NOTE** following logic is copied from pyNN.parameters.ParameterSpace
        if parent_params[n].dtype is Sequence and isinstance(v, Sized):
            # If it's empty, replace v with an empty Sequence
            if len(v) == 0:
                v = Sequence([])
            # Otherwise, if v isn't a sequence of sequences
            elif not isinstance(v[0], Sequence):
                # If v is a sequence of some other things with length,
                if isinstance(v[0], Sized):
                    v = type(v)([Sequence(x) for x in v])
                # Otherwise, convert v into a Sequence
                else:
                    v = Sequence(v)

        # Replace the masked section of values
        param_vals[self.mask] = v

        # Convert the result back into a lazy array
        parent_params[n] = larray(param_vals, dtype=parent_params[n].dtype,
                                  shape=parent_params[n].shape)
def test_create_with_tuple(self):
    schema = {'a': Sequence}
    ps = ParameterSpace({'a': (1, 2, 3)}, schema, shape=(2,))
    ps.evaluate()
    assert_array_equal(ps['a'], np.array([Sequence([1, 2, 3]), Sequence([1, 2, 3])],
                                         dtype=Sequence))
def test_set_sequence(self):
    p = sim.Population(3, sim.SpikeSourceArray())
    p.set(spike_times=[Sequence([1, 2, 3, 4]),
                       Sequence([2, 3, 4, 5]),
                       Sequence([3, 4, 5, 6])])
    spike_times = p.get('spike_times', gather=True)
    self.assertEqual(spike_times.size, 3)
    assert_array_equal(spike_times[1], Sequence([2, 3, 4, 5]))
def test_get_sequence_param(self, sim=sim):
    p = sim.Population(3, sim.SpikeSourceArray,
                       {'spike_times': [Sequence([1, 2, 3, 4]),
                                        Sequence([2, 3, 4, 5]),
                                        Sequence([3, 4, 5, 6])]})
    pv = p[1:]
    spike_times = pv.get('spike_times')
    self.assertEqual(spike_times.size, 2)
    assert_array_equal(spike_times[1], Sequence([3, 4, 5, 6]))
def test_update_SpikeSourceArray(sim, plot_figure=False):
    sim.setup()
    sources = sim.Population(2, sim.SpikeSourceArray(spike_times=[]))
    sources.record('spikes')
    sim.run(10.0)
    sources.set(spike_times=[Sequence([12, 15, 18]),
                             Sequence([17, 19])])
    sim.run(10.0)
    sources.set(spike_times=[Sequence([22, 25]),
                             Sequence([23, 27, 29])])
    sim.run(10.0)
    data = sources.get_data().segments[0].spiketrains
    assert_array_equal(data[0].magnitude, np.array([12, 15, 18, 22, 25]))
def prepare_stimulation(self, duration, offset):
    assert self.stimulation_duration == duration, \
        "stimulation_duration != duration : " \
        + str(self.stimulation_duration) + " " + str(duration)
    times = numpy.arange(0, self.stimulation_duration,
                         self.parameters.current_update_interval) + offset
    times[0] = times[0] + 3 * self.sheet.dt
    for i in xrange(0, len(self.scs)):
        self.scs[i].set_parameters(
            times=Sequence(times),
            amplitudes=Sequence(self.mixed_signals[i, :].flatten()),
            copy=False)
class StepCurrentSource(StandardCurrentSource):
    """A step-wise time-varying current source.

    Arguments:
        `times`: list/array of times at which the injected current changes.
        `amplitudes`: list/array of current amplitudes to be injected at the
                      times specified in `times`.

    The injected current will be zero up until the first time in `times`.
    The current will continue at the final value in `amplitudes` until the
    end of the simulation.
    """
    default_parameters = {'amplitudes': Sequence([]), 'times': Sequence([])}
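A usage sketch with the standard pyNN current-injection API; the backend module and cell type are illustrative:

import pyNN.nest as sim

sim.setup(timestep=0.1)
cell = sim.Population(1, sim.IF_cond_exp())
# 0 nA until 20 ms, 0.5 nA from 20-80 ms, then 0 nA for the rest of the run
step = sim.StepCurrentSource(times=[20.0, 80.0], amplitudes=[0.5, 0.0])
step.inject_into(cell)
cell.record('v')
sim.run(100.0)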
def get_spikes(self, layer):
    """Return recorded spike trains of a given layer, from the latest run"""
    spike_trains_out = [
        Sequence(np.array(spike_times))
        for spike_times in layer.get_data('spikes').segments[-1].spiketrains
    ]
    return spike_trains_out
def _get_parameters(self, *names):
    """Return a ParameterSpace containing native parameters."""
    ids = self.local_cells.tolist()
    if hasattr(self.celltype, "uses_parrot") and self.celltype.uses_parrot:
        ids = [id.source for id in ids]
    if "spike_times" in names:
        parameter_dict = {
            "spike_times": [Sequence(value)
                            for value in nest.GetStatus(ids, names)]
        }
    else:
        parameter_dict = {}
        for name in names:  # one name at a time, since some parameter values may be tuples
            val = np.array(nest.GetStatus(ids, name))
            if isinstance(val[0], tuple) or len(val.shape) == 2:
                val = np.array([ArrayParameter(v) for v in val])
                val = LazyArray(simplify(val), shape=(self.local_size,),
                                dtype=ArrayParameter)
                parameter_dict[name] = val
            else:
                parameter_dict[name] = simplify(val)
    ps = ParameterSpace(parameter_dict, shape=(self.local_size,))
    return ps
class SpikeSourceArray(StandardCellType):
    """Spike source generating spikes at the times given in the spike_times array."""

    default_parameters = {'spike_times': Sequence([])}  # list or numpy array containing spike times in milliseconds
    recordable = ['spikes']
    injectable = False
    receptor_types = ()
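A usage sketch with the standard pyNN API (spike times are illustrative); plain lists are also accepted and converted to Sequence internally:

import pyNN.nest as sim
from pyNN.parameters import Sequence

sim.setup()
sources = sim.Population(2, sim.SpikeSourceArray(
    spike_times=[Sequence([5.0, 15.0, 25.0]),  # one train per cell
                 Sequence([10.0, 20.0])]))
sources.record('spikes')
sim.run(50.0)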
def get_pyNN_value(qty, unit_handler, rng):
    if isinstance(qty.value, SingleValue):
        val = unit_handler.scale_value(qty)
    elif isinstance(qty.value, ArrayValue):
        scalar = unit_handler.scalar(qty.units)
        val = Sequence([v * scalar for v in qty.value])
    elif isinstance(qty.value, RandomDistributionValue):
        if unit_handler.scalar(qty.units) != 1.0:
            raise NotImplementedError(
                "Cannot currently scale random distributions as required to "
                "get {} into the correct units".format(qty))
        try:
            rv_name, rv_param_names = random_value_map[
                qty.value.distribution.standard_library]
        except KeyError:
            raise NotImplementedError(
                "Sorry, '{}' random distributions are not currently supported"
                .format(qty.value.distribution.standard_library))
        rv_params = [
            qty.value.distribution.property(n).value for n in rv_param_names]
        # UncertML uses a 'rate' parameter whereas PyNN uses a 'beta' parameter
        # (1/rate) to define exponential random distributions.
        if rv_name == 'exponential':
            rv_params[0] = 1.0 / rv_params[0]
        # FIXME: Need to scale random distributions to the correct units.
        # Should probably derive a PyNN RandomDistribution subclass that
        # multiplies by the scale factor when a value is drawn.
        val = RandomDistribution(rv_name, rv_params, rng=rng)
    return val
def build(self, spike_distrib='uniform'):
    """Build target spike pattern according to a given distribution"""
    if self.built:
        return
    if spike_distrib == 'uniform':
        for i in xrange(self.n_trains):
            self.spike_trains[i] = spikegen.unif(
                self.param.n_target_spikes, self.param.T, self.param.rng,
                True, self.param.t_min)
    elif spike_distrib == 'poisson':
        # No spikes for a target output rate of zero
        if self.param.n_target_spikes == 0:
            self.spike_trains = [Sequence(np.array([]))
                                 for i in xrange(self.n_trains)]
        else:
            # Interpret n_target_spikes as the expected number of spikes
            target_rate = self.param.n_target_spikes / self.param.T * 1000.
            for i in xrange(self.n_trains):
                while True:
                    self.spike_trains[i] = spikegen.poisson(
                        target_rate, self.param.T, self.param.rng, True,
                        self.param.t_min)
                    # Ensure at least one target spike to classify
                    if len(self.spike_trains[i].value) > 0:
                        break
    else:
        raise ValueError('Invalid spiking distribution')
    self.built = True
def generate_spike_times(i):
    gen = lambda: Sequence(
        numpy.add.accumulate(
            numpy.random.exponential(1000.0 / input_rate, size=number)))
    if hasattr(i, "__len__"):
        return [gen() for j in i]
    else:
        return gen()
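This callable form relies on pyNN's support for index-dependent parameter values. A sketch, assuming input_rate and number are defined in the enclosing scope as in the original script:

input_rate = 50.0  # Hz
number = 20        # spikes per train
# pyNN evaluates the callable per cell index, so each cell receives an
# independently drawn cumulative-exponential spike train
p = sim.Population(5, sim.SpikeSourceArray(spike_times=generate_spike_times))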
class Cortical_Neuron_Type(NativeCellType):
    default_parameters = {
        'soma_L': 35, 'soma_diam': 25, 'soma_nseg': 1, 'soma_Ra': 150,
        'soma_cm': 1, 'soma_bias_current_amp': 0.12,
        'ais_L': 20, 'ais_diam': 1.2, 'ais_nseg': 5, 'ais_Ra': 150,
        'ais_cm': 0.8,
        'myelin_L': 500, 'myelin_L_0': 80, 'myelin_diam': 1.4,
        'myelin_Ra': 150, 'myelin_cm': 0.04,
        'node_L': 2, 'node_diam': 1.2, 'node_nseg': 1, 'node_Ra': 150,
        'node_cm': 0.8,
        'collateral_L': 500, 'collateral_diam': 0.5, 'collateral_nseg': 11,
        'collateral_Ra': 150, 'collateral_cm': 0.8,
        'num_axon_compartments': 10
    }

    # Define initial vector of transfer resistances for the collateral segments
    initial_collateral_rx = np.zeros(
        (1, default_parameters['collateral_nseg'])).flatten()
    initial_collateral_rx_Sequence = Sequence(initial_collateral_rx)
    default_parameters['collateral_rx'] = initial_collateral_rx_Sequence

    default_initial_values = {'v': -68.0}
    recordable = ['soma(0.5).v', 'collateral(0.5).v',
                  'collateral(0.5).i_membrane_', 'ais(0.5).v',
                  'middle_node(0.5).v', 'middle_myelin(0.5).v',
                  'AMPA.i', 'GABAa.i']
    units = {
        'soma(0.5).v': 'mV',
        'collateral(0.5).v': 'mV',
        'collateral(0.5).i_membrane_': 'nA',
        'ais(0.5).v': 'mV',
        'middle_node(0.5).v': 'mV',
        'middle_myelin(0.5).v': 'mV',
        'AMPA.i': 'nA',
        'GABAa.i': 'nA'
    }
    receptor_types = ['AMPA', 'GABAa']
    model = Cortical_Neuron
def unif(n_spikes, T, rng=NumpyRNG(), rounding=False, t_min=1.0, min_isi=10.0):
    """
    Generate uniformly distributed spikes on [t_min, T), with a minimum
    inter-spike separation and optional rounding.

    pyNN.nest is generally unstable with rounding for input spikes:
    it raises an error if the lowest spike time is exactly equal to dt,
    and input spikes between 0.0 and dt are not integrated over.

    Parameters
    ----------
    n_spikes : int
        Number of spikes to generate
    T : float
        Time interval (ms)
    t_min : float
        Lower bound on generated time values (ms)
    min_isi : float
        Minimum inter-spike separation; n_spikes * min_isi << T (default 10 ms)

    Returns
    -------
    spike_times : pyNN.parameters.Sequence (float64)
    """
    spike_times = np.empty([0], dtype=float)
    while spike_times.size < n_spikes:
        timing = rng.uniform(t_min, T)
        # Ensure minimum separation w.r.t. existing spikes
        if (spike_times.size > 0
                and np.min(np.abs(timing - spike_times)) < min_isi):
            continue
        else:
            spike_times = np.append(spike_times, timing)
    spike_times.sort()
    if rounding:
        return Sequence(np.floor(spike_times))
    else:
        return Sequence(spike_times)
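A usage sketch, with the same imports as for poisson() above (numpy as np, NumpyRNG, Sequence):

rng = NumpyRNG(seed=1)
train = unif(n_spikes=8, T=500.0, rng=rng)   # 8 spikes in [1.0, 500.0) ms
assert np.all(np.diff(train.value) >= 10.0)  # default min_isi separation holds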
def _get_parameters(self, *names):
    """Return a ParameterSpace containing native parameters."""
    parameter_dict = {}
    for name in names:
        if name == 'spike_times':  # hack
            parameter_dict[name] = [Sequence(getattr(id._cell, name))
                                    for id in self]
        else:
            parameter_dict[name] = simplify(
                numpy.array([getattr(id._cell, name) for id in self]))
    return ParameterSpace(parameter_dict, shape=(self.local_size,))
def jitter(self, pattern_ref, noise_stdev):
    """Build the pattern from a jittered copy of pattern_ref."""
    for i in xrange(self.n_trains):
        # Copy spike times from the reference (unjittered) pattern
        spike_times = pattern_ref.spike_trains[i].value.copy()
        self.spike_trains[i] = Sequence(
            self.param.rng.normal(spike_times, noise_stdev,
                                  spike_times.size))
    self.built = True
def prepare_stimulation(self, duration, offset):
    if (self.parameters.exc_firing_rate != 0
            and self.parameters.exc_weight != 0):
        for j, i in enumerate(self.to_stimulate_indexes):
            if self.parameters.drive_period < duration:
                z = numpy.arange(self.parameters.drive_period + 0.001,
                                 duration - 100, 10)
                times = [0] + z.tolist()
                rate = [self.parameters.exc_firing_rate] + (
                    (1.0 - numpy.linspace(0, 1.0, len(z)))
                    * self.parameters.exc_firing_rate).tolist()
            else:
                times = [0]
                rate = [self.parameters.exc_firing_rate]
            pp = self.stgene[j].inh_poisson_generator(
                numpy.array(rate), numpy.array(times),
                t_stop=duration).spike_times
            a = offset + numpy.array(pp)
            self.ssae[i].set_parameters(spike_times=Sequence(a.astype(float)))
def _get_parameters(self, *names):
    """Return a ParameterSpace containing native parameters."""
    ids = self.local_cells.tolist()
    if hasattr(self.celltype, "uses_parrot") and self.celltype.uses_parrot:
        ids = [id.source for id in ids]
    parameter_array = numpy.array(nest.GetStatus(ids, names))
    parameter_dict = dict((name, simplify(parameter_array[:, col]))
                          for col, name in enumerate(names))
    if "spike_times" in parameter_dict:  # hack
        parameter_dict["spike_times"] = [Sequence(value)
                                         for value in parameter_dict["spike_times"]]
    return ParameterSpace(parameter_dict, shape=(self.local_size,))
def test_get_sequence_param(self, sim=sim):
    p1 = sim.Population(3, sim.SpikeSourceArray(spike_times=[Sequence([1, 2, 3, 4]),
                                                             Sequence([2, 3, 4, 5]),
                                                             Sequence([3, 4, 5, 6])]))
    p2 = sim.Population(2, sim.SpikeSourceArray(spike_times=[Sequence([4, 5, 6, 7]),
                                                             Sequence([5, 6, 7, 8])]))
    a = p1 + p2
    spike_times = a.get('spike_times')
    self.assertEqual(spike_times.size, 5)
    assert_array_equal(spike_times[3], Sequence([4, 5, 6, 7]))
def __call__(self, t):
    try:
        rate = next(rate_generator)
        if rate > 0:
            isi = 1000.0 / rate
            times = t + np.arange(0, self.update_interval, isi)
            # Here each neuron fires with the same ISI,
            # but there is a phase offset between neurons
            spike_times = [
                Sequence(times + phase * isi)
                for phase in self.population.annotations["phase"]
            ]
        else:
            spike_times = []
        self.population.set(spike_times=spike_times)
    except StopIteration:
        pass
    return t + self.update_interval
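A wiring sketch for this method: pyNN's run() accepts a callbacks argument and re-invokes each callback at the time it returns. The wrapper class name and constructor below are hypothetical:

class SetRate(object):
    """Hypothetical wrapper for the __call__ above (names are assumptions)."""
    def __init__(self, population, update_interval):
        self.population = population
        self.update_interval = update_interval
    # ... __call__ as defined above, with rate_generator in scope

# sim.run(1000.0, callbacks=[SetRate(pop, update_interval=100.0)])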
def _get_parameters(self, *names):
    """Return a ParameterSpace containing native parameters."""
    parameter_dict = {}
    for name in names:
        if name == 'spike_times':  # hack
            parameter_dict[name] = [Sequence(getattr(id._cell, name))
                                    for id in self]
        else:
            val = numpy.array([getattr(id._cell, name) for id in self])
            if isinstance(val[0], tuple) or len(val.shape) == 2:
                val = numpy.array([ArrayParameter(v) for v in val])
                val = LazyArray(simplify(val), shape=(self.local_size,),
                                dtype=ArrayParameter)
                parameter_dict[name] = val
            else:
                parameter_dict[name] = simplify(val)
    return ParameterSpace(parameter_dict, shape=(self.local_size,))
def createPoissonSpikeInput(X_test, ind, layers):
    """
    Function taken from ``simulation.run_SNN()``. Replaced
    ``SpikeSourceArray`` with ``SpikeSourcePoisson``, which is faster and
    easier to debug.
    """
    import numpy as np
    from pyNN.parameters import Sequence
    from snntoolbox.utils.config import simparams

    dt = int(np.ceil(simparams['dt']))
    duration = int(simparams['duration'])
    spike_list = []
    # Loop over simulation time with temporal resolution dt to determine
    # when the test sample causes spikes in the input layer.
    # Side note: the NEST simulator places several restrictions on spike
    # times (see http://www.nest-simulator.org/cc/spike_generator/), so we
    # shift the spike times by 2*dt.
    for t in range(2 * dt, duration + 2 * dt, dt):
        # Draw a random sample of the same size as the input sample.
        spike_snapshot = \
            np.random.random_sample(int(np.prod(X_test[0, :].shape)))
        # Fire a spike at time t if an entry in the input sample (flattened
        # to 1D and multiplied by the maximum firing rate) exceeds the
        # random number (array of booleans).
        spikes = spike_snapshot <= X_test[ind, :].flatten()
        # Convert the array of booleans to an array of floats indicating the
        # precise timing of the spikes. Append to the list of spike times,
        # where each row corresponds to a further step in the simulation.
        spike_list.append(spikes * (t + 0.001))
    # Here spike_list becomes a 2D array of shape [input_size, duration/dt].
    spike_array = np.array(spike_list).transpose()
    # To feed it to the input layer, convert it to the pyNN Sequence type.
    # The number of entries in the new container spike_sequences equals the
    # size of the input sample, and each entry is a Sequence of nonzero,
    # increasing spike times.
    spike_sequences = []
    for i in range(len(spike_array)):
        spike_sequences.append(
            Sequence([j for j in spike_array[i, :] if j != 0]))
    # Insert the Poisson input.
    layers[0].set(spike_times=spike_sequences)
def _set_input_spikes(self, input_config, node_sets, net):
    # Determine which assembly the spikes are for
    assembly = self._get_target(input_config, node_sets, net)
    assert isinstance(assembly, self.sim.Assembly)

    # Load spike data from file
    if input_config["module"] != "h5":
        raise NotImplementedError()
    io = SonataIO(base_dir="", spikes_file=input_config["input_file"])
    data = io.read()
    assert len(data) == 1
    if "trial" in input_config:
        raise NotImplementedError()
    # Assuming we can map trials to segments
    assert len(data[0].segments) == 1
    spiketrains = data[0].segments[0].spiketrains
    if len(spiketrains) != assembly.size:
        raise NotImplementedError()
    # todo: map cell ids in the spikes file to ids/index in the population
    # logger.info("SETTING SPIKETIMES")
    # logger.info(spiketrains)
    assembly.set(spike_times=[Sequence(st.times.rescale('ms').magnitude)
                              for st in spiketrains])