def __init__(self, filterbank, targetvar, *args, **kwds):
    self.targetvar = targetvar
    self.filterbank = filterbank
    filterbank.buffer_init()
    # update level keyword
    kwds['level'] = kwds.get('level', 0) + 1
    # Sanitize the clock - does it have the right dt value?
    if 'clock' in kwds:
        if int(1 / kwds['clock'].dt) != int(filterbank.samplerate):
            raise ValueError('Clock should have 1/dt=samplerate')
    else:
        kwds['clock'] = Clock(dt=1 / filterbank.samplerate)
    buffersize = kwds.pop('buffersize', 32)
    if not isinstance(buffersize, int):
        buffersize = int(buffersize * self.samplerate)
    self.buffersize = buffersize
    self.buffer_pointer = buffersize
    self.buffer_start = -buffersize
    NeuronGroup.__init__(self, filterbank.nchannels, *args, **kwds)

    @network_operation(when='start', clock=self.clock)
    def apply_filterbank_output():
        if self.buffer_pointer >= self.buffersize:
            self.buffer_pointer = 0
            self.buffer_start += self.buffersize
            self.buffer = self.filterbank.buffer_fetch(
                self.buffer_start, self.buffer_start + self.buffersize)
        setattr(self, targetvar, self.buffer[self.buffer_pointer, :])
        self.buffer_pointer += 1

    self.contained_objects.append(apply_filterbank_output)

def __init__(self, params=zheng_params, network=None):
    eqs = Equations('''
        G_total     : siemens
        G_total_exc : siemens
        cmr_o       : 1
        cb          : 1
        g           : 1
        c_ab        : 1
        cb_0        : 1
        g_0         : 1
        ''')
    NeuronGroup.__init__(self, 1, model=eqs, compile=True, freeze=True)
    self.params = params
    self.c_ab = self.params.c_ab
    self.cb_0 = self.params.cb_0
    self.g_0 = self.params.g_0
    self.cb = self.cb_0
    self.g = self.g_0
    if network is not None:
        self.G_total = linked_var(network, 'g_syn', func=sum)
        self.G_total_exc = linked_var(network, 'g_syn_exc', func=sum)

def initialize_neurongroup(self):
    # Add 'refractory' parameter on the CPU only
    if not self.use_gpu:
        if self.max_refractory is not None:
            refractory = 'refractory'
            self.model.add_param('refractory', second)
        else:
            refractory = self.refractory
    else:
        if self.max_refractory is not None:
            refractory = 0 * ms
        else:
            refractory = self.refractory

    # Must recompile the Equations: the functions are not transferred after pickling/unpickling
    self.model.compile_functions()

    self.group = NeuronGroup(self.neurons,
                             model=self.model,
                             reset=self.reset,
                             threshold=self.threshold,
                             refractory=refractory,
                             max_refractory=self.max_refractory,
                             method=self.method,
                             clock=Clock(dt=self.dt))

    if self.initial_values is not None:
        for param, value in self.initial_values.iteritems():
            self.group.state(param)[:] = value

def ousim(mu_amp, mu_offs, sigma_amp, sigma_offs, freq, V_th):
    # mu_amp, mu_offs, sigma_amp, sigma_offs, freq, V_th = config
    if sigma_amp > sigma_offs:
        sigma_amp = sigma_offs
    # print("Setting up OU LIF simulation...")
    ounet = Network()
    clock.reinit_default_clock()
    eqs = Equations('dV/dt = mu-(V+V0)/tau + sigma*I/sqrt(dt) : volt')
    eqs += Equations('dI/dt = -I/dt + xi/sqrt(dt) : 1')
    eqs += Equations('mu = mu_amp*sin(t*freq*2*pi) + mu_offs : volt/second')
    eqs += Equations('sigma = sigma_amp*sin(t*freq*2*pi) + sigma_offs :'
                     ' volt/sqrt(second)')
    eqs.prepare()
    ounrn = NeuronGroup(1, eqs, threshold=V_th, refractory=t_refr,
                        reset=V_reset)
    ounet.add(ounrn)
    ounrn.V = V0
    V_mon = StateMonitor(ounrn, 'V', record=True)
    st_mon = SpikeMonitor(ounrn)
    ounet.add(V_mon, st_mon)
    ounet.run(duration)
    V_mon.insert_spikes(st_mon, value=V_th*2)
    times = V_mon.times
    membrane = V_mon[0]
    return times, st_mon.spiketimes[0], membrane

def pif_reset():
    defaultclock.reinit()
    sim = Network()
    I = 0.2*nA
    R = 1*Mohm
    lifeq = """
    dV/dt = I*R/ms : volt
    Vth : volt
    """
    thstep = 15*mV
    nrn = NeuronGroup(1, lifeq, threshold="V>=Vth", reset="V=0*mV")
    nrn.V = 0*mV
    nrn.Vth = thstep
    sim.add(nrn)
    #connection = Connection(inputgrp, nrn, state="V", weight=0.5*mV)
    #sim.add(inputgrp, connection)
    vmon = StateMonitor(nrn, "V", record=True)
    thmon = StateMonitor(nrn, "Vth", record=True)
    spikemon = SpikeMonitor(nrn, record=True)
    sim.add(vmon, thmon, spikemon)
    sim.run(duration)
    return vmon, thmon, spikemon

def generate_data():
    g = NeuronGroup(1, model=equations, reset=0, threshold=1)
    g.I = TimedArray(input, dt=.1*ms)
    g.tau = 25*ms
    g.R = 3e9
    SpM = SpikeMonitor(g)
    StM = StateMonitor(g, 'V', record=True)
    net = Network(g, SpM, StM)
    net.run(1*second)
    return StM.values[0], SpM.spikes

def build(self, traj, brian_list, network_dict):
    if not self.pre_built:
        eqs = Equations(traj.model, tau=traj.tau)
        ng = NeuronGroup(traj.N, eqs, threshold=traj.threshold,
                         reset=traj.reset, refractory=traj.refr)
        ng.v0 = traj.v00
        brian_list.append(ng)
        network_dict['group'] = ng

def test_stim_pyramidal_impact():
    simulation_clock = Clock(dt=.5*ms)
    trial_duration = 1*second
    dcs_start_time = .5*second
    stim_levels = [-8, -6, -4, -2, -1, -.5, -.25, 0, .25, .5, 1, 2, 4, 6, 8]
    voltages = np.zeros(len(stim_levels))

    for idx, stim_level in enumerate(stim_levels):
        print('testing stim_level %.3fpA' % stim_level)
        eqs = exp_IF(default_params.C, default_params.gL, default_params.EL,
                     default_params.VT, default_params.DeltaT)

        # AMPA conductance - recurrent input current
        eqs += exp_synapse('g_ampa_r', default_params.tau_ampa, siemens)
        eqs += Current('I_ampa_r=g_ampa_r*(E-vm): amp', E=default_params.E_ampa)

        # AMPA conductance - background input current
        eqs += exp_synapse('g_ampa_b', default_params.tau_ampa, siemens)
        eqs += Current('I_ampa_b=g_ampa_b*(E-vm): amp', E=default_params.E_ampa)

        # AMPA conductance - task input current
        eqs += exp_synapse('g_ampa_x', default_params.tau_ampa, siemens)
        eqs += Current('I_ampa_x=g_ampa_x*(E-vm): amp', E=default_params.E_ampa)

        # Voltage-dependent NMDA conductance
        eqs += biexp_synapse('g_nmda', default_params.tau1_nmda,
                             default_params.tau2_nmda, siemens)
        eqs += Equations('g_V = 1/(1+(Mg/3.57)*exp(-0.062 *vm/mV)) : 1 ',
                         Mg=default_params.Mg)
        eqs += Current('I_nmda=g_V*g_nmda*(E-vm): amp', E=default_params.E_nmda)

        # GABA-A conductance
        eqs += exp_synapse('g_gaba_a', default_params.tau_gaba_a, siemens)
        eqs += Current('I_gaba_a=g_gaba_a*(E-vm): amp', E=default_params.E_gaba_a)

        eqs += InjectedCurrent('I_dcs: amp')

        group = NeuronGroup(1, model=eqs, threshold=-20*mV,
                            refractory=pyr_params.refractory,
                            reset=default_params.Vr,
                            compile=True, freeze=True, clock=simulation_clock)
        group.C = pyr_params.C
        group.gL = pyr_params.gL

        @network_operation(clock=simulation_clock)
        def inject_current(c):
            if simulation_clock.t > dcs_start_time:
                group.I_dcs = stim_level*pA

        monitor = StateMonitor(group, 'vm', simulation_clock, record=True)
        net = Network(group, monitor, inject_current)
        net.run(trial_duration, report='text')
        voltages[idx] = monitor.values[0, -1]*1000

    voltages = voltages - voltages[7]
    plt.figure()
    plt.plot(stim_levels, voltages)
    plt.xlabel('Stimulation level (pA)')
    plt.ylabel('Voltage Change (mV)')
    plt.show()

def __init__(self, pyramidal_group, clock=defaultclock):
    eqs = Equations('''
        LFP : amp
        ''')
    NeuronGroup.__init__(self, 1, model=eqs, compile=True, freeze=True,
                         clock=clock)
    self.LFP = linked_var(pyramidal_group, 'I_abs', func=sum)

def __init__(self, clock, params=default_params, network=None):
    eqs = Equations('''
        G_total     : siemens
        G_total_exc : siemens
        ds/dt=eta*(G_total-G_base)/G_base-s/tau_s-(f_in-1.0)/tau_f : 1
        df_in/dt=s/second : 1
        dv/dt=1/tau_o*(f_in-f_out) : 1
        f_out=v**(1.0/alpha) : 1
        o_e=1-(1-e_base)**(1/f_in) : 1
        dq/dt=1/tau_o*((f_in*o_e/e_base)-f_out*q/v) : 1
        y=v_base*((k1+k2)*(1-q)-(k2+k3)*(1-v)) : 1
        G_base      : siemens
        eta         : 1/second
        tau_s       : second
        tau_f       : second
        alpha       : 1
        tau_o       : second
        e_base      : 1
        v_base      : 1
        k1          : 1
        k2          : 1
        k3          : 1
        ''')
    NeuronGroup.__init__(self, 1, model=eqs, clock=clock, compile=True,
                         freeze=True)
    self.params = params
    self.G_base = params.G_base
    self.eta = params.eta
    self.tau_s = params.tau_s
    self.tau_f = params.tau_f
    self.alpha = params.alpha
    self.tau_o = params.tau_o
    self.e_base = params.e_base
    self.v_base = params.v_base
    self.k1 = params.k1
    self.params.s_e = params.s_e_0 * exp(-params.TE / params.T_2E)
    self.params.s_i = params.s_i_0 * exp(-params.TE / params.T_2I)
    self.params.beta = self.params.s_e / self.params.s_i
    self.k2 = self.params.beta * params.r_0 * self.e_base * params.TE
    self.k3 = self.params.beta - 1
    self.f_in = 1
    self.s = 0
    self.v = 1
    self.q = 1
    if network is not None:
        self.G_total = linked_var(network, 'g_syn', func=sum)
        self.G_total_exc = linked_var(network, 'g_syn_exc', func=sum)

class BretteCriterion(Criterion):
    def initialize(self, tau_metric):
        self.delay_range = max(self.delays) - min(self.delays)  # delay range
        self.min_delay = abs(min(self.delays))  # minimum of possible delay
        self.corr_vector = zeros(self.N)
        self.norm_pop = zeros(self.N)
        self.norm_target = zeros(self.N)
        self.nbr_neurons_group = self.N/self.K

        eqs = """
        tau : second
        dv/dt = (-v)/tau : volt
        """
        # network to convolve target spikes with the kernel
        self.input_target = SpikeGeneratorGroup(self.K, self.spikes,
                                                clock=self.group.clock)
        self.kernel_target = NeuronGroup(self.N, model=eqs,
                                         clock=self.group.clock)
        self.C_target = DelayConnection(self.input_target, self.kernel_target,
                                        'v', structure='sparse',
                                        max_delay=self.min_delay)
        self.kernel_target.tau = tau_metric
        for igroup in xrange(self.K):
            self.C_target.W[igroup, igroup*self.nbr_neurons_group:(1+igroup)*self.nbr_neurons_group] = ones(self.nbr_neurons_group)
            self.C_target.delay[igroup, igroup*self.nbr_neurons_group:(1+igroup)*self.nbr_neurons_group] = self.min_delay * ones(self.nbr_neurons_group)

        # network to convolve population spikes with the kernel
        self.kernel_population = NeuronGroup(self.N, model=eqs,
                                             clock=self.group.clock)
        self.C_population = DelayConnection(self.group, self.kernel_population,
                                            'v', structure='sparse',
                                            max_delay=self.delay_range)
        for iN in xrange(self.N):
            self.C_population.delay[iN, iN] = diagflat(self.min_delay + self.delays[iN])
        self.C_population.connect_one_to_one(self.group, self.kernel_population)
        self.kernel_population.tau = tau_metric

        self.spikecount = SpikeCounter(self.group)
        self.contained_objects = [self.kernel_population, self.C_population,
                                  self.spikecount, self.input_target,
                                  self.C_target, self.kernel_target]

    def __call__(self):
        trace_population = self.kernel_population.state_('v')
        trace_target = self.kernel_target.state_('v')
        self.corr_vector += trace_population*trace_target
        self.norm_pop += trace_population**2
        self.norm_target += trace_target**2

    def get_values(self):
        return (self.corr_vector, self.norm_pop, self.norm_target)

    def normalize(self, values):
        corr_vector = values[0]
        norm_pop = values[1]
        norm_target = values[2]
        corr_vector[nonzero(self.spikecount.count == 0)] = -inf
        #print self.corr_vector/sqrt(norm_pop)/sqrt(norm_target)
        return self.corr_vector/sqrt(norm_pop)/sqrt(norm_target)

def initialize_neurongroup(self):
    # Add 'refractory' parameter on the CPU only
    if not self.use_gpu:
        if self.max_refractory is not None:
            refractory = 'refractory'
            self.model.add_param('refractory', second)
        else:
            refractory = self.refractory
    else:
        if self.max_refractory is not None:
            refractory = 0*ms
        else:
            refractory = self.refractory

    # Must recompile the Equations: the functions are not transferred after pickling/unpickling
    self.model.compile_functions()

    # print refractory, self.max_refractory
    if type(refractory) is double:
        refractory = refractory*second

    # if self.give_neuron_group == False:
    self.group = NeuronGroup(self.neurons,  # TODO: * slices?
                             model=self.model,
                             reset=self.reset,
                             threshold=self.threshold,
                             refractory=refractory,
                             max_refractory=self.max_refractory,
                             method=self.method,
                             clock=Clock(dt=self.dt))

    if self.initial_values is not None:
        for param, value in self.initial_values.iteritems():
            self.group.state(param)[:] = value

def _build_model(self, traj, brian_list, network_dict):
    """Builds the neuron groups from `traj`.

    Adds the neuron groups to `brian_list` and `network_dict`.

    """
    model = traj.parameters.model

    # Create the equations for both models
    eqs_dict = self._build_model_eqs(traj)

    # Create inhibitory neurons
    eqs_i = eqs_dict['i']
    neurons_i = NeuronGroup(N=model.N_i,
                            model=eqs_i,
                            threshold=model.V_th,
                            reset=model.reset_func,
                            refractory=model.refractory,
                            freeze=True,
                            compile=True,
                            method='Euler')

    # Create excitatory neurons
    eqs_e = eqs_dict['e']
    neurons_e = NeuronGroup(N=model.N_e,
                            model=eqs_e,
                            threshold=model.V_th,
                            reset=model.reset_func,
                            refractory=model.refractory,
                            freeze=True,
                            compile=True,
                            method='Euler')

    # Set the bias terms
    neurons_e.mu = rand(model.N_e) * (model.mu_e_max - model.mu_e_min) + model.mu_e_min
    neurons_i.mu = rand(model.N_i) * (model.mu_i_max - model.mu_i_min) + model.mu_i_min

    # Set initial membrane potentials
    neurons_e.V = rand(model.N_e)
    neurons_i.V = rand(model.N_i)

    # Add both groups to the `brian_list` and the `network_dict`
    brian_list.append(neurons_i)
    brian_list.append(neurons_e)
    network_dict['neurons_e'] = neurons_e
    network_dict['neurons_i'] = neurons_i

class VanRossumCriterion(Criterion):
    def initialize(self, tau):
        self.delay_range = max(self.delays) - min(self.delays)  # delay range
        self.min_delay = abs(min(self.delays))  # minimum of possible delay
        self.distance_vector = zeros(self.N)
        self.nbr_neurons_group = self.N/self.K

        eqs = """
        dv/dt = (-v)/tau : volt
        """
        # network to convolve target spikes with the kernel
        self.input_target = SpikeGeneratorGroup(self.K, self.spikes,
                                                clock=self.group.clock)
        self.kernel_target = NeuronGroup(self.K, model=eqs,
                                         clock=self.group.clock)
        self.C_target = DelayConnection(self.input_target, self.kernel_target,
                                        'v', structure='dense',
                                        max_delay=self.min_delay)
        self.C_target.connect_one_to_one(self.input_target, self.kernel_target)
        self.C_target.delay = self.min_delay*ones_like(self.C_target.delay)

        # network to convolve population spikes with the kernel
        self.kernel_population = NeuronGroup(self.N, model=eqs,
                                             clock=self.group.clock)
        self.C_population = DelayConnection(self.group, self.kernel_population,
                                            'v', structure='sparse',
                                            max_delay=self.delay_range)
        for iN in xrange(self.N):
            self.C_population.delay[iN, iN] = diagflat(self.min_delay + self.delays[iN])
        self.C_population.connect_one_to_one(self.group, self.kernel_population)

        self.spikecount = SpikeCounter(self.group)
        self.contained_objects = [self.kernel_population, self.C_population,
                                  self.spikecount, self.input_target,
                                  self.C_target, self.kernel_target]

    def __call__(self):
        trace_population = self.kernel_population.state_('v')
        trace_target = self.kernel_target.state_('v')
        for igroup in xrange(self.K):
            self.distance_vector[igroup*self.nbr_neurons_group:(1+igroup)*self.nbr_neurons_group] += (trace_population[igroup*self.nbr_neurons_group:(1+igroup)*self.nbr_neurons_group] - trace_target[igroup])**2

    def get_values(self):
        return (self.distance_vector)

    def normalize(self, distance_vector):
        distance_vector[nonzero(self.spikecount.count == 0)] = inf
        return -self.distance_vector*self.group.clock.dt

def get_spikes(model=None, reset=None, threshold=None,
               input=None, input_var='I', dt=None,
               **params):
    """
    Retrieves the spike times corresponding to the best parameters found by
    the modelfitting function.

    **Arguments**

    ``model``, ``reset``, ``threshold``, ``input``, ``input_var``, ``dt``
        Same parameters as for the ``modelfitting`` function.

    ``**params``
        The best parameters returned by the ``modelfitting`` function.

    **Returns**

    ``spiketimes``
        The spike times of the model with the given input and parameters.
    """
    duration = len(input) * dt
    ngroups = len(params[params.keys()[0]])

    group = NeuronGroup(N=ngroups, model=model, reset=reset,
                        threshold=threshold, clock=Clock(dt=dt))
    group.set_var_by_array(input_var, TimedArray(input, clock=group.clock))
    for param, values in params.iteritems():
        if (param == 'delays') | (param == 'fitness'):
            continue
        group.state(param)[:] = values

    M = SpikeMonitor(group)
    net = Network(group, M)
    net.run(duration)
    reinit_default_clock()
    return M.spikes

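# Hypothetical usage sketch for get_spikes() (Brian 1 syntax). The model
# string, the input array and the fitted parameter values below are
# illustrative placeholders, not results of an actual modelfitting() run.
import numpy as np
from brian import ms, mV, nA, ohm

example_eqs = '''
dV/dt = (R*I - V)/tau : volt
I : amp
R : ohm
tau : second
'''
input_current = 0.5 * nA * np.ones(10000)  # 1 s of constant input at dt = 0.1 ms
spiketimes = get_spikes(model=example_eqs, reset=0*mV, threshold=10*mV,
                        input=input_current, input_var='I', dt=0.1*ms,
                        R=[3e9*ohm], tau=[25*ms])  # one value per fitted group
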
def fun(sigma, args):
    """
    This function computes the mean firing rate of a LIF neuron with white
    noise input current (OU process with threshold).
    """
    if not isscalar(sigma):
        raise Exception('sigma must be a scalar')
    N = args['N']
    tau = args['tau']
    model = args['model']
    reset = args['reset']
    threshold = args['threshold']
    duration = args['duration']

    G = NeuronGroup(N, model=model, reset=reset, threshold=threshold)
    M = SpikeCounter(G)
    net = Network(G, M)
    net.run(duration)
    r = M.nspikes * 1.0 / N
    return r

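# Hypothetical usage sketch for fun() (Brian 1 syntax): sweep the noise
# amplitude sigma (a plain float, scaled by mV inside the model string) and
# print the mean rate. Assumption: Brian resolves `sigma` from fun()'s local
# namespace when the model string is compiled, which is how the function
# above appears to use it; the model string itself is illustrative.
from brian import mV, ms, second

example_args = dict(N=1000,
                    tau=10*ms,
                    model='dV/dt = (-V + sigma*mV*xi*sqrt(ms))/(10*ms) : volt',
                    reset=0*mV,
                    threshold=10*mV,
                    duration=1*second)
for sigma in (2.0, 4.0, 8.0):
    print(fun(sigma, example_args))
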
def _build_model(self, traj, brian_list, network_dict):
    """Builds the neuron groups from `traj`.

    Adds the neuron groups to `brian_list` and `network_dict`.

    """
    assert(isinstance(traj, SingleRun))

    model = traj.parameters.model

    # Create the equations for both models
    eqs_dict = self._build_model_eqs(traj)

    # Create inhibitory neurons
    eqs_i = eqs_dict['i']
    neurons_i = NeuronGroup(N=model.N_i,
                            model=eqs_i,
                            threshold=model.V_th,
                            reset=model.reset_func,
                            refractory=model.refractory,
                            freeze=True,
                            compile=True,
                            method='Euler')

    # Create excitatory neurons
    eqs_e = eqs_dict['e']
    neurons_e = NeuronGroup(N=model.N_e,
                            model=eqs_e,
                            threshold=model.V_th,
                            reset=model.reset_func,
                            refractory=model.refractory,
                            freeze=True,
                            compile=True,
                            method='Euler')

    # Set the bias terms
    neurons_e.mu = rand(model.N_e) * (model.mu_e_max - model.mu_e_min) + model.mu_e_min
    neurons_i.mu = rand(model.N_i) * (model.mu_i_max - model.mu_i_min) + model.mu_i_min

    # Set initial membrane potentials
    neurons_e.V = rand(model.N_e)
    neurons_i.V = rand(model.N_i)

    # Add both groups to the `brian_list` and the `network_dict`
    brian_list.append(neurons_i)
    brian_list.append(neurons_e)
    network_dict['neurons_e'] = neurons_e
    network_dict['neurons_i'] = neurons_i

def __init__(self, clock, params=zheng_params, network=None):
    eqs = Equations('''
        G_total     : siemens
        G_total_exc : siemens
        ds/dt=eta*(G_total-G_base)/G_base-s/tau_s-(f_in-1.0)/tau_f : 1
        df_in/dt=s/second : 1.0
        dv/dt=1/tau_o*(f_in-f_out) : 1
        f_out=v**(1.0/alpha) : 1
        do_e/dt=1.0/(phi/f_in)*(-o_e+(1.0-g)*(1.0-(1.0-e_base/(1.0-g_0))**(1.0/f_in))) : %.4f
        dcb/dt=1.0/(phi/f_in)*(-cb-(c_ab*o_e)/oe_log+c_ab*g) : 1
        oe_log      : 1
        cmr_o=(cb-g*c_ab)/(cb_0-g_0*c_ab) : 1
        dg/dt=1.0/(j*v_ratio*((r*transitTime)/e_base))*((cmr_o-1.0)-k*s) : %.4f
        dq/dt=1/tau_o*((f_in*o_e/e_base)-f_out*q/v) : 1
        y=v_0*((k1+k2)*(1-q)-(k2+k3)*(1-v)) : 1
        G_base      : siemens
        eta         : 1/second
        tau_s       : second
        tau_f       : second
        alpha       : 1
        tau_o       : second
        v_0         : 1
        k1          : 1
        k2          : 1
        k3          : 1
        phi         : %.4f*second
        e_base      : %.4f
        g_0         : %.4f
        c_ab        : 1
        cb_0        : 1
        v_ratio     : 1
        j           : 1
        transitTime : second
        k           : 1
        r           : 1
        ''' % (params.e_base, params.g_0, params.phi, params.e_base, params.g_0))
    NeuronGroup.__init__(self, 1, model=eqs, clock=clock, compile=True,
                         freeze=True)
    self.params = params
    self.G_base = params.G_base
    self.eta = params.eta
    self.tau_s = params.tau_s
    self.tau_f = params.tau_f
    self.alpha = params.alpha
    self.tau_o = params.tau_o
    self.e_base = params.e_base
    self.v_0 = params.v_0
    self.k1 = params.k1
    self.params.s_e = params.s_e_0 * exp(-params.TE / params.T_2E)
    self.params.s_i = params.s_i_0 * exp(-params.TE / params.T_2I)
    self.params.beta = self.params.s_e / self.params.s_i
    self.k2 = self.params.beta * params.r_0 * self.e_base * params.TE
    self.k3 = self.params.beta - 1.0
    self.c_ab = self.params.c_ab
    self.cb_0 = self.params.cb_0
    self.g_0 = self.params.g_0
    self.phi = self.params.phi
    self.v_ratio = self.params.v_ratio
    self.j = self.params.j
    self.transitTime = self.params.transitTime
    self.k = self.params.k
    self.r = self.params.r
    self.f_in = 1.0
    self.s = 0.0
    self.v = 1.0
    self.o_e = self.e_base
    self.cb = self.cb_0
    self.g = self.g_0
    self.oe_log = np.log(1.0 - self.o_e / (1.0 - self.g))
    self.q = 1.0
    if network is not None:
        self.G_total = linked_var(network, 'g_syn', func=sum)
        self.G_total_exc = linked_var(network, 'g_syn_exc', func=sum)

def __init__(self, lip_size, params, background_inputs=None,
             visual_cortex_input=None, go_input=None):
    self.lip_size = lip_size
    self.N = 2 * self.lip_size
    self.params = params
    self.background_inputs = background_inputs
    self.visual_cortex_input = visual_cortex_input
    self.go_input = go_input

    ## Set up equations
    # Exponential integrate-and-fire neuron
    eqs = exp_IF(params.C, params.gL, params.EL, params.VT, params.DeltaT)

    # AMPA conductance - recurrent input current
    eqs += exp_synapse('g_ampa_r', params.tau_ampa, siemens)
    eqs += Current('I_ampa_r=g_ampa_r*(E-vm): amp', E=params.E_ampa)

    # AMPA conductance - background input current
    eqs += exp_synapse('g_ampa_b', params.tau_ampa, siemens)
    eqs += Current('I_ampa_b=g_ampa_b*(E-vm): amp', E=params.E_ampa)

    # AMPA conductance - task input current
    eqs += exp_synapse('g_ampa_x', params.tau_ampa, siemens)
    eqs += Current('I_ampa_x=g_ampa_x*(E-vm): amp', E=params.E_ampa)

    # AMPA conductance - go input current
    eqs += exp_synapse('g_ampa_g', params.tau_ampa, siemens)
    eqs += Current('I_ampa_g=g_ampa_g*(E-vm): amp', E=params.E_ampa)

    # Voltage-dependent NMDA conductance
    eqs += biexp_synapse('g_nmda', params.tau1_nmda, params.tau2_nmda, siemens)
    eqs += Equations('g_V = 1/(1+(Mg/3.57)*exp(-0.062 *vm/mV)) : 1 ', Mg=params.Mg)
    eqs += Current('I_nmda=g_V*g_nmda*(E-vm): amp', E=params.E_nmda)

    # GABA-A conductance
    eqs += exp_synapse('g_gaba_a', params.tau_gaba_a, siemens)
    eqs += Current('I_gaba_a=g_gaba_a*(E-vm): amp', E=params.E_gaba_a)

    # GABA-B conductance
    eqs += biexp_synapse('g_gaba_b', params.tau1_gaba_b, params.tau2_gaba_b, siemens)
    eqs += Current('I_gaba_b=g_gaba_b*(E-vm): amp', E=params.E_gaba_b)

    # Total synaptic conductance
    eqs += Equations('g_syn=g_ampa_r+g_ampa_x+g_ampa_g+g_ampa_b+g_V*g_nmda+g_gaba_a+g_gaba_b : siemens')
    eqs += Equations('g_syn_exc=g_ampa_r+g_ampa_x+g_ampa_g+g_ampa_b+g_V*g_nmda : siemens')
    # Total synaptic current
    eqs += Equations('I_abs=abs(I_ampa_r)+abs(I_ampa_b)+abs(I_ampa_x)+abs(I_ampa_g)+abs(I_nmda)+abs(I_gaba_a) : amp')

    NeuronGroup.__init__(self, self.N, model=eqs, threshold=-20*mV,
                         reset=params.EL, compile=True)

    self.init_subpopulations()

    self.connections = []
    self.init_connectivity()

    if self.background_inputs is not None:
        # Background -> E+I population connections
        background_left_ampa = init_connection(self.background_inputs[0],
                                               self.left_lip.neuron_group,
                                               'g_ampa_b',
                                               self.params.w_ampa_min,
                                               self.params.w_ampa_max,
                                               self.params.p_b_e,
                                               delay=5*ms)
        background_right_ampa = init_connection(self.background_inputs[1],
                                                self.right_lip.neuron_group,
                                                'g_ampa_b',
                                                self.params.w_ampa_min,
                                                self.params.w_ampa_max,
                                                self.params.p_b_e,
                                                delay=5*ms)
        self.connections.append(background_left_ampa)
        self.connections.append(background_right_ampa)

    if self.visual_cortex_input is not None:
        # Task input -> E population connections
        vc_left_lip_ampa = init_connection(self.visual_cortex_input[0],
                                           self.left_lip.e_contra_vis,
                                           'g_ampa_x',
                                           self.params.w_ampa_min,
                                           self.params.w_ampa_max,
                                           self.params.p_v_ec_vis,
                                           delay=270*ms)
        vc_right_lip_ampa = init_connection(self.visual_cortex_input[1],
                                            self.right_lip.e_contra_vis,
                                            'g_ampa_x',
                                            self.params.w_ampa_min,
                                            self.params.w_ampa_max,
                                            self.params.p_v_ec_vis,
                                            delay=270*ms)
        self.connections.append(vc_left_lip_ampa)
        self.connections.append(vc_right_lip_ampa)

    if self.go_input is not None:
        go_left_lip_i_ampa = init_connection(self.go_input, self.left_lip.i_group,
                                             'g_ampa_g', self.params.w_ampa_min,
                                             self.params.w_ampa_max,
                                             self.params.p_g_i, delay=5*ms)
        go_right_lip_i_ampa = init_connection(self.go_input, self.right_lip.i_group,
                                              'g_ampa_g', self.params.w_ampa_min,
                                              self.params.w_ampa_max,
                                              self.params.p_g_i, delay=5*ms)
        go_left_lip_e_ampa = init_connection(self.go_input, self.left_lip.e_group,
                                             'g_ampa_g', self.params.w_ampa_min,
                                             self.params.w_ampa_max,
                                             self.params.p_g_e, delay=5*ms)
        go_right_lip_e_ampa = init_connection(self.go_input, self.right_lip.e_group,
                                              'g_ampa_g', self.params.w_ampa_min,
                                              self.params.w_ampa_max,
                                              self.params.p_g_e, delay=5*ms)
        self.connections.append(go_left_lip_i_ampa)
        self.connections.append(go_right_lip_i_ampa)
        self.connections.append(go_left_lip_e_ampa)
        self.connections.append(go_right_lip_e_ampa)

def hodgkin_huxley(duration=100, num=10, percent_excited=0.7, sample=1):
    """
    The hodgkin_huxley function takes the following parameters:
        duration - the time period to model, measured in milliseconds.
        num - an integer value representing the number of neurons you want to model.
        percent_excited - a float between 0 and 1 representing the percentage of excited neurons.
        sample - gives the number of random neurons you would like plotted (default is 1)
    """
    # Assert that we are getting valid input
    assert(percent_excited >= 0 and percent_excited <= 1.0)
    assert(duration > 0)
    assert(num > 0)
    assert(sample > 0)
    assert(num >= sample)

    # Constants used in the modeling equation
    area = 20000*umetre**2
    Cm = (1*ufarad*cm**-2)*area
    gl = (5e-5*siemens*cm**-2)*area
    El = -60*mV
    EK = -90*mV
    ENa = 50*mV
    g_na = (100*msiemens*cm**-2)*area
    g_kd = (30*msiemens*cm**-2)*area
    VT = -63*mV
    # Time constants
    taue = 5*ms   # excitatory
    taui = 10*ms  # inhibitory
    # Reversal potentials
    Ee = 0*mV    # excitatory
    Ei = -80*mV  # inhibitory
    # Synaptic weights
    we = 6*nS    # excitatory
    wi = 67*nS   # inhibitory

    # The model equations
    eqs = Equations('''
    dv/dt = (gl*(El-v)+ge*(Ee-v)+gi*(Ei-v)-g_na*(m*m*m)*h*(v-ENa)-g_kd*(n*n*n*n)*(v-EK))/Cm : volt
    dm/dt = alpham*(1-m)-betam*m : 1
    dn/dt = alphan*(1-n)-betan*n : 1
    dh/dt = alphah*(1-h)-betah*h : 1
    dge/dt = -ge*(1./taue) : siemens
    dgi/dt = -gi*(1./taui) : siemens
    alpham = 0.32*(mV**-1)*(13*mV-v+VT)/(exp((13*mV-v+VT)/(4*mV))-1.)/ms : Hz
    betam = 0.28*(mV**-1)*(v-VT-40*mV)/(exp((v-VT-40*mV)/(5*mV))-1)/ms : Hz
    alphah = 0.128*exp((17*mV-v+VT)/(18*mV))/ms : Hz
    betah = 4./(1+exp((40*mV-v+VT)/(5*mV)))/ms : Hz
    alphan = 0.032*(mV**-1)*(15*mV-v+VT)/(exp((15*mV-v+VT)/(5*mV))-1.)/ms : Hz
    betan = .5*exp((10*mV-v+VT)/(40*mV))/ms : Hz
    ''')

    # Build the neuron group
    neurons = NeuronGroup(
        num,
        model=eqs,
        threshold=EmpiricalThreshold(threshold=-20*mV, refractory=3*ms),
        implicit=True,
        freeze=True
    )
    num_excited = int(num * percent_excited)
    num_inhibited = num - num_excited
    excited = neurons.subgroup(num_excited)
    inhibited = neurons.subgroup(num_inhibited)
    excited_conn = Connection(excited, neurons, 'ge', weight=we, sparseness=0.02)
    inhibited_conn = Connection(inhibited, neurons, 'gi', weight=wi, sparseness=0.02)

    # Initialization
    neurons.v = El + (randn(num)*5 - 5)*mV
    neurons.ge = (randn(num)*1.5 + 4)*10.*nS
    neurons.gi = (randn(num)*12 + 20)*10.*nS

    # Record a few traces and run
    recorded = choice(num, sample)
    trace = StateMonitor(neurons, 'v', record=recorded)
    run(duration * msecond)
    for i in recorded:
        plot(trace.times/ms, trace[i]/mV)
    show()

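# Hypothetical usage sketch: simulate 100 Hodgkin-Huxley neurons (70%
# excitatory) for 200 ms and plot the membrane traces of 5 random neurons,
# following the parameter descriptions in the docstring above.
hodgkin_huxley(duration=200, num=100, percent_excited=0.7, sample=5)
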
from matplotlib.patches import Rectangle
import spikerlib as sl
import numpy as np
import sys

sim = Network()
duration = 200*ms
dt = 0.1*ms
tau = 10*ms
Vth = 15*mV
Vreset = 0*mV
Vreset = 13.65*mV
lifeq = "dV/dt = -V/tau : volt"
lifnrn = NeuronGroup(1, lifeq, threshold="V>=Vth", reset=Vreset)
lifnrn.V = Vreset
sim.add(lifnrn)

Nin = 200
fin = 80*Hz
Sin = 0.6
sigma = 0.0*ms
weight = 0.1*mV
inputs = sl.tools.fast_synchronous_input_gen(Nin, fin, Sin, sigma, duration)
connection = Connection(inputs, lifnrn, "V", weight=weight)
sim.add(inputs, connection)

vmon = StateMonitor(lifnrn, "V", record=True)
spikemon = SpikeMonitor(lifnrn)
sim.add(vmon, spikemon)

def runsim(neuron_model,
           # sim params
           dt, simtime, prerun, monitors, recvars,
           # stimulation params
           fstim, r0_bg, r0_stim, stim_starts, stim_stops, stim_odors,
           stim_amps, stim_start_var,
           # network params
           beeid, N_glu, N_KC, ORNperGlu, PNperKC, PN_I0, LN_I0,
           # network weights
           wi, wORNLN, wORNPN, wPNKC,
           # default params
           V0min, inh_struct=None, Winh=None, timestep=500, report=None):

    np.random.seed()  # needed for numpy/brian when running parallel sims
    define_default_clock(dt=dt)

    inh_on_off = 0 if (wi == 0) or (wi is None) or (wORNLN is None) else 1

    ######################### NEURONGROUPS #########################
    NG = dict()

    # ORN Input
    # For each glomerulus, random temporal response jitter can be added.
    # The jitter is added to the response onset. Maximum jitter is given by stim_start_var.
    # stim_start_jittered is a vector containing the jittered stim start times.
    # orn_activation returns a boolean vector of stim presence given time t.
    # Total ORN rate: baseline component equal for all units,
    # plus individual activations.
    jitter = np.random.uniform(0, stim_start_var, N_glu)
    stim_tun = lambda odorN: fstim(N_glu=N_glu, odorN=odorN) * r0_stim
    orn_activation = lambda t: np.sum(
        [a*stim_tun(odorN=o)*np.logical_and(np.greater(t, prerun+stim_start+jitter),
                                            np.less(t, prerun+stim_stop))
         for stim_start, stim_stop, o, a in zip(stim_starts, stim_stops, stim_odors, stim_amps)], 0)
    orn_rates = lambda t: np.repeat(r0_bg + orn_activation(t), repeats=ORNperGlu)

    NG['ORN'] = PoissonGroup(ORNperGlu*N_glu, rates=orn_rates)
    NG['PN'] = NeuronGroup(N_glu, **neuron_model)
    NG['LN'] = NeuronGroup(N_glu*inh_on_off, **neuron_model)
    if 'KC' in monitors:
        NG['KC'] = NeuronGroup(N_KC, **neuron_model)

    ######################### CONNECTIONS #########################
    c = dict()
    c['ORNPN'] = Connection(NG['ORN'], NG['PN'], 'ge')
    for i in np.arange(N_glu):
        c['ORNPN'].connect_full(NG['ORN'].subgroup(ORNperGlu), NG['PN'][i], weight=wORNPN)

    if inh_on_off:
        print('-- inhibiting --', wi)
        c['ORNLN'] = Connection(NG['ORN'], NG['LN'], 'ge')
        c['LNPN'] = Connection(NG['LN'], NG['PN'], 'gi', weight=(wi*35)/N_glu)
        for i in np.arange(N_glu):
            c['ORNLN'].connect_full(NG['ORN'][i*ORNperGlu:(i+1)*ORNperGlu],
                                    NG['LN'][i], weight=wORNLN)
        if inh_struct:
            c['LNPN'].connect(NG['LN'], NG['PN'], Winh)

    if 'KC' in monitors:
        c['KC'] = Connection(NG['PN'], NG['KC'], 'ge')
        c['KC'].connect_random(NG['PN'], NG['KC'], p=PNperKC/float(N_glu),
                               weight=wPNKC, seed=beeid)

    ######################### INITIAL VALUES #########################
    VT = neuron_model['threshold']
    NG['PN'].vm = np.random.uniform(V0min, VT, size=len(NG['PN']))
    if inh_on_off:
        NG['LN'].vm = np.random.uniform(V0min, VT, size=len(NG['LN']))
    if 'KC' in monitors:
        NG['KC'].vm = np.random.uniform(V0min, VT, size=len(NG['KC']))

    net = Network(NG.values(), c.values())

    #### Compensation currents ###
    NG['PN'].I0 = PN_I0
    NG['LN'].I0 = LN_I0

    ######################### PRE-RUN #########################
    net.run(prerun)

    ######################### MONITORS #########################
    spmons = [SpikeMonitor(NG[mon], record=True) for mon in monitors]
    net.add(spmons)
    if len(recvars) > 0:
        mons = [MultiStateMonitor(NG[mon], vars=recvars, record=True, timestep=timestep)
                for mon in monitors]
        net.add(mons)
    else:
        mons = None

    ######################### RUN #########################
    net = run(simtime, report=report)

    out_spikes = dict((monitors[i], np.array(sm.spikes)) for i, sm in enumerate(spmons))
    if mons is not None:
        out_mons = dict((mon, dict((var, statemon.values) for var, statemon in m.iteritems()))
                        for mon, m in zip(monitors, mons))
    else:
        out_mons = None

    # subtract the prerun from spike times, if there are any
    for spikes in out_spikes.itervalues():
        if len(spikes) != 0:
            spikes[:, 1] -= prerun

    return out_spikes, out_mons

from brian import (NeuronGroup, Network, StateMonitor, second, ms, volt, mV)
import numpy as np
import matplotlib.pyplot as plt

network = Network()
XT = -50*mV
DeltaT = 0.05*mV/ms
eqs = "dX/dt = DeltaT*exp((X-XT)/DeltaT) : volt"
neuron = NeuronGroup(1, eqs, threshold="X>=XT", reset=-65*mV)
neuron.X = -65*mV
network.add(neuron)
vmon = StateMonitor(neuron, "X", record=True)
network.add(vmon)
network.run(1*second)
plt.figure("Voltage")
plt.plot(vmon.times, vmon[0])
plt.show()

    betah=1./(exp(-0.1/mV*(V+28*mV))+1)/ms : Hz
    dn/dt=5*(alphan*(1-n)-betan*n) : 1
    alphan=-0.01/mV*(V+34*mV)/(exp(-0.1/mV*(V+34*mV))-1)/ms : Hz
    betan=0.125*exp(-(V+44*mV)/(80*mV))/ms : Hz
    dgExc/dt = -gExc*(1./taue) : siemens
    dgInh/dt = -gInh*(1./taui) : siemens
    Iapp : amp
    '''

neuron = NeuronGroup(1, eqs, threshold=threshold, method='RK')
neuron.V = -70*mV

# delays
B1, A1, A2, A3 = 5*ms, 20*ms, 30*ms, 40*ms
target_delay = A2 - B1  # delay to be learned by neuron

spikes_A = [(0, 10*ms), (0, 115*ms), (0, 300*ms), (0, 450*ms)]
spikes_B = [(1, 10*ms), (1, 130*ms), (1, 335*ms), (1, 475*ms)]
inputs = SpikeGeneratorGroup(2, spikes_A+spikes_B)

synapse_A = Synapses(inputs[0], neuron, model="w : siemens", pre="gExc_post += w")
synapse_A[:, :] = 3
synapse_A.w = WExc
synapse_A.delay[0] = A1

from brian import log_level_debug
log_level_debug()
set_global_preferences(useweave=True, usecodegen=True, usecodegenweave=True,
                       usenewpropagate=True, usecstdp=True)
from matplotlib.pyplot import plot, show, subplot

params = {}
params["t_Nr"] = 2*ms
params["t_Nf"] = 80*ms
params["t_AMPA"] = 5*ms

simclock = Clock(dt=0.01*ms)

input = NeuronGroup(2, model='dv/dt=1/(10*ms):1', threshold=1, reset=0,
                    clock=simclock)
neurons = NeuronGroup(1, model="""dv/dt=(NMDAo+AMPAo-v)/(10*ms) : 1
                                  NMDAo : 1
                                  AMPAo : 1""",
                      freeze=True, clock=simclock)

ampadyn = '''
dAMPAoS/dt = -AMPAoS/t_AMPA : 1
AMPAi = AMPAoS
AMPAo = AMPAoS / (t_AMPA /msecond) : 1
'''
nmdadyn = '''
dNMDAoS/dt = (1/t_Nr)*(Nnor*NMDAi-NMDAoS) : 1
dNMDAi/dt = -(1/t_Nf)*NMDAi : 1
Nnor = (t_Nf/t_Nr)**((t_Nr)/(t_Nf - t_Nr)) : 1
Nscal = (t_Nf/msecond)**(t_Nf/(t_Nf - t_Nr))/(t_Nr/msecond)**(t_Nr/(t_Nf - t_Nr)) : 1

def run_simulation(realizations=1, trials=1, t=3000 * ms, alpha=1, ree=1, k=50,
                   winlen=50 * ms, verbose=True, t_stim=0):
    """
    Run the whole simulation with the specified parameters. All model parameters are set in the function.

    Keyword arguments:
    :param realizations: number of repetitions of the whole simulation, number of network instances
    :param trials: number of trials for network instance
    :param t: simulation time
    :param alpha: scaling factor for number of neurons in the network
    :param ree: clustering coefficient
    :param k: number of clusters
    :param t_stim: duration of stimulation of a subset of clusters
    :param winlen: length of window in ms
    :param verbose: plotting flag
    :return: numpy matrices with spike times
    """

    # The equations defining our neuron model
    eqs_string = '''
                dV/dt = (mu - V)/tau + x : volt
                dx/dt = -1.0/tau_2*(x - y/tau_1) : volt/second
                dy/dt = -y/tau_1 : volt
                mu : volt
                tau : second
                tau_2 : second
                tau_1 : second
                '''
    # Model parameters
    n_e = int(4000 * alpha)  # number of exc neurons
    n_i = int(1000 * alpha)  # number of inh neurons
    tau_e = 15 * ms  # membrane time constant (for excitatory synapses)
    tau_i = 10 * ms  # membrane time constant (for inhibitory synapses)
    tau_syn_2_e = 3 * ms  # exc synaptic time constant tau2 in paper
    tau_syn_2_i = 2 * ms  # inh synaptic time constant tau2 in paper
    tau_syn_1 = 1 * ms  # exc/inh synaptic time constant tau1 in paper
    vt = -50 * mV  # firing threshold
    vr = -65 * mV  # reset potential
    dv = vt - vr  # delta v
    refrac = 5 * ms  # absolute refractory period

    # scale the weights to ensure same variance in the inputs
    wee = 0.024 * dv * np.sqrt(1. / alpha)
    wie = 0.014 * dv * np.sqrt(1. / alpha)
    wii = -0.057 * dv * np.sqrt(1. / alpha)
    wei = -0.045 * dv * np.sqrt(1. / alpha)

    # Connection probability
    p_ee = 0.2
    p_ii = 0.5
    p_ie = 0.5
    p_ei = 0.5
    # determine probs for inside and outside of clusters
    p_in, p_out = get_cluster_connection_probs(ree, k, p_ee)

    mu_min_e, mu_max_e = 1.1, 1.2
    mu_min_i, mu_max_i = 1.0, 1.05

    # increase cluster weights if there are clusters
    wee_cluster = wee if p_in == p_out else 1.9 * wee

    # define numpy array for data storing
    all_data = np.zeros((realizations, trials, n_e+n_i, int(t/winlen)//2))

    for realization in range(realizations):
        # clear workspace to make sure that is a new realization of the network
        clear(True, True)
        reinit()

        # set up new random bias parameter for every type of neuron
        mu_e = vr + np.random.uniform(mu_min_e, mu_max_e, n_e) * dv  # bias for excitatory neurons
        mu_i = vr + np.random.uniform(mu_min_i, mu_max_i, n_i) * dv  # bias for inhibitory neurons

        # Let's create an equation object from our string and parameters
        model_eqs = Equations(eqs_string)

        # Let's create 5000 neurons
        all_neurons = NeuronGroup(N=n_e + n_i,
                                  model=model_eqs,
                                  threshold=vt,
                                  reset=vr,
                                  refractory=refrac,
                                  freeze=True,
                                  method='Euler',
                                  compile=True)

        # Divide the neurons into excitatory and inhibitory ones
        neurons_e = all_neurons[0:n_e]
        neurons_i = all_neurons[n_e:n_e + n_i]

        # set the bias
        neurons_e.mu = mu_e
        neurons_i.mu = mu_i
        neurons_e.tau = tau_e
        neurons_i.tau = tau_i
        neurons_e.tau_2 = tau_syn_2_e
        neurons_i.tau_2 = tau_syn_2_i
        all_neurons.tau_1 = tau_syn_1

        # set up connections
        connections = Connection(all_neurons, all_neurons, 'y')

        # do the cluster connection like cross validation: cluster neuron := test idx; other neurons := train idx
        kf = KFold(n=n_e, n_folds=k)
        for idx_out, idx_in in kf:  # idx_out holds all other neurons; idx_in holds all cluster neurons
            # connect current cluster to itself
            connections.connect_random(all_neurons[idx_in[0]:idx_in[-1]],
                                       all_neurons[idx_in[0]:idx_in[-1]],
                                       sparseness=p_in, weight=wee_cluster)
            # connect current cluster to other neurons
            connections.connect_random(all_neurons[idx_in[0]:idx_in[-1]],
                                       all_neurons[idx_out[0]:idx_out[-1]],
                                       sparseness=p_out, weight=wee)
        # connect all excitatory to all inhibitory, irrespective of clustering
        connections.connect_random(all_neurons[0:n_e], all_neurons[n_e:(n_e + n_i)],
                                   sparseness=p_ie, weight=wie)
        # connect all inhibitory to all excitatory
        connections.connect_random(all_neurons[n_e:(n_e + n_i)], all_neurons[0:n_e],
                                   sparseness=p_ei, weight=wei)
        # connect all inhibitory to all inhibitory
        connections.connect_random(all_neurons[n_e:(n_e + n_i)], all_neurons[n_e:(n_e + n_i)],
                                   sparseness=p_ii, weight=wii)

        # set up spike monitors
        spike_mon_e = SpikeMonitor(neurons_e)
        spike_mon_i = SpikeMonitor(neurons_i)

        # set up network with monitors
        network = Network(all_neurons, connections, spike_mon_e, spike_mon_i)

        # run this network for some number of trials, every time with
        for trial in range(trials):
            # different initial values
            all_neurons.V = vr + (vt - vr) * np.random.rand(len(all_neurons)) * 1.4

            # Calibration phase
            # run for the first half of the time to let the neurons adapt
            network.run(t/2)

            # reset monitors to start recording phase
            spike_mon_i.reinit()
            spike_mon_e.reinit()

            # stimulation if duration is given
            # define index variable for the stimulation possibility (is 0 for stimulation time=0)
            t_stim_idx = int(t_stim / (winlen/ms))
            if not(t_stim == 0):
                # Stimulation phase, increase input to subset of clusters
                all_neurons[:400].mu += 0.07 * dv
                network.run(t_stim * ms, report='text')
                # set back to normal
                all_neurons[:400].mu -= 0.07 * dv
                # save data
                all_data[realization, trial, :n_e, :t_stim_idx] = spikes_counter(spike_mon_e, winlen)
                all_data[realization, trial, n_e:, :t_stim_idx] = spikes_counter(spike_mon_i, winlen)
                # reset monitors
                spike_mon_e.reinit()
                spike_mon_i.reinit()

            # run the remaining time of the simulation
            network.run((t/2) - t_stim*ms, report='text')

            # save results
            all_data[realization, trial, :n_e, t_stim_idx:] = spikes_counter(spike_mon_e, winlen)
            all_data[realization, trial, n_e:, t_stim_idx:] = spikes_counter(spike_mon_i, winlen)

            if verbose:
                plt.ion()
                plt.figure()
                raster_plot(spike_mon_e)
                plt.title('Excitatory neurons')

            spike_mon_e.reinit()
            spike_mon_i.reinit()

    return all_data

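# Hypothetical usage sketch: one small network realization (alpha=0.1, i.e.
# 400 excitatory + 100 inhibitory neurons), a single trial with clustered
# connectivity (ree=2.5, 50 clusters) and no extra cluster stimulation;
# returns the binned spike counts described in the docstring above.
counts = run_simulation(realizations=1, trials=1, t=1000 * ms, alpha=0.1,
                        ree=2.5, k=50, winlen=50 * ms, verbose=False, t_stim=0)
print(counts.shape)  # (realizations, trials, n_e + n_i, time bins)
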
def reinit(self):
    NeuronGroup.reinit(self)
    self.filterbank.buffer_init()
    self.buffer_pointer = self.buffersize
    self.buffer_start = -self.buffersize

def runsim(fin):
    clear(True)
    gc.collect()
    defaultclock.reinit()
    weight = 0.16*mV
    sim = Network()
    duration = 2.0*second
    Vth = 15*mV
    Vreset = 13.65*mV
    trefr = 2*ms
    lifeq = """
    dV/dt = -V/(10*ms) : volt
    Vth : volt
    """
    nrndef = {"model": lifeq, "threshold": "V>=Vth", "reset": "V=Vreset",
              "refractory": 0.1*ms}
    inputgroups = []
    connections = []
    neurons = []
    Nneurons = len(fin)
    neurons = NeuronGroup(Nneurons, **nrndef)
    neurons.V = 0*mV
    neurons.Vth = 15*mV
    for idx in range(Nneurons):
        fin_i = fin[idx]*Hz
        inputgrp = PoissonGroup(50, fin_i)
        conn = Connection(inputgrp, neurons[idx], state="V", weight=weight)
        inputgroups.append(inputgrp)
        connections.append(conn)
    voltagemon = StateMonitor(neurons, "V", record=True)
    spikemon = SpikeMonitor(neurons, record=True)
    sim.add(neurons, voltagemon, spikemon)
    sim.add(*inputgroups)
    sim.add(*connections)

    @network_operation
    def refractory_threshold(clock):
        for idx in range(Nneurons):
            if (len(spikemon.spiketimes[idx]) and
                    clock.t < spikemon.spiketimes[idx][-1]*second + trefr):
                neurons.Vth[idx] = 100*mV
            else:
                neurons.Vth[idx] = Vth

    sim.add(refractory_threshold)
    print("Running simulation of {} neurons for {} s".format(Nneurons, duration))
    sim.run(duration, report="stdout")

    mnpss = []
    allnpss = []
    outisi = []
    for idx in range(Nneurons):
        vmon = voltagemon[idx]
        smon = spikemon[idx]
        if not len(smon):
            continue
        outisi.append(duration*1000/len(smon))
        if len(smon) > 0:
            npss = sl.tools.npss(vmon, smon, 0*mV, 15*mV, 10*ms, 2*ms)
        else:
            npss = 0
        mnpss.append(np.mean(npss))
        allnpss.append(npss)
    return outisi, mnpss

class ModelFitting(Fitness):
    def initialize(self, **kwds):
        # Initialization of variables
        self.use_gpu = self.unit_type == 'GPU'
        # Gets the key,value pairs in shared_data
        for key, val in self.shared_data.iteritems():
            setattr(self, key, val)
        # Gets the key,value pairs in **kwds
        for key, val in kwds.iteritems():
            setattr(self, key, val)

        self.neurons = self.nodesize
        self.groups = self.groups

        self.model = cPickle.loads(self.model)
        if type(self.model) is str:
            self.model = Equations(self.model)

        self.initialize_neurongroup()
        self.transform_data()
        self.inject_input()

        # if self.use_gpu:
        #     ########
        #     # TODO
        #     ########
        #     # Select integration scheme according to method
        #     if self.method == 'Euler': scheme = euler_scheme
        #     elif self.method == 'RK': scheme = rk2_scheme
        #     elif self.method == 'exponential_Euler': scheme = exp_euler_scheme
        #     else: raise Exception("The numerical integration method is not valid")
        #
        #     self.mf = GPUModelFitting(self.group, self.model, self.input, self.I_offset,
        #                               self.spiketimes, self.spiketimes_offset, zeros(self.neurons), 0*ms, self.delta,
        #                               precision=self.precision, scheme=scheme)
        # else:
        #     self.cc = CoincidenceCounter(self.group, self.spiketimes, self.spiketimes_offset,
        #                                  onset=self.onset, delta=self.delta)

    def initialize_neurongroup(self):
        # Add 'refractory' parameter on the CPU only
        if not self.use_gpu:
            if self.max_refractory is not None:
                refractory = 'refractory'
                self.model.add_param('refractory', second)
            else:
                refractory = self.refractory
        else:
            if self.max_refractory is not None:
                refractory = 0*ms
            else:
                refractory = self.refractory

        # Must recompile the Equations: the functions are not transferred after pickling/unpickling
        self.model.compile_functions()

        self.group = NeuronGroup(self.neurons,
                                 model=self.model,
                                 reset=self.reset,
                                 threshold=self.threshold,
                                 refractory=refractory,
                                 max_refractory=self.max_refractory,
                                 method=self.method,
                                 clock=Clock(dt=self.dt))

        if self.initial_values is not None:
            for param, value in self.initial_values.iteritems():
                self.group.state(param)[:] = value

    def transform_data(self):
        self.transformer = DataTransformer(self.neurons,
                                           self.inputs,
                                           spikes=self.spikes,
                                           traces=self.traces,
                                           dt=self.dt,
                                           slices=self.slices,
                                           overlap=self.overlap,
                                           groups=self.groups)
        self.total_steps = self.transformer.total_steps
        self.sliced_duration = self.transformer.sliced_duration

        self.sliced_inputs = self.transformer.slice_traces(self.inputs)
        self.inputs_inline, self.inputs_offset = self.transformer.transform_traces(self.sliced_inputs)

        if self.traces is not None:
            self.sliced_traces = self.transformer.slice_traces(self.traces)
            self.traces_inline, self.traces_offset = self.transformer.transform_traces(self.sliced_traces)
        else:
            self.sliced_traces, self.traces_inline, self.traces_offset = None, None, None

        if self.spikes is not None:
            self.sliced_spikes = self.transformer.slice_spikes(self.spikes)
            self.spikes_inline, self.spikes_offset = self.transformer.transform_spikes(self.sliced_spikes)
        else:
            self.sliced_spikes, self.spikes_inline, self.spikes_offset = None, None, None

    def inject_input(self):
        # Injects current in consecutive subgroups, where I_offset have the same value
        # on successive intervals
        I_offset = self.inputs_offset
        k = -1
        for i in hstack((nonzero(diff(I_offset))[0], len(I_offset) - 1)):
            I_offset_subgroup_value = I_offset[i]
            I_offset_subgroup_length = i - k
            sliced_subgroup = self.group.subgroup(I_offset_subgroup_length)
            input_sliced_values = self.inputs_inline[I_offset_subgroup_value:I_offset_subgroup_value + self.total_steps]
            sliced_subgroup.set_var_by_array(self.input_var,
                                             TimedArray(input_sliced_values, clock=self.group.clock))
            k = i

    def initialize_criterion(self, delays, tau_metric=None):
        # general criterion parameters
        params = dict(group=self.group,
                      traces=self.sliced_traces,
                      spikes=self.sliced_spikes,
                      targets_count=self.groups*self.slices,
                      duration=self.sliced_duration,
                      onset=self.onset,
                      spikes_inline=self.spikes_inline,
                      spikes_offset=self.spikes_offset,
                      traces_inline=self.traces_inline,
                      traces_offset=self.traces_offset,
                      delays=delays,
                      when='start')
        criterion_name = self.criterion.__class__.__name__

        # criterion-specific parameters
        if criterion_name == 'GammaFactor':
            params['delta'] = self.criterion.delta
            params['coincidence_count_algorithm'] = self.criterion.coincidence_count_algorithm
            self.criterion_object = GammaFactorCriterion(**params)

        if criterion_name == 'LpError':
            params['p'] = self.criterion.p
            params['varname'] = self.criterion.varname
            self.criterion_object = LpErrorCriterion(**params)

        if criterion_name == 'VanRossum':
            params['tau'] = self.criterion.tau
            self.criterion_object = VanRossumCriterion(**params)

        if criterion_name == 'Brette':
            params['tau_metric'] = tau_metric
            self.criterion_object = BretteCriterion(**params)

    def update_neurongroup(self, **param_values):
        """
        Inject fitting parameters into the NeuronGroup
        """
        # Sets the parameter values in the NeuronGroup object
        self.group.reinit()
        for param, value in param_values.iteritems():
            self.group.state(param)[:] = kron(value, ones(self.slices))  # kron param_values if slicing
        # Reinitializes the model variables
        if self.initial_values is not None:
            for param, value in self.initial_values.iteritems():
                self.group.state(param)[:] = value

    def combine_sliced_values(self, values):
        if type(values) is tuple:
            combined_values = tuple([sum(reshape(v, (self.slices, -1)), axis=0) for v in values])
        else:
            combined_values = sum(reshape(values, (self.slices, -1)), axis=0)
        return combined_values

    def evaluate(self, **param_values):
        """
        Use fitparams['delays'] to take delays into account
        Use fitparams['refractory'] to take refractory into account
        """
        delays = param_values.pop('delays', zeros(self.neurons))
        refractory = param_values.pop('refractory', zeros(self.neurons))
        tau_metric = param_values.pop('tau_metric', zeros(self.neurons))

        # repeat spike delays and refractory to take slices into account
        delays = kron(delays, ones(self.slices))
        refractory = kron(refractory, ones(self.slices))
        tau_metric = kron(tau_metric, ones(self.slices))

        self.update_neurongroup(**param_values)
        if self.criterion.__class__.__name__ == 'Brette':
            self.initialize_criterion(delays, tau_metric)
        else:
            self.initialize_criterion(delays)

        if self.use_gpu:
            pass
            #########
            # TODO
            #########
            # # Reinitializes the simulation object
            # self.mf.reinit_vars(self.input, self.I_offset, self.spiketimes, self.spiketimes_offset, delays, refractory)
            # # LAUNCHES the simulation on the GPU
            # self.mf.launch(self.duration, self.stepsize)
            # coincidence_count = self.mf.coincidence_count
            # spike_count = self.mf.spike_count
        else:
            # set the refractory period
            if self.max_refractory is not None:
                self.group.refractory = refractory

            # Launch the simulation on the CPU
            self.group.clock.reinit()
            net = Network(self.group, self.criterion_object)
            net.run(self.duration)

        sliced_values = self.criterion_object.get_values()
        combined_values = self.combine_sliced_values(sliced_values)
        values = self.criterion_object.normalize(combined_values)
        return values

    def __init__(self, lip_size, params, background_inputs=None, visual_cortex_input=None, go_input=None):
        self.lip_size = lip_size
        self.N = 2*self.lip_size
        self.params = params
        self.background_inputs = background_inputs
        self.visual_cortex_input = visual_cortex_input
        self.go_input = go_input

        ## Set up equations
        # Exponential integrate-and-fire neuron
        eqs = exp_IF(params.C, params.gL, params.EL, params.VT, params.DeltaT)

        # AMPA conductance - recurrent input current
        eqs += exp_synapse('g_ampa_r', params.tau_ampa, siemens)
        eqs += Current('I_ampa_r=g_ampa_r*(E-vm): amp', E=params.E_ampa)

        # AMPA conductance - background input current
        eqs += exp_synapse('g_ampa_b', params.tau_ampa, siemens)
        eqs += Current('I_ampa_b=g_ampa_b*(E-vm): amp', E=params.E_ampa)

        # AMPA conductance - task input current
        eqs += exp_synapse('g_ampa_x', params.tau_ampa, siemens)
        eqs += Current('I_ampa_x=g_ampa_x*(E-vm): amp', E=params.E_ampa)

        # AMPA conductance - go input current
        eqs += exp_synapse('g_ampa_g', params.tau_ampa, siemens)
        eqs += Current('I_ampa_g=g_ampa_g*(E-vm): amp', E=params.E_ampa)

        # Voltage-dependent NMDA conductance
        eqs += biexp_synapse('g_nmda', params.tau1_nmda, params.tau2_nmda, siemens)
        eqs += Equations('g_V = 1/(1+(Mg/3.57)*exp(-0.062*vm/mV)) : 1', Mg=params.Mg)
        eqs += Current('I_nmda=g_V*g_nmda*(E-vm): amp', E=params.E_nmda)

        # GABA-A conductance
        eqs += exp_synapse('g_gaba_a', params.tau_gaba_a, siemens)
        eqs += Current('I_gaba_a=g_gaba_a*(E-vm): amp', E=params.E_gaba_a)

        # GABA-B conductance
        eqs += biexp_synapse('g_gaba_b', params.tau1_gaba_b, params.tau2_gaba_b, siemens)
        eqs += Current('I_gaba_b=g_gaba_b*(E-vm): amp', E=params.E_gaba_b)

        # Total synaptic conductance
        eqs += Equations('g_syn=g_ampa_r+g_ampa_x+g_ampa_g+g_ampa_b+g_V*g_nmda+g_gaba_a+g_gaba_b : siemens')
        eqs += Equations('g_syn_exc=g_ampa_r+g_ampa_x+g_ampa_g+g_ampa_b+g_V*g_nmda : siemens')
        # Total synaptic current
        eqs += Equations('I_abs=abs(I_ampa_r)+abs(I_ampa_b)+abs(I_ampa_x)+abs(I_ampa_g)+abs(I_nmda)+abs(I_gaba_a) : amp')

        NeuronGroup.__init__(self, self.N, model=eqs, threshold=-20*mV, reset=params.EL, compile=True)

        self.init_subpopulations()

        self.connections = []
        self.init_connectivity()

        if self.background_inputs is not None:
            # Background -> E+I population connections
            background_left_ampa = init_connection(self.background_inputs[0], self.left_lip.neuron_group, 'g_ampa_b',
                                                   self.params.w_ampa_min, self.params.w_ampa_max, self.params.p_b_e, delay=5*ms)
            background_right_ampa = init_connection(self.background_inputs[1], self.right_lip.neuron_group, 'g_ampa_b',
                                                    self.params.w_ampa_min, self.params.w_ampa_max, self.params.p_b_e, delay=5*ms)
            self.connections.append(background_left_ampa)
            self.connections.append(background_right_ampa)

        if self.visual_cortex_input is not None:
            # Task input -> E population connections
            vc_left_lip_ampa = init_connection(self.visual_cortex_input[0], self.left_lip.e_contra_vis, 'g_ampa_x',
                                               self.params.w_ampa_min, self.params.w_ampa_max, self.params.p_v_ec_vis, delay=270*ms)
            vc_right_lip_ampa = init_connection(self.visual_cortex_input[1], self.right_lip.e_contra_vis, 'g_ampa_x',
                                                self.params.w_ampa_min, self.params.w_ampa_max, self.params.p_v_ec_vis, delay=270*ms)
            self.connections.append(vc_left_lip_ampa)
            self.connections.append(vc_right_lip_ampa)

        if self.go_input is not None:
            go_left_lip_i_ampa = init_connection(self.go_input, self.left_lip.i_group, 'g_ampa_g',
                                                 self.params.w_ampa_min, self.params.w_ampa_max, self.params.p_g_i, delay=5*ms)
            go_right_lip_i_ampa = init_connection(self.go_input, self.right_lip.i_group, 'g_ampa_g',
                                                  self.params.w_ampa_min, self.params.w_ampa_max, self.params.p_g_i, delay=5*ms)
            go_left_lip_e_ampa = init_connection(self.go_input, self.left_lip.e_group, 'g_ampa_g',
                                                 self.params.w_ampa_min, self.params.w_ampa_max, self.params.p_g_e, delay=5*ms)
            go_right_lip_e_ampa = init_connection(self.go_input, self.right_lip.e_group, 'g_ampa_g',
                                                  self.params.w_ampa_min, self.params.w_ampa_max, self.params.p_g_e, delay=5*ms)
            self.connections.append(go_left_lip_i_ampa)
            self.connections.append(go_right_lip_i_ampa)
            self.connections.append(go_left_lip_e_ampa)
            self.connections.append(go_right_lip_e_ampa)
Vrest = 0*mV
Vth = 20*mV
tau = 20*ms
Nnrns = 4
Ningroups = 1
Nin_per_group = 50
fin = 20*Hz
ingroup_sync = [0.5]
sigma = 0*ms
weight = 2.0*mV
Nallin = Nin_per_group*Ningroups
Nin = 25  # total number of connections each cell receives

lifeq_exc = Equations("dV/dt = (Vrest-V)/tau : volt")
lifeq_exc.prepare()
nrngroup = NeuronGroup(Nnrns, lifeq_exc, threshold="V>Vth", reset=Vrest, refractory=2*ms)
nrngroup.V = Vrest
network.add(nrngroup)

print("Setting up inputs and connections ...")
ingroups = []
inpconns = []
for ing in range(Ningroups):
    ingroup = sl.tools.fast_synchronous_input_gen(Nin_per_group, fin, ingroup_sync[ing],
                                                  sigma, duration, shuffle=False)
    inpconn = Connection(ingroup, nrngroup, 'V')
    ingroups.append(ingroup)
    inpconns.append(inpconn)
inputneurons = []

# CONNECTIONS
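# The snippet above is cut off after "# CONNECTIONS". What follows is only a hedged
# sketch (an assumption, not the original wiring) of one plausible completion: give
# each cell, on average, Nin random synapses of strength `weight` drawn from the
# input groups, using Brian 1's Connection.connect_random.
for ingroup, inpconn in zip(ingroups, inpconns):
    # connection probability chosen so that each neuron receives ~Nin of the Nallin inputs
    inpconn.connect_random(ingroup, nrngroup, p=float(Nin) / Nallin, weight=weight)
    network.add(inpconn)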
class Simulator(object):
    def __init__(self, model, reset, threshold,
                 inputs, input_var='I', dt=defaultclock.dt,
                 refractory=0*ms, max_refractory=None,
                 spikes=None, traces=None,
                 groups=1,
                 slices=1,
                 overlap=0*second,
                 onset=0*second,
                 neurons=1000,  # = nodesize = number of neurons on this node = (total number of neurons on this node)/(number of slices)
                 initial_values=None,
                 unit_type='CPU',
                 stepsize=128*ms,
                 precision='double',
                 criterion=None,
                 statemonitor_var=None,
                 spikemonitor=False,
                 nbr_spikes=200,
                 ntrials=1,
                 method='Euler',
#                 stand_alone=False,
#                 neuron_group=None,
#                 given_neuron_group=False
                 ):
#        print refractory, max_refractory
#        self.neuron_group = neuron_group
#        self.given_neuron_group = False
#        self.stand_alone = given_neuron_group
        self.model = model
        self.reset = reset
        self.threshold = threshold
        self.inputs = inputs
        self.input_var = input_var
        self.dt = dt
        self.refractory = refractory
        self.max_refractory = max_refractory
        self.spikes = spikes
        self.traces = traces
        self.initial_values = initial_values
        self.groups = groups
        self.slices = slices
        self.overlap = overlap
        self.ntrials = ntrials
        self.onset = onset
        self.neurons = neurons
        self.unit_type = unit_type
        if type(statemonitor_var) is not list and statemonitor_var is not None:
            statemonitor_var = [statemonitor_var]
        self.statemonitor_var = statemonitor_var
        self.spikemonitor = spikemonitor
        self.nbr_spikes = nbr_spikes
        self.stepsize = stepsize
        self.precision = precision
        self.criterion = criterion
        self.method = method
        self.use_gpu = self.unit_type == 'GPU'
        if self.statemonitor_var is not None:
            self.statemonitor_values = [zeros(self.neurons)]*len(statemonitor_var)

        self.initialize_neurongroup()
        self.transform_data()
        self.inject_input()
        if self.criterion.__class__.__name__ == 'Brette':
            self.initialize_criterion(delays=zeros(self.neurons), tau_metric=zeros(self.neurons))
        else:
            self.initialize_criterion(delays=zeros(self.neurons))
        if self.use_gpu:
            self.initialize_gpu()

    def initialize_neurongroup(self):
        # Add 'refractory' parameter on the CPU only
        if not self.use_gpu:
            if self.max_refractory is not None:
                refractory = 'refractory'
                self.model.add_param('refractory', second)
            else:
                refractory = self.refractory
        else:
            if self.max_refractory is not None:
                refractory = 0*ms
            else:
                refractory = self.refractory

        # Must recompile the Equations: the functions are not transferred after pickling/unpickling
        self.model.compile_functions()
#        print refractory, self.max_refractory
        if type(refractory) is double:
            refractory = refractory*second
#        if self.given_neuron_group == False:
        self.group = NeuronGroup(self.neurons,  # TODO: * slices?
                                 model=self.model, reset=self.reset, threshold=self.threshold,
                                 refractory=refractory, max_refractory=self.max_refractory,
                                 method=self.method, clock=Clock(dt=self.dt))
        if self.initial_values is not None:
            for param, value in self.initial_values.iteritems():
                self.group.state(param)[:] = value
#        else:
#            self.group = self.neuron_group

    def initialize_gpu(self):
        # Select integration scheme according to method
        if self.method == 'Euler': scheme = euler_scheme
        elif self.method == 'RK': scheme = rk2_scheme
        elif self.method == 'exponential_Euler': scheme = exp_euler_scheme
        else: raise Exception("The numerical integration method is not valid")

        self.mf = GPUModelFitting(self.group, self.model, self.criterion_object, self.input_var,
                                  self.neurons/self.groups, self.onset,
                                  statemonitor_var=self.statemonitor_var,
                                  spikemonitor=self.spikemonitor,
                                  nbr_spikes=self.nbr_spikes,
                                  duration=self.sliced_duration,
                                  precision=self.precision, scheme=scheme)

    def transform_data(self):
        self.transformer = DataTransformer(self.neurons, self.inputs,
                                           spikes=self.spikes, traces=self.traces, dt=self.dt,
                                           slices=self.slices, overlap=self.overlap,
                                           groups=self.groups, ntrials=self.ntrials)
        self.total_steps = self.transformer.total_steps
        self.sliced_duration = self.transformer.sliced_duration

        if self.ntrials > 1:
            self.inputs_inline = self.inputs.flatten()
            self.sliced_inputs = self.inputs
            self.inputs_offset = zeros(self.neurons)
        else:
            self.sliced_inputs = self.transformer.slice_traces(self.inputs)
            self.inputs_inline, self.inputs_offset = self.transformer.transform_traces(self.sliced_inputs)

        if self.traces is not None:
            self.sliced_traces = self.transformer.slice_traces(self.traces)
            self.traces_inline, self.traces_offset = self.transformer.transform_traces(self.sliced_traces)
        else:
            self.sliced_traces, self.traces_inline, self.traces_offset = None, None, None

        if self.spikes is not None:
            if self.ntrials > 1:
                self.sliced_spikes = self.transformer.slice_spikes(self.spikes)
                self.spikes_inline, self.trials_offset = self.transformer.transform_trials(self.spikes)
                self.spikes_offset = zeros((self.neurons), dtype=int)
            else:
                self.sliced_spikes = self.transformer.slice_spikes(self.spikes)
                self.spikes_inline, self.spikes_offset = self.transformer.transform_spikes(self.sliced_spikes)
                self.trials_offset = [0]
        else:
            self.sliced_spikes, self.spikes_inline, self.spikes_offset, self.trials_offset = None, None, None, None

    def inject_input(self):
        # Injects current in consecutive subgroups, where I_offset has the same value
        # on successive intervals
        I_offset = self.inputs_offset
        k = -1
        for i in hstack((nonzero(diff(I_offset))[0], len(I_offset) - 1)):
            I_offset_subgroup_value = I_offset[i]
            I_offset_subgroup_length = i - k
            sliced_subgroup = self.group.subgroup(I_offset_subgroup_length)
            input_sliced_values = self.inputs_inline[I_offset_subgroup_value:I_offset_subgroup_value + self.total_steps]
            sliced_subgroup.set_var_by_array(self.input_var, TimedArray(input_sliced_values, clock=self.group.clock))
            k = i

    def initialize_criterion(self, **criterion_params):
        # general criterion parameters
        params = dict(group=self.group, traces=self.sliced_traces, spikes=self.sliced_spikes,
                      targets_count=self.groups*self.slices, duration=self.sliced_duration, onset=self.onset,
                      spikes_inline=self.spikes_inline, spikes_offset=self.spikes_offset,
                      traces_inline=self.traces_inline, traces_offset=self.traces_offset,
                      trials_offset=self.trials_offset)
        for key, val in criterion_params.iteritems():
            params[key] = val
        criterion_name = self.criterion.__class__.__name__

        # criterion-specific parameters
        if criterion_name == 'GammaFactor':
            params['delta'] = self.criterion.delta
            params['coincidence_count_algorithm'] = self.criterion.coincidence_count_algorithm
            params['fr_weight'] = self.criterion.fr_weight
            self.criterion_object = GammaFactorCriterion(**params)

        if criterion_name == 'GammaFactor2':
            params['delta'] = self.criterion.delta
            params['coincidence_count_algorithm'] = self.criterion.coincidence_count_algorithm
            params['fr_weight'] = self.criterion.fr_weight
            params['nlevels'] = self.criterion.nlevels
            params['level_duration'] = self.criterion.level_duration
            self.criterion_object = GammaFactorCriterion2(**params)

        if criterion_name == 'LpError':
            params['p'] = self.criterion.p
            params['varname'] = self.criterion.varname
            params['method'] = self.criterion.method
            params['insets'] = self.criterion.insets
            params['outsets'] = self.criterion.outsets
            params['points'] = self.criterion.points
            self.criterion_object = LpErrorCriterion(**params)

        if criterion_name == 'VanRossum':
            params['tau'] = self.criterion.tau
            self.criterion_object = VanRossumCriterion(**params)

        if criterion_name == 'Brette':
            self.criterion_object = BretteCriterion(**params)

    def update_neurongroup(self, **param_values):
        """
        Inject fitting parameters into the NeuronGroup
        """
        # Sets the parameter values in the NeuronGroup object
        self.group.reinit()
        for param, value in param_values.iteritems():
            self.group.state(param)[:] = kron(value, ones(self.slices))  # kron param_values if slicing
        # Reinitializes the model variables
        if self.initial_values is not None:
            for param, value in self.initial_values.iteritems():
                self.group.state(param)[:] = value

    def combine_sliced_values(self, values):
        if type(values) is tuple:
            combined_values = tuple([sum(reshape(v, (self.slices, -1)), axis=0) for v in values])
        else:
            combined_values = sum(reshape(values, (self.slices, -1)), axis=0)
        return combined_values

    def run(self, **param_values):
        delays = param_values.pop('delays', zeros(self.neurons))
#        print self.refractory, self.max_refractory
        if self.max_refractory is not None:
            refractory = param_values.pop('refractory', zeros(self.neurons))
        else:
            refractory = self.refractory*ones(self.neurons)
        tau_metric = param_values.pop('tau_metric', zeros(self.neurons))
        self.update_neurongroup(**param_values)

        # repeat spike delays and refractory to take slices into account
        delays = kron(delays, ones(self.slices))
        refractory = kron(refractory, ones(self.slices))
        tau_metric = kron(tau_metric, ones(self.slices))

        # TODO: add here parameters to criterion_params if a criterion must use some parameters
        criterion_params = dict(delays=delays)
        if self.criterion.__class__.__name__ == 'Brette':
            criterion_params['tau_metric'] = tau_metric

        self.update_neurongroup(**param_values)
        self.initialize_criterion(**criterion_params)

        if self.use_gpu:
            # Reinitializes the simulation object
            self.mf.reinit_vars(self.criterion_object,
                                self.inputs_inline, self.inputs_offset,
                                self.spikes_inline, self.spikes_offset,
                                self.traces_inline, self.traces_offset,
                                delays, refractory)
            # LAUNCHES the simulation on the GPU
            self.mf.launch(self.sliced_duration, self.stepsize)
            # Synchronize the GPU values with a call to gpuarray.get()
            self.criterion_object.update_gpu_values()
        else:
            # set the refractory period
            if self.max_refractory is not None:
                self.group.refractory = refractory
            # Launch the simulation on the CPU
            self.group.clock.reinit()
            net = Network(self.group, self.criterion_object)
            if self.statemonitor_var is not None:
                self.statemonitors = []
                for state in self.statemonitor_var:
                    monitor = StateMonitor(self.group, state, record=True)
                    self.statemonitors.append(monitor)
                    net.add(monitor)
            net.run(self.sliced_duration)

        sliced_values = self.criterion_object.get_values()
        combined_values = self.combine_sliced_values(sliced_values)
        values = self.criterion_object.normalize(combined_values)
        return values

    def get_statemonitor_values(self):
        if not self.use_gpu:
            return [monitor.values for monitor in self.statemonitors]
        else:
            return self.mf.get_statemonitor_values()

    def get_spikemonitor_values(self):
        if not self.use_gpu:
            # NOTE: on the CPU path no dedicated spike monitor is created, so this
            # currently returns the state monitors' values
            return [monitor.values for monitor in self.statemonitors]
        else:
            return self.mf.get_spikemonitor_values()
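# Hedged usage sketch of the Simulator class above (an assumption, not code from this
# repository): fit a leaky integrate-and-fire model against recorded spikes with a
# gamma-factor criterion. `input_current` and `target_spikes` stand for data prepared by
# the caller, and GammaFactor is assumed to be the lightweight criterion holder referenced
# above (exposing delta, coincidence_count_algorithm and fr_weight attributes).
equations = Equations('''
    dV/dt = (R*I - V)/tau : volt
    I : amp
    R : ohm
    tau : second
''')
sim = Simulator(model=equations, reset=0*mV, threshold=10*mV,
                inputs=input_current, input_var='I',
                spikes=target_spikes, dt=0.1*ms, neurons=1000,
                criterion=GammaFactor(delta=4*ms))
# one parameter value per simulated neuron; run() returns the normalized criterion values
criterion_values = sim.run(R=50e6*ohm*ones(sim.neurons), tau=20*ms*ones(sim.neurons))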
def test_stim_pyramidal_impact():
    simulation_clock = Clock(dt=.5*ms)
    trial_duration = 1*second
    dcs_start_time = .5*second
    stim_levels = [-8, -6, -4, -2, -1, -.5, -.25, 0, .25, .5, 1, 2, 4, 6, 8]
    voltages = np.zeros(len(stim_levels))
    for idx, stim_level in enumerate(stim_levels):
        print('testing stim_level %.3fpA' % stim_level)
        eqs = exp_IF(default_params.C, default_params.gL, default_params.EL, default_params.VT, default_params.DeltaT)

        # AMPA conductance - recurrent input current
        eqs += exp_synapse('g_ampa_r', default_params.tau_ampa, siemens)
        eqs += Current('I_ampa_r=g_ampa_r*(E-vm): amp', E=default_params.E_ampa)

        # AMPA conductance - background input current
        eqs += exp_synapse('g_ampa_b', default_params.tau_ampa, siemens)
        eqs += Current('I_ampa_b=g_ampa_b*(E-vm): amp', E=default_params.E_ampa)

        # AMPA conductance - task input current
        eqs += exp_synapse('g_ampa_x', default_params.tau_ampa, siemens)
        eqs += Current('I_ampa_x=g_ampa_x*(E-vm): amp', E=default_params.E_ampa)

        # Voltage-dependent NMDA conductance
        eqs += biexp_synapse('g_nmda', default_params.tau1_nmda, default_params.tau2_nmda, siemens)
        eqs += Equations('g_V = 1/(1+(Mg/3.57)*exp(-0.062*vm/mV)) : 1', Mg=default_params.Mg)
        eqs += Current('I_nmda=g_V*g_nmda*(E-vm): amp', E=default_params.E_nmda)

        # GABA-A conductance
        eqs += exp_synapse('g_gaba_a', default_params.tau_gaba_a, siemens)
        eqs += Current('I_gaba_a=g_gaba_a*(E-vm): amp', E=default_params.E_gaba_a)

        eqs += InjectedCurrent('I_dcs: amp')

        group = NeuronGroup(1, model=eqs, threshold=-20*mV, refractory=pyr_params.refractory,
                            reset=default_params.Vr, compile=True, freeze=True, clock=simulation_clock)
        group.C = pyr_params.C
        group.gL = pyr_params.gL

        @network_operation(clock=simulation_clock)
        def inject_current(c):
            if simulation_clock.t > dcs_start_time:
                group.I_dcs = stim_level*pA

        monitor = StateMonitor(group, 'vm', simulation_clock, record=True)
        net = Network(group, monitor, inject_current)
        net.run(trial_duration, report='text')
        voltages[idx] = monitor.values[0, -1]*1000

    voltages = voltages - voltages[7]  # subtract the 0 pA baseline (index 7 of stim_levels)
    plt.figure()
    plt.plot(stim_levels, voltages)
    plt.xlabel('Stimulation level (pA)')
    plt.ylabel('Voltage Change (mV)')
    plt.show()
    def __init__(self, clock, params=zheng_params, network=None):
        eqs = Equations('''
            G_total : siemens
            G_total_exc : siemens
            ds/dt=eta*(G_total-G_base)/G_base-s/tau_s-(f_in-1.0)/tau_f : 1
            df_in/dt=s/second : 1.0
            dv/dt=1/tau_o*(f_in-f_out) : 1
            f_out=v**(1.0/alpha) : 1
            do_e/dt=1.0/(phi/f_in)*(-o_e+(1.0-g)*(1.0-(1.0-e_base/(1.0-g_0))**(1.0/f_in))) : %.4f
            dcb/dt=1.0/(phi/f_in)*(-cb-(c_ab*o_e)/oe_log+c_ab*g) : 1
            oe_log : 1
            cmr_o=(cb-g*c_ab)/(cb_0-g_0*c_ab) : 1
            dg/dt=1.0/(j*v_ratio*((r*transitTime)/e_base))*((cmr_o-1.0)-k*s) : %.4f
            dq/dt=1/tau_o*((f_in*o_e/e_base)-f_out*q/v) : 1
            y=v_0*((k1+k2)*(1-q)-(k2+k3)*(1-v)) : 1
            G_base : siemens
            eta : 1/second
            tau_s : second
            tau_f : second
            alpha : 1
            tau_o : second
            v_0 : 1
            k1 : 1
            k2 : 1
            k3 : 1
            phi : %.4f*second
            e_base : %.4f
            g_0 : %.4f
            c_ab : 1
            cb_0 : 1
            v_ratio : 1
            j : 1
            transitTime : second
            k : 1
            r : 1
            ''' % (params.e_base, params.g_0, params.phi, params.e_base, params.g_0))
        NeuronGroup.__init__(self, 1, model=eqs, clock=clock, compile=True, freeze=True)
        self.params = params
        self.G_base = params.G_base
        self.eta = params.eta
        self.tau_s = params.tau_s
        self.tau_f = params.tau_f
        self.alpha = params.alpha
        self.tau_o = params.tau_o
        self.e_base = params.e_base
        self.v_0 = params.v_0
        self.k1 = params.k1
        self.params.s_e = params.s_e_0*exp(-params.TE/params.T_2E)
        self.params.s_i = params.s_i_0*exp(-params.TE/params.T_2I)
        self.params.beta = self.params.s_e/self.params.s_i
        self.k2 = self.params.beta*params.r_0*self.e_base*params.TE
        self.k3 = self.params.beta-1.0
        self.c_ab = self.params.c_ab
        self.cb_0 = self.params.cb_0
        self.g_0 = self.params.g_0
        self.phi = self.params.phi
        self.v_ratio = self.params.v_ratio
        self.j = self.params.j
        self.transitTime = self.params.transitTime
        self.k = self.params.k
        self.r = self.params.r

        self.f_in = 1.0
        self.s = 0.0
        self.v = 1.0
        self.o_e = self.e_base
        self.cb = self.cb_0
        self.g = self.g_0
        self.oe_log = np.log(1.0-self.o_e/(1.0-self.g))
        self.q = 1.0

        if network is not None:
            self.G_total = linked_var(network, 'g_syn', func=sum)
            self.G_total_exc = linked_var(network, 'g_syn_exc', func=sum)
class Simulator(object):
    def __init__(self, model, reset, threshold,
                 inputs, input_var='I', dt=defaultclock.dt,
                 refractory=0*ms, max_refractory=None,
                 spikes=None, traces=None,
                 groups=1,
                 slices=1,
                 overlap=0*second,
                 onset=0*second,
                 neurons=1000,  # = nodesize = number of neurons on this node = total number of neurons/slices
                 initial_values=None,
                 unit_type='CPU',
                 stepsize=100*ms,
                 precision='double',
                 criterion=None,
                 statemonitor_var=None,
                 method='Euler'):
        self.model = model
        self.reset = reset
        self.threshold = threshold
        self.inputs = inputs
        self.input_var = input_var
        self.dt = dt
        self.refractory = refractory
        self.max_refractory = max_refractory
        self.spikes = spikes
        self.traces = traces
        self.initial_values = initial_values
        self.groups = groups
        self.slices = slices
        self.overlap = overlap
        self.onset = onset
        self.neurons = neurons
        self.unit_type = unit_type
        self.statemonitor_var = statemonitor_var
        self.stepsize = stepsize
        self.precision = precision
        self.criterion = criterion
        self.method = method
        self.use_gpu = self.unit_type == 'GPU'
        if self.statemonitor_var is not None:
            self.statemonitor_values = zeros(self.neurons)

        self.initialize_neurongroup()
        self.transform_data()
        self.inject_input()
        self.initialize_criterion(delays=zeros(self.neurons))
        if self.use_gpu:
            self.initialize_gpu()

    def initialize_neurongroup(self):
        # Add 'refractory' parameter on the CPU only
        if not self.use_gpu:
            if self.max_refractory is not None:
                refractory = 'refractory'
                self.model.add_param('refractory', second)
            else:
                refractory = self.refractory
        else:
            if self.max_refractory is not None:
                refractory = 0*ms
            else:
                refractory = self.refractory

        # Must recompile the Equations: the functions are not transferred after pickling/unpickling
        self.model.compile_functions()

        self.group = NeuronGroup(self.neurons,
                                 model=self.model, reset=self.reset, threshold=self.threshold,
                                 refractory=refractory, max_refractory=self.max_refractory,
                                 method=self.method, clock=Clock(dt=self.dt))
        if self.initial_values is not None:
            for param, value in self.initial_values.iteritems():
                self.group.state(param)[:] = value

    def initialize_gpu(self):
        # Select integration scheme according to method
        if self.method == 'Euler': scheme = euler_scheme
        elif self.method == 'RK': scheme = rk2_scheme
        elif self.method == 'exponential_Euler': scheme = exp_euler_scheme
        else: raise Exception("The numerical integration method is not valid")

        self.mf = GPUModelFitting(self.group, self.model, self.criterion_object, self.input_var, self.onset,
                                  statemonitor_var=self.statemonitor_var,
                                  duration=self.sliced_duration,
                                  precision=self.precision, scheme=scheme)

    def transform_data(self):
        self.transformer = DataTransformer(self.neurons, self.inputs,
                                           spikes=self.spikes, traces=self.traces, dt=self.dt,
                                           slices=self.slices, overlap=self.overlap, groups=self.groups)
        self.total_steps = self.transformer.total_steps
        self.sliced_duration = self.transformer.sliced_duration

        self.sliced_inputs = self.transformer.slice_traces(self.inputs)
        self.inputs_inline, self.inputs_offset = self.transformer.transform_traces(self.sliced_inputs)

        if self.traces is not None:
            self.sliced_traces = self.transformer.slice_traces(self.traces)
            self.traces_inline, self.traces_offset = self.transformer.transform_traces(self.sliced_traces)
        else:
            self.sliced_traces, self.traces_inline, self.traces_offset = None, None, None

        if self.spikes is not None:
            self.sliced_spikes = self.transformer.slice_spikes(self.spikes)
            self.spikes_inline, self.spikes_offset = self.transformer.transform_spikes(self.sliced_spikes)
        else:
            self.sliced_spikes, self.spikes_inline, self.spikes_offset = None, None, None

    def inject_input(self):
        # Injects current in consecutive subgroups, where I_offset has the same value
        # on successive intervals
        I_offset = self.inputs_offset
        k = -1
        for i in hstack((nonzero(diff(I_offset))[0], len(I_offset) - 1)):
            I_offset_subgroup_value = I_offset[i]
            I_offset_subgroup_length = i - k
            sliced_subgroup = self.group.subgroup(I_offset_subgroup_length)
            input_sliced_values = self.inputs_inline[I_offset_subgroup_value:I_offset_subgroup_value + self.total_steps]
            sliced_subgroup.set_var_by_array(self.input_var, TimedArray(input_sliced_values, clock=self.group.clock))
            k = i

    def initialize_criterion(self, **criterion_params):
        # general criterion parameters
        params = dict(group=self.group, traces=self.sliced_traces, spikes=self.sliced_spikes,
                      targets_count=self.groups*self.slices, duration=self.sliced_duration, onset=self.onset,
                      spikes_inline=self.spikes_inline, spikes_offset=self.spikes_offset,
                      traces_inline=self.traces_inline, traces_offset=self.traces_offset)
        for key, val in criterion_params.iteritems():
            params[key] = val
        criterion_name = self.criterion.__class__.__name__

        # criterion-specific parameters
        if criterion_name == 'GammaFactor':
            params['delta'] = self.criterion.delta
            params['coincidence_count_algorithm'] = self.criterion.coincidence_count_algorithm
            self.criterion_object = GammaFactorCriterion(**params)

        if criterion_name == 'LpError':
            params['p'] = self.criterion.p
            params['varname'] = self.criterion.varname
            self.criterion_object = LpErrorCriterion(**params)

    def update_neurongroup(self, **param_values):
        """
        Inject fitting parameters into the NeuronGroup
        """
        # Sets the parameter values in the NeuronGroup object
        self.group.reinit()
        for param, value in param_values.iteritems():
            self.group.state(param)[:] = kron(value, ones(self.slices))  # kron param_values if slicing
        # Reinitializes the model variables
        if self.initial_values is not None:
            for param, value in self.initial_values.iteritems():
                self.group.state(param)[:] = value

    def combine_sliced_values(self, values):
        if type(values) is tuple:
            combined_values = tuple([sum(reshape(v, (self.slices, -1)), axis=0) for v in values])
        else:
            combined_values = sum(reshape(values, (self.slices, -1)), axis=0)
        return combined_values

    def run(self, **param_values):
        delays = param_values.pop('delays', zeros(self.neurons))
        refractory = param_values.pop('refractory', zeros(self.neurons))
        self.update_neurongroup(**param_values)

        # repeat spike delays and refractory to take slices into account
        delays = kron(delays, ones(self.slices))
        refractory = kron(refractory, ones(self.slices))

        # TODO: add here parameters to criterion_params if a criterion must use some parameters
        criterion_params = dict(delays=delays)

        self.update_neurongroup(**param_values)
        self.initialize_criterion(**criterion_params)

        if self.use_gpu:
            # Reinitializes the simulation object
            self.mf.reinit_vars(self.criterion_object,
                                self.inputs_inline, self.inputs_offset,
                                self.spikes_inline, self.spikes_offset,
                                self.traces_inline, self.traces_offset,
                                delays, refractory)
            # LAUNCHES the simulation on the GPU
            self.mf.launch(self.sliced_duration, self.stepsize)
            # Synchronize the GPU values with a call to gpuarray.get()
            self.criterion_object.update_gpu_values()
        else:
            # set the refractory period
            if self.max_refractory is not None:
                self.group.refractory = refractory
            # Launch the simulation on the CPU
            self.group.clock.reinit()
            net = Network(self.group, self.criterion_object)
            if self.statemonitor_var is not None:
                self.statemonitor = StateMonitor(self.group, self.statemonitor_var, record=True)
                net.add(self.statemonitor)
            net.run(self.sliced_duration)

        sliced_values = self.criterion_object.get_values()
        combined_values = self.combine_sliced_values(sliced_values)
        values = self.criterion_object.normalize(combined_values)
        return values

    def get_statemonitor_values(self):
        if not self.use_gpu:
            return self.statemonitor.values
        else:
            return self.mf.get_statemonitor_values()
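# Standalone worked sketch (plain NumPy, hypothetical numbers, not part of the class above)
# of what combine_sliced_values() computes: with slices=2, the per-slice criterion values
# of each original neuron are summed column-wise after reshaping the flat array.
from numpy import arange, reshape, sum

slices, neurons = 2, 3
values = arange(slices * neurons)                 # [0 1 2 3 4 5]; slice 0 -> 0 1 2, slice 1 -> 3 4 5
combined = sum(reshape(values, (slices, -1)), axis=0)
print(combined)                                   # [3 5 7]: total per original neuron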
                          tau1=tau1,
                          tau2=tau2e,
                          )
model_eqs_i = Equations(eqs_string,
                        tau=taui,
                        tau1=tau1,
                        tau2=tau2i,
                        )

# Divide the neurons into excitatory and inhibitory ones
neurons_e = NeuronGroup(N=N_e,
                        model=model_eqs_e,
                        threshold=V_th,
                        reset=V_reset,
                        refractory=refr_period,
                        freeze=True,
                        method='Euler',
                        compile=True)

neurons_i = NeuronGroup(N=N_i,
                        model=model_eqs_i,
                        threshold=V_th,
                        reset=V_reset,
                        refractory=refr_period,
                        freeze=True,
                        method='Euler',
                        compile=True)

neurons_e.myu = np.random.uniform(myueMin, myueMax, N_e)
    betah=1./(exp(-0.1/mV*(v+28*mV))+1)/ms : Hz
    dn/dt=5*(alphan*(1-n)-betan*n) : 1
    alphan=-0.01/mV*(v+34*mV)/(exp(-0.1/mV*(v+34*mV))-1)/ms : Hz
    betan=0.125*exp(-(v+44*mV)/(80*mV))/ms : Hz
    dgExc/dt = -gExc*(1./taue) : siemens
    dgInh/dt = -gInh*(1./taui) : siemens
    Iapp : amp
    '''
neuron = NeuronGroup(len(inputcurrents), eqs, threshold=threshold, method='RK')
sim.add(neuron)

# Init conditions
neuron.v = -65*mV
neuron.Iapp = inputcurrents
neuron.h = 1

# Monitors
vmon = StateMonitor(neuron, 'v', record=True)
nmon = StateMonitor(neuron, 'n', record=True)
sim.add(vmon, nmon)

# Run
sim.run(duration, report='text')
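# Hedged follow-up sketch (assumes matplotlib is importable alongside Brian; not part of
# the original snippet): plot the membrane potential of the first neuron recorded by
# `vmon` during the run above.
import matplotlib.pyplot as plt

plt.plot(vmon.times / ms, vmon[0] / mV)
plt.xlabel('Time (ms)')
plt.ylabel('Membrane potential (mV)')
plt.show()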