def run_pop_code(pop_class, N, network_params, stimuli, trial_duration, report=None):
    simulation_clock = Clock(dt=1 * ms)
    pop = pop_class(N, simulation_clock, network_params)
    pop_monitor = MultiStateMonitor(pop, vars=['x', 'r', 'e'], record=True,
                                    clock=simulation_clock)

    @network_operation(when='start', clock=simulation_clock)
    def get_pop_input():
        pop.x = 0.0
        for stimulus in stimuli:
            if stimulus.start_time < simulation_clock.t < stimulus.end_time:
                pop.x += pop.get_population_function(stimulus.x, stimulus.var)

    net = Network(pop, pop_monitor, get_pop_input)
    #reinit_default_clock()
    net.run(trial_duration, report=report)

    g_total = np.sum(np.clip(pop_monitor['e'].values, 0, 1) *
                     pop_monitor['x'].values, axis=0) + 0.1
    voxel_monitor = get_bold_signal(g_total, voxel.default_params,
                                    range(int(stimuli[0].start_time / simulation_clock.dt)),
                                    trial_duration)

    # There is only one peak with rapid design
    if trial_duration > 6 * second:
        y_max = np.max(voxel_monitor['y'][0][60000:])
    else:
        y_max = np.max(voxel_monitor['y'][0])

    return pop_monitor, voxel_monitor, y_max
def get_bold_signal(g_total, voxel_params, baseline_range, trial_duration):
    simulation_clock = Clock(dt=1 * ms)
    voxel = Voxel(simulation_clock, params=voxel_params)
    voxel.G_base = g_total[baseline_range[0]:baseline_range[1]].mean()
    voxel_monitor = MultiStateMonitor(voxel, vars=['G_total', 's', 'f_in', 'v', 'f_out', 'q', 'y'],
                                      record=True, clock=simulation_clock)

    @network_operation(when='start', clock=simulation_clock)
    def get_input():
        idx = int(simulation_clock.t / simulation_clock.dt)
        if idx < baseline_range[0]:
            voxel.G_total = voxel.G_base
        elif idx < len(g_total):
            voxel.G_total = g_total[idx]
        else:
            voxel.G_total = voxel.G_base

    net = Network(voxel, get_input, voxel_monitor)
    #reinit_default_clock()
    bold_trial_duration = 10 * second
    if trial_duration + 6 * second > bold_trial_duration:
        bold_trial_duration = trial_duration + 6 * second
    net.run(bold_trial_duration)
    return voxel_monitor
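# A minimal usage sketch (assumptions: the project's `voxel` module with its
# `default_params`, as referenced by the callers above, is importable, and the
# constant drive below is only an illustrative stand-in for a real population
# signal): feed 2 s of synaptic drive sampled on the 1 ms clock through the
# BOLD model and plot the response.
import numpy as np
import matplotlib.pyplot as plt
from brian import second
import voxel

g_example = np.ones(2000) * 0.1                 # 2 s of drive at dt = 1 ms
bold = get_bold_signal(g_example, voxel.default_params,
                       range(500), 2 * second)  # baseline indices and trial length, mirroring the callers above
plt.plot(bold['y'].times, bold['y'][0])         # BOLD time course
plt.xlabel('Time (s)')
plt.ylabel('BOLD')
plt.show()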
def generate_data():
    g = NeuronGroup(1, model=equations, reset=0, threshold=1)
    g.I = TimedArray(input, dt=.1 * ms)
    g.tau = 25 * ms
    g.R = 3e9
    SpM = SpikeMonitor(g)
    StM = StateMonitor(g, 'V', record=True)
    net = Network(g, SpM, StM)
    net.run(1 * second)
    return StM.values[0], SpM.spikes
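# Hedged sketch of the module-level objects generate_data() relies on (`equations`
# and `input` are not defined in the snippet above, so the model and values here are
# assumptions): a dimensionless leaky integrator driven by a current trace sampled
# at 0.1 ms, scaled so that R*I is of order one.
from brian import Equations
import numpy as np

equations = Equations('''
    dV/dt = (R*I - V)/tau : 1
    I : 1
    R : 1
    tau : second
''')
# 1 s of noisy input at dt = 0.1 ms; with R = 3e9 this makes R*I fluctuate around 1
input = 1e-9 * (1.0 + 0.5 * np.random.randn(10000))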
def test_stim_pyramidal_impact():
    simulation_clock = Clock(dt=.5 * ms)
    trial_duration = 1 * second
    dcs_start_time = .5 * second
    stim_levels = [-8, -6, -4, -2, -1, -.5, -.25, 0, .25, .5, 1, 2, 4, 6, 8]
    voltages = np.zeros(len(stim_levels))
    for idx, stim_level in enumerate(stim_levels):
        print('testing stim_level %.3fpA' % stim_level)
        eqs = exp_IF(default_params.C, default_params.gL, default_params.EL,
                     default_params.VT, default_params.DeltaT)

        # AMPA conductance - recurrent input current
        eqs += exp_synapse('g_ampa_r', default_params.tau_ampa, siemens)
        eqs += Current('I_ampa_r=g_ampa_r*(E-vm): amp', E=default_params.E_ampa)

        # AMPA conductance - background input current
        eqs += exp_synapse('g_ampa_b', default_params.tau_ampa, siemens)
        eqs += Current('I_ampa_b=g_ampa_b*(E-vm): amp', E=default_params.E_ampa)

        # AMPA conductance - task input current
        eqs += exp_synapse('g_ampa_x', default_params.tau_ampa, siemens)
        eqs += Current('I_ampa_x=g_ampa_x*(E-vm): amp', E=default_params.E_ampa)

        # Voltage-dependent NMDA conductance
        eqs += biexp_synapse('g_nmda', default_params.tau1_nmda, default_params.tau2_nmda, siemens)
        eqs += Equations('g_V = 1/(1+(Mg/3.57)*exp(-0.062 *vm/mV)) : 1 ', Mg=default_params.Mg)
        eqs += Current('I_nmda=g_V*g_nmda*(E-vm): amp', E=default_params.E_nmda)

        # GABA-A conductance
        eqs += exp_synapse('g_gaba_a', default_params.tau_gaba_a, siemens)
        eqs += Current('I_gaba_a=g_gaba_a*(E-vm): amp', E=default_params.E_gaba_a)

        eqs += InjectedCurrent('I_dcs: amp')

        group = NeuronGroup(1, model=eqs, threshold=-20 * mV, refractory=pyr_params.refractory,
                            reset=default_params.Vr, compile=True, freeze=True,
                            clock=simulation_clock)
        group.C = pyr_params.C
        group.gL = pyr_params.gL

        @network_operation(clock=simulation_clock)
        def inject_current(c):
            if simulation_clock.t > dcs_start_time:
                group.I_dcs = stim_level * pA

        monitor = StateMonitor(group, 'vm', simulation_clock, record=True)
        net = Network(group, monitor, inject_current)
        net.run(trial_duration, report='text')
        voltages[idx] = monitor.values[0, -1] * 1000

    voltages = voltages - voltages[7]
    plt.figure()
    plt.plot(stim_levels, voltages)
    plt.xlabel('Stimulation level (pA)')
    plt.ylabel('Voltage Change (mV)')
    plt.show()
def evaluate(self, **param_values):
    """
    Use fitparams['delays'] to take delays into account
    Use fitparams['refractory'] to take refractory into account
    """
    delays = param_values.pop('delays', zeros(self.neurons))
    refractory = param_values.pop('refractory', zeros(self.neurons))
    tau_metric = param_values.pop('tau_metric', zeros(self.neurons))

    # repeat spike delays and refractory to take slices into account
    delays = kron(delays, ones(self.slices))
    refractory = kron(refractory, ones(self.slices))
    tau_metric = kron(tau_metric, ones(self.slices))

    self.update_neurongroup(**param_values)

    if self.criterion.__class__.__name__ == 'Brette':
        self.initialize_criterion(delays, tau_metric)
    else:
        self.initialize_criterion(delays)

    if self.use_gpu:
        pass
        #########
        # TODO
        #########
        # # Reinitializes the simulation object
        # self.mf.reinit_vars(self.input, self.I_offset, self.spiketimes,
        #                     self.spiketimes_offset, delays, refractory)
        # # LAUNCHES the simulation on the GPU
        # self.mf.launch(self.duration, self.stepsize)
        # coincidence_count = self.mf.coincidence_count
        # spike_count = self.mf.spike_count
    else:
        # set the refractory period
        if self.max_refractory is not None:
            self.group.refractory = refractory
        # Launch the simulation on the CPU
        self.group.clock.reinit()
        net = Network(self.group, self.criterion_object)
        net.run(self.duration)

    sliced_values = self.criterion_object.get_values()
    combined_values = self.combine_sliced_values(sliced_values)
    values = self.criterion_object.normalize(combined_values)
    return values
def ousim(mu_amp, mu_offs, sigma_amp, sigma_offs, freq, V_th):
    # mu_amp, mu_offs, sigma_amp, sigma_offs, freq, V_th = config
    if sigma_amp > sigma_offs:
        sigma_amp = sigma_offs
    # print("Setting up OU LIF simulation...")
    ounet = Network()
    clock.reinit_default_clock()
    eqs = Equations('dV/dt = mu-(V+V0)/tau + sigma*I/sqrt(dt) : volt')
    eqs += Equations('dI/dt = -I/dt + xi/sqrt(dt) : 1')
    eqs += Equations('mu = mu_amp*sin(t*freq*2*pi) + mu_offs : volt/second')
    eqs += Equations('sigma = sigma_amp*sin(t*freq*2*pi) + sigma_offs :'
                     ' volt/sqrt(second)')
    eqs.prepare()
    ounrn = NeuronGroup(1, eqs, threshold=V_th, refractory=t_refr, reset=V_reset)
    ounet.add(ounrn)
    ounrn.V = V0
    V_mon = StateMonitor(ounrn, 'V', record=True)
    st_mon = SpikeMonitor(ounrn)
    ounet.add(V_mon, st_mon)
    ounet.run(duration)
    V_mon.insert_spikes(st_mon, value=V_th*2)
    times = V_mon.times
    membrane = V_mon[0]
    return times, st_mon.spiketimes[0], membrane
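# Hedged sketch of the module-level constants ousim() assumes (V0, tau, t_refr,
# V_reset, dt and duration are referenced above but not defined in this snippet;
# the values here are illustrative placeholders only).
from brian import mV, ms, second

V0 = 60 * mV           # offset subtracted from V inside the equations
tau = 10 * ms          # membrane time constant
t_refr = 2 * ms        # refractory period
V_reset = 0 * mV       # post-spike reset value
dt = 0.1 * ms          # time step referenced in the noise terms
duration = 2 * second  # simulation length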
def setup_sims(neuron_params, input_params, duration):
    fin = input_params.get("fin")
    fout = input_params.get("fout")
    weight = input_params.get("weight")
    num_inp = input_params.get("num_inp")
    sync_configs = input_params.get("sync")
    if fin is None:
        fin = sl.tools.calibrate_frequencies(neuron_params,
                                             N_in=num_inp, w_in=weight,
                                             f_out=fout,
                                             synchrony_conf=sync_configs)

    brian.clear(True)
    gc.collect()
    brian.defaultclock.reinit()
    neurons = NeuronGroup(N=len(sync_configs), **neuron_params)
    simulation = Network(neurons)
    input_groups = []
    for idx, (inrate, (sync, jitter)) in enumerate(zip(fin, sync_configs)):
        inp_grp = sl.tools.fast_synchronous_input_gen(num_inp, inrate*Hz,
                                                      sync, jitter, duration)
        simulation.add(inp_grp)
        inp_conn = Connection(inp_grp, neurons[idx], state='V', weight=weight)
        input_groups.append(inp_grp)
        simulation.add(inp_conn)
    tracemon = StateMonitor(neurons, 'V', record=True)
    spikemon = SpikeMonitor(neurons)
    inputmons = [SpikeMonitor(igrp) for igrp in input_groups]
    simulation.add(tracemon, spikemon, inputmons)
    monitors = {"inputs": inputmons,
                "outputs": spikemon,
                "traces": tracemon}
    return simulation, monitors
def pif_reset():
    defaultclock.reinit()
    sim = Network()
    I = 0.2*nA
    R = 1*Mohm
    lifeq = """
    dV/dt = I*R/ms : volt
    Vth : volt
    """
    thstep = 15*mV
    nrn = NeuronGroup(1, lifeq, threshold="V>=Vth", reset="V=0*mV")
    nrn.V = 0*mV
    nrn.Vth = thstep
    sim.add(nrn)
    #connection = Connection(inputgrp, nrn, state="V", weight=0.5*mV)
    #sim.add(inputgrp, connection)
    vmon = StateMonitor(nrn, "V", record=True)
    thmon = StateMonitor(nrn, "Vth", record=True)
    spikemon = SpikeMonitor(nrn, record=True)
    sim.add(vmon, thmon, spikemon)
    sim.run(duration)
    return vmon, thmon, spikemon
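# Hedged usage sketch (`duration` is a module-level global in the original file;
# the value chosen here is an assumption): run the perfect integrate-and-fire cell
# above and plot its sawtooth membrane trace together with the fixed threshold.
from brian import second
import matplotlib.pyplot as plt

duration = 0.5 * second
vmon, thmon, spikemon = pif_reset()
plt.plot(vmon.times, vmon[0], label='V')
plt.plot(thmon.times, thmon[0], label='Vth')
plt.legend()
plt.show()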
def fun(sigma, args):
    """
    This function computes the mean firing rate of a LIF neuron with white
    noise input current (OU process with threshold).
    """
    if not isscalar(sigma):
        raise Exception('sigma must be a scalar')
    N = args['N']
    tau = args['tau']
    model = args['model']
    reset = args['reset']
    threshold = args['threshold']
    duration = args['duration']
    G = NeuronGroup(N, model=model, reset=reset, threshold=threshold)
    M = SpikeCounter(G)
    net = Network(G, M)
    net.run(duration)
    r = M.nspikes * 1.0 / N
    return r
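# Hedged usage sketch: the layout of `args` is not shown above, so the model below
# is an assumption -- a dimensionless LIF driven by white noise, where `sigma` and
# `tau` in the equation string are expected to be picked up from fun()'s local
# namespace when the group is built.
from brian import ms, second

args = {
    'N': 1000,                                        # average over 1000 neurons
    'tau': 10 * ms,
    'model': 'dV/dt = -V/tau + sigma*xi/tau**.5 : 1',
    'reset': 0.0,
    'threshold': 1.0,
    'duration': 1 * second,
}
print(fun(0.5, args))   # mean spike count per neuron over `duration` for sigma = 0.5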
def run_restricted_pop_code(pop_class, N, network_params, stimuli, trial_duration, report=None):
    simulation_clock = Clock(dt=1 * ms)
    pop = pop_class(N, simulation_clock, network_params)
    #pop_monitor=MultiStateMonitor(pop, vars=['x','r','e','total_e','total_r'], record=True)
    pop_monitor = MultiStateMonitor(pop, vars=['x', 'r', 'e'], record=True,
                                    clock=simulation_clock)

    @network_operation(when='start', clock=simulation_clock)
    def get_pop_input():
        pop.x = 0.0
        for stimulus in stimuli:
            if stimulus.start_time < simulation_clock.t < stimulus.end_time:
                pop.x += pop.get_population_function(stimulus.x, stimulus.var)

    net = Network(pop, pop_monitor, get_pop_input)
    #reinit_default_clock()
    net.run(trial_duration, report=report)

    g_total = np.sum(np.clip(pop_monitor['e'].values, 0, 1) *
                     pop_monitor['x'].values, axis=0) + 0.1
    voxel_monitor = get_bold_signal(g_total, voxel.default_params,
                                    range(int(stimuli[0].start_time / simulation_clock.dt)),
                                    trial_duration)
    return voxel_monitor
def run(self, **param_values):
    delays = param_values.pop('delays', zeros(self.neurons))
    # print self.refractory,self.max_refractory
    if self.max_refractory is not None:
        refractory = param_values.pop('refractory', zeros(self.neurons))
    else:
        refractory = self.refractory*ones(self.neurons)
    tau_metric = param_values.pop('tau_metric', zeros(self.neurons))
    self.update_neurongroup(**param_values)

    # repeat spike delays and refractory to take slices into account
    delays = kron(delays, ones(self.slices))
    refractory = kron(refractory, ones(self.slices))
    tau_metric = kron(tau_metric, ones(self.slices))

    # TODO: add here parameters to criterion_params if a criterion must use some parameters
    criterion_params = dict(delays=delays)
    if self.criterion.__class__.__name__ == 'Brette':
        criterion_params['tau_metric'] = tau_metric

    self.update_neurongroup(**param_values)
    self.initialize_criterion(**criterion_params)

    if self.use_gpu:
        # Reinitializes the simulation object
        self.mf.reinit_vars(self.criterion_object,
                            self.inputs_inline, self.inputs_offset,
                            self.spikes_inline, self.spikes_offset,
                            self.traces_inline, self.traces_offset,
                            delays, refractory)
        # LAUNCHES the simulation on the GPU
        self.mf.launch(self.sliced_duration, self.stepsize)
        # Synchronize the GPU values with a call to gpuarray.get()
        self.criterion_object.update_gpu_values()
    else:
        # set the refractory period
        if self.max_refractory is not None:
            self.group.refractory = refractory
        # Launch the simulation on the CPU
        self.group.clock.reinit()
        net = Network(self.group, self.criterion_object)
        if self.statemonitor_var is not None:
            self.statemonitors = []
            for state in self.statemonitor_var:
                monitor = StateMonitor(self.group, state, record=True)
                self.statemonitors.append(monitor)
                net.add(monitor)
        net.run(self.sliced_duration)

    sliced_values = self.criterion_object.get_values()
    combined_values = self.combine_sliced_values(sliced_values)
    values = self.criterion_object.normalize(combined_values)
    return values
def get_spikes(model=None, reset=None, threshold=None,
               input=None, input_var='I', dt=None,
               **params):
    """
    Retrieves the spike times corresponding to the best parameters found by
    the modelfitting function.

    **Arguments**

    ``model``, ``reset``, ``threshold``, ``input``, ``input_var``, ``dt``
        Same parameters as for the ``modelfitting`` function.

    ``**params``
        The best parameters returned by the ``modelfitting`` function.

    **Returns**

    ``spiketimes``
        The spike times of the model with the given input and parameters.
    """
    duration = len(input) * dt
    ngroups = len(params[params.keys()[0]])

    group = NeuronGroup(N=ngroups, model=model, reset=reset, threshold=threshold,
                        clock=Clock(dt=dt))
    group.set_var_by_array(input_var, TimedArray(input, clock=group.clock))
    for param, values in params.iteritems():
        if (param == 'delays') | (param == 'fitness'):
            continue
        group.state(param)[:] = values

    M = SpikeMonitor(group)
    net = Network(group, M)
    net.run(duration)
    reinit_default_clock()

    return M.spikes
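# Hedged usage sketch: `params` is the best-parameter dictionary produced by the
# modelfitting function, i.e. one array entry per fitted parameter. The model,
# input trace and parameter values below are illustrative stand-ins, not taken
# from the original code.
import numpy as np
from brian import Equations, ms

eqs = Equations('''
    dV/dt = (R*I - V)/tau : 1
    I : 1
    R : 1
    tau : second
''')
current_trace = 1e-9 * np.ones(10000)                  # 1 s of input at dt = 0.1 ms
best = {'R': np.array([3e9]), 'tau': np.array([25e-3])}
spikes = get_spikes(model=eqs, reset=0, threshold=1,
                    input=current_trace, input_var='I', dt=0.1 * ms,
                    **best)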
""" Script for testing brian on NSG portal. Still trying to figure out how it all works. """ from brian import (Network, NeuronGroup, StateMonitor, SpikeMonitor, PoissonInput, mV, ms, second, Hz) import numpy as np network = Network() tau = 20*ms eqs = "dV/dt = -V/tau : volt" lifgroup = NeuronGroup(10, eqs, threshold="V>=(20*mV)", reset=0*mV) weights = np.linspace(0.1, 1, 10) rates = np.arange(10, 100, 10) inputgroups = [] for idx, (w, r) in enumerate(zip(weights, rates)): inpgrp = PoissonInput(lifgroup[idx], 20, r*Hz, w*mV, state="V") inputgroups.append(inpgrp) network.add(lifgroup) network.add(*inputgroups) spikemon = SpikeMonitor(lifgroup) vmon = StateMonitor(lifgroup, "V", record=True) network.add(spikemon, vmon) network.run(10*second, report="stdout") spikes = spikemon.spiketimes.values()
from brian import (Network, NeuronGroup, StateMonitor, SpikeMonitor,
                   Connection, mV, ms, Hz)
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import spikerlib as sl
import numpy as np
import sys

sim = Network()
duration = 200*ms
dt = 0.1*ms
tau = 10*ms
Vth = 15*mV
Vreset = 0*mV
Vreset = 13.65*mV
lifeq = "dV/dt = -V/tau : volt"
lifnrn = NeuronGroup(1, lifeq, threshold="V>=Vth", reset=Vreset)
lifnrn.V = Vreset
sim.add(lifnrn)

Nin = 200
fin = 80*Hz
Sin = 0.6
sigma = 0.0*ms
weight = 0.1*mV
inputs = sl.tools.fast_synchronous_input_gen(Nin, fin, Sin, sigma, duration)
connection = Connection(inputs, lifnrn, "V", weight=weight)
sim.add(inputs, connection)
input_dist = dist_inputs_interval(inputidces, outspikes, inpmons)
correlation = cor_movavg(slopes, input_dist, win)
# TODO: negative correlation --- this should be fixed (maybe in the GA?)
individual.fitness = 1-abs(correlation)


def cor_movavg(slopes, kreuz, win):
    masl = mlab.movavg(slopes, win)
    makr = mlab.movavg(kreuz, win)
    return np.corrcoef(masl, makr)[1, 0]


def cor_movavg_all(allslopes, allkreuz, win):
    return [cor_movavg(sl, kr, win) for sl, kr in zip(allslopes, allkreuz)]


print("Preparing simulation ...")
doplot = False
network = Network()
defaultclock.dt = dt = 0.1*ms
duration = 10*second
w = 2*ms
nkreuzsamples = 3
Vrest = 0*mV
Vth = 20*mV
tau = 20*ms
Nnrns = 4
Ningroups = 1
Nin_per_group = 50
fin = 20*Hz
ingroup_sync = [0.5]
sigma = 0*ms
weight = 2.0*mV
Nallin = Nin_per_group*Ningroups
class VirtualSubject:
    def __init__(self, subj_id, wta_params=default_params(), pyr_params=pyr_params(),
                 inh_params=inh_params(), plasticity_params=plasticity_params(),
                 sim_params=simulation_params()):
        self.subj_id = subj_id
        self.wta_params = wta_params
        self.pyr_params = pyr_params
        self.inh_params = inh_params
        self.plasticity_params = plasticity_params
        self.sim_params = sim_params

        self.simulation_clock = Clock(dt=self.sim_params.dt)
        self.input_update_clock = Clock(dt=1 / (self.wta_params.refresh_rate / Hz) * second)

        self.background_input = PoissonGroup(self.wta_params.background_input_size,
                                             rates=self.wta_params.background_freq,
                                             clock=self.simulation_clock)
        self.task_inputs = []
        for i in range(self.wta_params.num_groups):
            self.task_inputs.append(PoissonGroup(self.wta_params.task_input_size,
                                                 rates=self.wta_params.task_input_resting_rate,
                                                 clock=self.simulation_clock))

        # Create WTA network
        self.wta_network = WTANetworkGroup(params=self.wta_params,
                                           background_input=self.background_input,
                                           task_inputs=self.task_inputs,
                                           pyr_params=self.pyr_params,
                                           inh_params=self.inh_params,
                                           plasticity_params=self.plasticity_params,
                                           clock=self.simulation_clock)

        # Create network monitor
        self.wta_monitor = WTAMonitor(self.wta_network, None, None, self.sim_params,
                                      record_lfp=False, record_voxel=False,
                                      record_neuron_state=False, record_spikes=False,
                                      record_firing_rate=True, record_inputs=True,
                                      record_connections=None, save_summary_only=False,
                                      clock=self.simulation_clock)

        # Create Brian network and reset clock
        self.net = Network(self.background_input, self.task_inputs, self.wta_network,
                           self.wta_network.connections.values(),
                           self.wta_monitor.monitors.values())

    def run_trial(self, sim_params, input_freq):
        self.wta_monitor.sim_params = sim_params
        self.net.reinit(states=False)

        @network_operation(when='start', clock=self.input_update_clock)
        def set_task_inputs():
            for idx in range(len(self.task_inputs)):
                rate = self.wta_params.task_input_resting_rate
                if sim_params.stim_start_time <= self.simulation_clock.t < sim_params.stim_end_time:
                    rate = input_freq[idx] * Hz + np.random.randn() * self.wta_params.input_var
                    if rate < self.wta_params.task_input_resting_rate:
                        rate = self.wta_params.task_input_resting_rate
                self.task_inputs[idx]._S[0, :] = rate

        @network_operation(clock=self.simulation_clock)
        def inject_current():
            if sim_params.dcs_start_time < self.simulation_clock.t <= sim_params.dcs_end_time:
                self.wta_network.group_e.I_dcs = sim_params.p_dcs
                self.wta_network.group_i.I_dcs = sim_params.i_dcs
            else:
                self.wta_network.group_e.I_dcs = 0 * pA
                self.wta_network.group_i.I_dcs = 0 * pA

        @network_operation(when='start', clock=self.simulation_clock)
        def inject_muscimol():
            if sim_params.muscimol_amount > 0:
                self.wta_network.groups_e[sim_params.injection_site].g_muscimol = \
                    sim_params.muscimol_amount

        self.net.remove(set_task_inputs, inject_current, inject_muscimol,
                        self.wta_network.stdp.values())
        self.net.add(set_task_inputs, inject_current, inject_muscimol)
        if sim_params.plasticity:
            self.net.add(self.wta_network.stdp.values())
        self.net.run(sim_params.trial_duration, report='text')
        #self.wta_monitor.plot()
        self.net.remove(set_task_inputs, inject_current, inject_muscimol,
                        self.wta_network.stdp.values())
def run_neglect(input_freq, delay_duration, net_params=default_params, output_file=None,
                record_lfp=True, record_voxel=True, record_neuron_state=False,
                record_spikes=True, record_pop_firing_rate=True,
                record_neuron_firing_rate=False, record_inputs=False,
                plot_output=False, mem_trial=False):
    start_time = time()

    # Init simulation parameters
    background_input_size = 1000
    #background_rate=20*Hz
    #background_rate=30*Hz
    background_rate = 25*Hz

    visual_input_size = 1000
    #visual_background_rate=10*Hz
    visual_background_rate = 5*Hz
    #visual_stim_min_rate=15*Hz
    #visual_stim_min_rate=10*Hz
    visual_stim_min_rate = 8*Hz
    visual_stim_tau = 0.15

    go_input_size = 1000
    go_rate = 20*Hz
    #go_background_rate=1*Hz
    go_background_rate = 0*Hz

    lip_size = 6250

    #stim_start_time=1.8*second
    #stim_end_time=2*second
    stim_start_time = .5*second
    stim_end_time = .7*second

    #go_start_time=3*second
    #go_end_time=3.1*second
    #go_start_time=1.7*second
    go_start_time = stim_end_time + delay_duration
    #go_end_time=1.8*second
    go_end_time = go_start_time + .2*second

    trial_duration = go_end_time + .5*second

    # Create network inputs
    background_inputs = [PoissonGroup(background_input_size, rates=background_rate),
                         PoissonGroup(background_input_size, rates=background_rate)]

    def make_mem_rate_function(rate):
        return lambda t: ((stim_start_time < t < stim_end_time and
                           np.max([visual_background_rate,
                                   rate*exp(-(t-stim_start_time)/visual_stim_tau)])) or
                          visual_background_rate)

    def make_delay_rate_function(rate):
        return lambda t: ((stim_start_time < t and
                           np.max([visual_stim_min_rate,
                                   rate*exp(-(t-stim_start_time)/visual_stim_tau)])) or
                          visual_background_rate)

    def make_go_rate_function():
        return lambda t: ((go_start_time < t < go_end_time and go_rate) or
                          go_background_rate)

    lrate = input_freq[0]*Hz
    rrate = input_freq[1]*Hz
    if mem_trial:
        visual_cortex_inputs = [PoissonGroup(visual_input_size, rates=make_mem_rate_function(lrate)),
                                PoissonGroup(visual_input_size, rates=make_mem_rate_function(rrate))]
    else:
        visual_cortex_inputs = [PoissonGroup(visual_input_size, rates=make_delay_rate_function(lrate)),
                                PoissonGroup(visual_input_size, rates=make_delay_rate_function(rrate))]
    go_input = PoissonGroup(go_input_size, rates=make_go_rate_function())

    # Create WTA network
    brain_network = BrainNetworkGroup(lip_size, params=net_params,
                                      background_inputs=background_inputs,
                                      visual_cortex_input=visual_cortex_inputs,
                                      go_input=go_input)

    # LFP source
    left_lip_lfp_source = LFPSource(brain_network.left_lip.e_group)
    right_lip_lfp_source = LFPSource(brain_network.right_lip.e_group)

    # Create voxel
    left_lip_voxel = Voxel(network=brain_network.left_lip.neuron_group)
    right_lip_voxel = Voxel(network=brain_network.right_lip.neuron_group)

    # Create network monitor
    brain_monitor = BrainMonitor(background_inputs, visual_cortex_inputs, go_input,
                                 brain_network, left_lip_lfp_source, right_lip_lfp_source,
                                 left_lip_voxel, right_lip_voxel,
                                 record_lfp=record_lfp, record_voxel=record_voxel,
                                 record_neuron_state=record_neuron_state,
                                 record_spikes=record_spikes,
                                 record_pop_firing_rate=record_pop_firing_rate,
                                 record_neuron_firing_rates=record_neuron_firing_rate,
                                 record_inputs=record_inputs)

    # Create Brian network and reset clock
    net = Network(background_inputs, visual_cortex_inputs, go_input, brain_network,
                  left_lip_lfp_source, right_lip_lfp_source, left_lip_voxel,
                  right_lip_voxel, brain_network.connections, brain_monitor.monitors)
    reinit_default_clock()
    print "Initialization time:", time() - start_time

    # Run simulation
    start_time = time()
    net.run(trial_duration, report='text')
    print "Simulation time:", time() - start_time

    # Compute BOLD signal
    if record_voxel:
        start_time = time()
        brain_monitor.left_voxel_exc_monitor = get_bold_signal(
            brain_monitor.left_voxel_monitor['G_total_exc'].values[0],
            left_lip_voxel.params, [500, 1500], trial_duration)
        brain_monitor.left_voxel_monitor = get_bold_signal(
            brain_monitor.left_voxel_monitor['G_total'].values[0],
            left_lip_voxel.params, [500, 1500], trial_duration)
        brain_monitor.right_voxel_exc_monitor = get_bold_signal(
            brain_monitor.right_voxel_monitor['G_total_exc'].values[0],
            right_lip_voxel.params, [500, 1500], trial_duration)
        brain_monitor.right_voxel_monitor = get_bold_signal(
            brain_monitor.right_voxel_monitor['G_total'].values[0],
            right_lip_voxel.params, [500, 1500], trial_duration)
        print 'Time to compute BOLD:', time() - start_time

    # Plot outputs
    if plot_output:
        brain_monitor.plot(trial_duration)

    if output_file is not None:
        write_output(brain_network, background_input_size, background_rate,
                     visual_input_size, input_freq, trial_duration, stim_start_time,
                     stim_end_time, go_start_time, go_end_time, record_pop_firing_rate,
                     record_neuron_state, record_spikes, record_voxel, record_lfp,
                     record_inputs, output_file, left_lip_voxel, right_lip_voxel,
                     brain_monitor)

    return brain_monitor
def runsim(Nin, weight, fout, sync):
    sim = Network()
    clear(True)
    gc.collect()
    defaultclock.reinit()
    duration = 5*second
    lifeq = "dV/dt = -V/(10*ms) : volt"
    nrndef = {"model": lifeq, "threshold": "V>=15*mV", "reset": "V=0*mV",
              "refractory": 2*ms}
    fin = load_or_calibrate(nrndef, Nin, weight, sync, fout,
                            Vth=15*mV, tau=10*ms)
    # print("Calibrated frequencies:")
    # print(", ".join(str(f) for f in fin))
    inputgroups = []
    connections = []
    neurons = []
    Nneurons = len(fin)
    neurons = NeuronGroup(Nneurons, **nrndef)
    for idx in range(Nneurons):
        fin_i = fin[idx]
        sync_i, sigma_i = sync[idx]
        inputgrp = sl.tools.fast_synchronous_input_gen(Nin, fin_i,
                                                       sync_i, sigma_i, duration)
        defaultclock.reinit()
        conn = Connection(inputgrp, neurons[idx], state="V", weight=weight)
        inputgroups.append(inputgrp)
        connections.append(conn)
    voltagemon = StateMonitor(neurons, "V", record=True)
    spikemon = SpikeMonitor(neurons, record=True)
    sim.add(neurons, voltagemon, spikemon)
    sim.add(*inputgroups)
    sim.add(*connections)
    print("Running {} {} {}".format(Nin, weight, fout))
    sim.run(duration, report="stdout")
    mnpss = []
    allnpss = []
    for idx in range(Nneurons):
        vmon = voltagemon[idx]
        smon = spikemon[idx]
        # print("Desired firing rate: {}".format(fout))
        # print("Actual firing rate: {}".format(len(smon)/duration))
        if len(smon) > 0:
            npss = sl.tools.npss(vmon, smon, 0*mV, 15*mV, 10*ms, 2*ms)
        else:
            npss = 0
        mnpss.append(np.mean(npss))
        allnpss.append(npss)
    nrndeftuple = tuple(nrndef.items())
    key = (nrndeftuple, Nin, weight, tuple(sync), fout, 15*mV, 10*ms)
    save_data(key, allnpss)
    imshape = (len(sigma), len(Sin))
    imextent = (0, 1, 0, 4.0)
    mnpss = np.reshape(mnpss, imshape, order="F")
    plt.figure()
    plt.imshow(mnpss, aspect="auto", origin="lower", extent=imextent,
               interpolation="none", vmin=0, vmax=1)
    cbar = plt.colorbar()
    cbar.set_label("$\overline{M}$")
    plt.xlabel("$S_{in}$")
    plt.ylabel("$\sigma_{in}$ (ms)")
    filename = "npss_{}_{}_{}".format(Nin, weight, fout).replace(".", "")
    plt.savefig(filename+".pdf")
    plt.savefig(filename+".png")
    print("{} saved".format(filename))
    voltages = voltagemon.values
    spiketrains = spikemon.spiketimes.values()
    pickle.dump({"voltages": voltages, "spiketrains": spiketrains},
                open(filename+".pkl", 'w'))
    return voltagemon, spikemon
from brian import (Network, NeuronGroup, StateMonitor, defaultclock,
                   EmpiricalThreshold, display_in_unit,
                   ms, second, mV, nS, msiemens, uF, uA)
import matplotlib.pyplot as plt
import numpy as np

sim = Network()
defaultclock.dt = dt = 0.1*ms
duration = 0.1*second

# Neuron parameters
Cm = 1*uF  # /cm**2
gL = 0.1*msiemens
EL = -65*mV
ENa = 55*mV
EK = -90*mV
gNa = 35*msiemens
gK = 9*msiemens
threshold = EmpiricalThreshold(threshold=15*mV, refractory=2*ms)

# Input parameters
taue = 15*ms
taui = 5*ms
EExc = 0*mV
EInh = -80*mV
WExc = 80*nS
WInh = 50*nS

inputcurrents = [ia*uA for ia in np.arange(0.1, 10.01, 0.01)]

eqs = '''
def lifsim(mu_amp, mu_offs, sigma_amp, sigma_offs, freq, V_th):
    lifnet = Network()
    clock.reinit_default_clock()
    eqs = Equations('dV/dt = (-V+V0)/tau : volt')
    eqs.prepare()
    lifnrn = NeuronGroup(1, eqs, threshold=V_th, refractory=t_refr, reset=V_reset)
    lifnet.add(lifnrn)
    pulse_times = (np.arange(1, duration*freq, 1)+0.25)/freq
    pulse_spikes = []
    Npoiss = 5000
    Npulse = 5000
    wpoiss = (mu_offs-mu_amp)/(Npoiss*freq)
    wpulse = mu_amp/(Npulse*freq)
    sigma = 1/(freq*5)
    if (wpulse != 0):
        for pt in pulse_times:
            pp = PulsePacket(t=pt*second, n=Npulse, sigma=sigma)
            pulse_spikes.extend(pp.spiketimes)
        pulse_input = SpikeGeneratorGroup(Npulse, pulse_spikes)
        pulse_conn = Connection(pulse_input, lifnrn, 'V', weight=wpulse)
        lifnet.add(pulse_input, pulse_conn)
    if (wpoiss != 0):
        poiss_input = PoissonGroup(Npoiss, freq)
        poiss_conn = Connection(poiss_input, lifnrn, 'V', weight=wpoiss)
        lifnet.add(poiss_input, poiss_conn)
    V_mon = StateMonitor(lifnrn, 'V', record=True)
    st_mon = SpikeMonitor(lifnrn)
    lifnet.add(V_mon, st_mon)
    lifnet.run(duration)
    V_mon.insert_spikes(st_mon, value=V_th*2)
    times = V_mon.times
    membrane = V_mon[0]
    return times, st_mon.spiketimes[0], membrane
class VirtualSubject:
    def __init__(self, subj_id, wta_params=default_params(), pyr_params=pyr_params(),
                 inh_params=inh_params(), sim_params=simulation_params(),
                 network_class=WTANetworkGroup):
        self.subj_id = subj_id
        self.wta_params = wta_params
        self.pyr_params = pyr_params
        self.inh_params = inh_params
        self.sim_params = sim_params

        self.simulation_clock = Clock(dt=self.sim_params.dt)
        self.input_update_clock = Clock(dt=1 / (self.wta_params.refresh_rate / Hz) * second)

        self.background_input = PoissonGroup(self.wta_params.background_input_size,
                                             rates=self.wta_params.background_freq,
                                             clock=self.simulation_clock)
        self.task_inputs = []
        for i in range(self.wta_params.num_groups):
            self.task_inputs.append(PoissonGroup(self.wta_params.task_input_size,
                                                 rates=self.wta_params.task_input_resting_rate,
                                                 clock=self.simulation_clock))

        # Create WTA network
        self.wta_network = network_class(params=self.wta_params,
                                         background_input=self.background_input,
                                         task_inputs=self.task_inputs,
                                         pyr_params=self.pyr_params,
                                         inh_params=self.inh_params,
                                         clock=self.simulation_clock)

        # Create network monitor
        self.wta_monitor = WTAMonitor(self.wta_network, self.sim_params,
                                      record_neuron_state=False, record_spikes=False,
                                      record_firing_rate=True, record_inputs=True,
                                      save_summary_only=False,
                                      clock=self.simulation_clock)

        # Create Brian network and reset clock
        self.net = Network(self.background_input, self.task_inputs, self.wta_network,
                           self.wta_network.connections.values(),
                           self.wta_monitor.monitors.values())

    def run_trial(self, sim_params, input_freq):
        self.wta_monitor.sim_params = sim_params
        self.net.reinit(states=False)

        @network_operation(when='start', clock=self.input_update_clock)
        def set_task_inputs():
            for idx in range(len(self.task_inputs)):
                rate = self.wta_params.task_input_resting_rate
                if sim_params.stim_start_time <= self.simulation_clock.t < sim_params.stim_end_time:
                    rate = input_freq[idx] * Hz + np.random.randn() * self.wta_params.input_var
                    if rate < self.wta_params.task_input_resting_rate:
                        rate = self.wta_params.task_input_resting_rate
                self.task_inputs[idx]._S[0, :] = rate

        @network_operation(clock=self.simulation_clock)
        def inject_current():
            if sim_params.dcs_start_time < self.simulation_clock.t <= sim_params.dcs_end_time:
                self.wta_network.group_e.I_dcs = sim_params.p_dcs
                self.wta_network.group_i.I_dcs = sim_params.i_dcs
            else:
                self.wta_network.group_e.I_dcs = 0 * pA
                self.wta_network.group_i.I_dcs = 0 * pA

        self.net.remove(set_task_inputs, inject_current)
        self.net.add(set_task_inputs, inject_current)
        self.net.run(sim_params.trial_duration, report='text')
def runsim(fin):
    clear(True)
    gc.collect()
    defaultclock.reinit()
    weight = 0.16*mV
    sim = Network()
    duration = 2.0*second
    Vth = 15*mV
    Vreset = 13.65*mV
    trefr = 2*ms
    lifeq = """
    dV/dt = -V/(10*ms) : volt
    Vth : volt
    """
    nrndef = {"model": lifeq, "threshold": "V>=Vth", "reset": "V=Vreset",
              "refractory": 0.1*ms}
    inputgroups = []
    connections = []
    neurons = []
    Nneurons = len(fin)
    neurons = NeuronGroup(Nneurons, **nrndef)
    neurons.V = 0*mV
    neurons.Vth = 15*mV
    for idx in range(Nneurons):
        fin_i = fin[idx]*Hz
        inputgrp = PoissonGroup(50, fin_i)
        conn = Connection(inputgrp, neurons[idx], state="V", weight=weight)
        inputgroups.append(inputgrp)
        connections.append(conn)
    voltagemon = StateMonitor(neurons, "V", record=True)
    spikemon = SpikeMonitor(neurons, record=True)
    sim.add(neurons, voltagemon, spikemon)
    sim.add(*inputgroups)
    sim.add(*connections)

    @network_operation
    def refractory_threshold(clock):
        for idx in range(Nneurons):
            if (len(spikemon.spiketimes[idx]) and
                    clock.t < spikemon.spiketimes[idx][-1]*second+trefr):
                neurons.Vth[idx] = 100*mV
            else:
                neurons.Vth[idx] = Vth

    sim.add(refractory_threshold)
    print("Running simulation of {} neurons for {} s".format(Nneurons, duration))
    sim.run(duration, report="stdout")
    mnpss = []
    allnpss = []
    outisi = []
    for idx in range(Nneurons):
        vmon = voltagemon[idx]
        smon = spikemon[idx]
        if not len(smon):
            continue
        outisi.append(duration*1000/len(smon))
        if len(smon) > 0:
            npss = sl.tools.npss(vmon, smon, 0*mV, 15*mV, 10*ms, 2*ms)
        else:
            npss = 0
        mnpss.append(np.mean(npss))
        allnpss.append(npss)
    return outisi, mnpss
def run_simulation(realizations=1, trials=1, t=3000 * ms, alpha=1, ree=1, k=50,
                   winlen=50 * ms, verbose=True, t_stim=0):
    """
    Run the whole simulation with the specified parameters. All model parameter are set in the function.

    Keyword arguments:
    :param realizations: number of repititions of the whole simulation, number of network instances
    :param trials: number of trials for network instance
    :param t: simulation time
    :param alpha: scaling factor for number of neurons in the network
    :param ree: clustering coefficient
    :param k: number of clusters
    :param t_stim : duration of stimulation of a subset of clusters
    :param winlen: length of window in ms
    :param verbose: plotting flag
    :return: numpy matrices with spike times
    """

    # The equations defining our neuron model
    eqs_string = '''
        dV/dt = (mu - V)/tau + x: volt
        dx/dt = -1.0/tau_2*(x - y/tau_1) : volt/second
        dy/dt = -y/tau_1 : volt
        mu : volt
        tau: second
        tau_2: second
        tau_1: second
        '''

    # Model parameters
    n_e = int(4000 * alpha)  # number of exc neurons
    n_i = int(1000 * alpha)  # number of inh neurons
    tau_e = 15 * ms  # membrane time constant (for excitatory synapses)
    tau_i = 10 * ms  # membrane time constant (for inhibitory synapses)
    tau_syn_2_e = 3 * ms  # exc synaptic time constant tau2 in paper
    tau_syn_2_i = 2 * ms  # inh synaptic time constant tau2 in paper
    tau_syn_1 = 1 * ms  # exc/inh synaptic time constant tau1 in paper
    vt = -50 * mV  # firing threshold
    vr = -65 * mV  # reset potential
    dv = vt - vr  # delta v
    refrac = 5 * ms  # absolute refractory period

    # scale the weights to ensure same variance in the inputs
    wee = 0.024 * dv * np.sqrt(1. / alpha)
    wie = 0.014 * dv * np.sqrt(1. / alpha)
    wii = -0.057 * dv * np.sqrt(1. / alpha)
    wei = -0.045 * dv * np.sqrt(1. / alpha)

    # Connection probability
    p_ee = 0.2
    p_ii = 0.5
    p_ie = 0.5
    p_ei = 0.5

    # determine probs for inside and outside of clusters
    p_in, p_out = get_cluster_connection_probs(ree, k, p_ee)

    mu_min_e, mu_max_e = 1.1, 1.2
    mu_min_i, mu_max_i = 1.0, 1.05

    # increase cluster weights if there are clusters
    wee_cluster = wee if p_in == p_out else 1.9 * wee

    # define numpy array for data storing
    all_data = np.zeros((realizations, trials, n_e+n_i, int(t/winlen)//2))

    for realization in range(realizations):
        # clear workspace to make sure that is a new realization of the network
        clear(True, True)
        reinit()

        # set up new random bias parameter for every type of neuron
        mu_e = vr + np.random.uniform(mu_min_e, mu_max_e, n_e) * dv  # bias for excitatory neurons
        mu_i = vr + np.random.uniform(mu_min_i, mu_max_i, n_i) * dv  # bias for excitatory neurons

        # Let's create an equation object from our string and parameters
        model_eqs = Equations(eqs_string)

        # Let's create 5000 neurons
        all_neurons = NeuronGroup(N=n_e + n_i,
                                  model=model_eqs,
                                  threshold=vt,
                                  reset=vr,
                                  refractory=refrac,
                                  freeze=True,
                                  method='Euler',
                                  compile=True)

        # Divide the neurons into excitatory and inhibitory ones
        neurons_e = all_neurons[0:n_e]
        neurons_i = all_neurons[n_e:n_e + n_i]

        # set the bias
        neurons_e.mu = mu_e
        neurons_i.mu = mu_i
        neurons_e.tau = tau_e
        neurons_i.tau = tau_i
        neurons_e.tau_2 = tau_syn_2_e
        neurons_i.tau_2 = tau_syn_2_i
        all_neurons.tau_1 = tau_syn_1

        # set up connections
        connections = Connection(all_neurons, all_neurons, 'y')

        # do the cluster connection like cross validation: cluster neuron := test idx; other neurons := train idx
        kf = KFold(n=n_e, n_folds=k)
        for idx_out, idx_in in kf:  # idx_out holds all other neurons; idx_in holds all cluster neurons
            # connect current cluster to itself
            connections.connect_random(all_neurons[idx_in[0]:idx_in[-1]],
                                       all_neurons[idx_in[0]:idx_in[-1]],
                                       sparseness=p_in, weight=wee_cluster)
            # connect current cluster to other neurons
            connections.connect_random(all_neurons[idx_in[0]:idx_in[-1]],
                                       all_neurons[idx_out[0]:idx_out[-1]],
                                       sparseness=p_out, weight=wee)

        # connect all excitatory to all inhibitory, irrespective of clustering
        connections.connect_random(all_neurons[0:n_e], all_neurons[n_e:(n_e + n_i)],
                                   sparseness=p_ie, weight=wie)
        # connect all inhibitory to all excitatory
        connections.connect_random(all_neurons[n_e:(n_e + n_i)], all_neurons[0:n_e],
                                   sparseness=p_ei, weight=wei)
        # connect all inhibitory to all inhibitory
        connections.connect_random(all_neurons[n_e:(n_e + n_i)], all_neurons[n_e:(n_e + n_i)],
                                   sparseness=p_ii, weight=wii)

        # set up spike monitors
        spike_mon_e = SpikeMonitor(neurons_e)
        spike_mon_i = SpikeMonitor(neurons_i)

        # set up network with monitors
        network = Network(all_neurons, connections, spike_mon_e, spike_mon_i)

        # run this network for some number of trials, every time with
        for trial in range(trials):
            # different initial values
            all_neurons.V = vr + (vt - vr) * np.random.rand(len(all_neurons)) * 1.4

            # Calibration phase
            # run for the first half of the time to let the neurons adapt
            network.run(t/2)

            # reset monitors to start recording phase
            spike_mon_i.reinit()
            spike_mon_e.reinit()

            # stimulation if duration is given
            # define index variable for the stimulation possibility (is 0 for stimulation time=0)
            t_stim_idx = int(t_stim / (winlen/ms))
            if not(t_stim == 0):
                # Stimulation phase, increase input to subset of clusters
                all_neurons[:400].mu += 0.07 * dv
                network.run(t_stim * ms, report='text')
                # set back to normal
                all_neurons[:400].mu -= 0.07 * dv
                # save data
                all_data[realization, trial, :n_e, :t_stim_idx] = spikes_counter(spike_mon_e, winlen)
                all_data[realization, trial, n_e:, :t_stim_idx] = spikes_counter(spike_mon_i, winlen)
                # reset monitors
                spike_mon_e.reinit()
                spike_mon_i.reinit()

            # run the remaining time of the simulation
            network.run((t/2) - t_stim*ms, report='text')

            # save results
            all_data[realization, trial, :n_e, t_stim_idx:] = spikes_counter(spike_mon_e, winlen)
            all_data[realization, trial, n_e:, t_stim_idx:] = spikes_counter(spike_mon_i, winlen)

            if verbose:
                plt.ion()
                plt.figure()
                raster_plot(spike_mon_e)
                plt.title('Excitatory neurons')

            spike_mon_e.reinit()
            spike_mon_i.reinit()

    return all_data
import numpy as np
import itertools as itt

fin = [f * Hz for f in range(10, 41, 5)]
win = [w * mV for w in np.arange(0.5, 2.1, 0.5)]
Nin = [n for n in range(100, 181, 20)]
tau = 10 * ms
Vth = 15 * mV
reset = 0 * mV
configs = [c for c in itt.product(Nin, fin, win)]
Nsims = len(configs)
print("Number of configurations: {}".format(Nsims))

lifeq = "dV/dt = -V/tau : volt"
sim = Network()
nrn = NeuronGroup(Nsims, lifeq, threshold="V>=Vth", reset="V=reset")
inputgroups = []
connections = []
print("Setting up ...")
for idx, c in enumerate(configs):
    n, f, w = c
    inp = PoissonGroup(n, f)
    conn = Connection(inp, nrn[idx], state="V", weight=w)
    inputgroups.append(inp)
    connections.append(conn)
    print("\r{}/{}".format(idx + 1, Nsims), end="")
    sys.stdout.flush()
print()
spikemon = SpikeMonitor(nrn)
def runsim(neuron_model,
           # sim params
           dt, simtime, prerun, monitors, recvars,
           # stimulation params
           fstim, r0_bg, r0_stim, stim_starts, stim_stops, stim_odors, stim_amps,
           stim_start_var,
           # network params
           beeid, N_glu, N_KC, ORNperGlu, PNperKC, PN_I0, LN_I0,
           # network weights
           wi, wORNLN, wORNPN, wPNKC,
           # default params
           V0min, inh_struct=None, Winh=None, timestep=500, report=None):

    np.random.seed()  # needed for numpy/brian when runing parallel sims
    define_default_clock(dt=dt)
    inh_on_off = 0 if (wi == 0) or (wi is None) or (wORNLN is None) else 1

    ######################### NEURONGROUPS #########################
    NG = dict()

    # ORN Input
    # For each glumerolus, random temporal response jitter can be added.
    # The jitter is added to the response onset. Maximum jitter is given by stim_start_var.
    # stim_start_jittered is a vector containing the jittered stim start tims
    # orn_activation returns a booolean vector of stim presence given time t
    # Total ORN rate: Baseline componenent equal for all units,
    # and individual activationa.
    jitter = np.random.uniform(0, stim_start_var, N_glu)
    stim_tun = lambda odorN: fstim(N_glu=N_glu, odorN=odorN) * r0_stim
    orn_activation = lambda t: np.sum(
        [a*stim_tun(odorN=o)*np.logical_and(np.greater(t, prerun+stim_start+jitter),
                                            np.less(t, prerun+stim_stop))
         for stim_start, stim_stop, o, a in zip(stim_starts, stim_stops,
                                                stim_odors, stim_amps)], 0)
    orn_rates = lambda t: np.repeat(r0_bg + orn_activation(t), repeats=ORNperGlu)

    NG['ORN'] = PoissonGroup(ORNperGlu*N_glu, rates=orn_rates)
    NG['PN'] = NeuronGroup(N_glu, **neuron_model)
    NG['LN'] = NeuronGroup(N_glu*inh_on_off, **neuron_model)
    if 'KC' in monitors:
        NG['KC'] = NeuronGroup(N_KC, **neuron_model)

    ######################### CONNECTIONS #########################
    c = dict()
    c['ORNPN'] = Connection(NG['ORN'], NG['PN'], 'ge')
    for i in np.arange(N_glu):
        c['ORNPN'].connect_full(NG['ORN'].subgroup(ORNperGlu), NG['PN'][i], weight=wORNPN)

    if inh_on_off:
        print('-- inhibiting --', wi)
        c['ORNLN'] = Connection(NG['ORN'], NG['LN'], 'ge')
        c['LNPN'] = Connection(NG['LN'], NG['PN'], 'gi', weight=(wi*35)/N_glu)
        for i in np.arange(N_glu):
            c['ORNLN'].connect_full(NG['ORN'][i*ORNperGlu:(i+1)*ORNperGlu],
                                    NG['LN'][i], weight=wORNLN)
        if inh_struct:
            c['LNPN'].connect(NG['LN'], NG['PN'], Winh)

    if 'KC' in monitors:
        c['KC'] = Connection(NG['PN'], NG['KC'], 'ge')
        c['KC'].connect_random(NG['PN'], NG['KC'], p=PNperKC/float(N_glu),
                               weight=wPNKC, seed=beeid)

    ######################### INITIAL VALUES #########################
    VT = neuron_model['threshold']
    NG['PN'].vm = np.random.uniform(V0min, VT, size=len(NG['PN']))
    if inh_on_off:
        NG['LN'].vm = np.random.uniform(V0min, VT, size=len(NG['LN']))
    if 'KC' in monitors:
        NG['KC'].vm = np.random.uniform(V0min, VT, size=len(NG['KC']))

    net = Network(NG.values(), c.values())

    #### Compensation currents ###
    NG['PN'].I0 = PN_I0
    NG['LN'].I0 = LN_I0

    ##########################################################################
    ######################### PRE-RUN #########################
    net.run(prerun)

    ######################### MONITORS #########################
    spmons = [SpikeMonitor(NG[mon], record=True) for mon in monitors]
    net.add(spmons)
    if len(recvars) > 0:
        mons = [MultiStateMonitor(NG[mon], vars=recvars, record=True, timestep=timestep)
                for mon in monitors]
        net.add(mons)
    else:
        mons = None

    ######################### RUN #########################
    net.run(simtime, report=report)

    out_spikes = dict((monitors[i], np.array(sm.spikes)) for i, sm in enumerate(spmons))
    if mons is not None:
        out_mons = dict((mon, dict((var, statemon.values)
                                   for var, statemon in m.iteritems()))
                        for mon, m in zip(monitors, mons))
    else:
        out_mons = None

    # subtract the prerun from spike times, if there are any
    for spikes in out_spikes.itervalues():
        if len(spikes) != 0:
            spikes[:, 1] -= prerun

    return out_spikes, out_mons
neurons_i.V = np.random.uniform(V_reset, V_th * 1.1, N_i)

# Make some monitors to record spikes of all neurons and the membrane potential of a few
spike_mon_e = SpikeMonitor(neurons_e)
spike_mon_i = SpikeMonitor(neurons_i)
#state_mon_v_e = StateMonitor(neurons_e, 'V', record=[0,1,2])
#state_mon_v_i = StateMonitor(neurons_i, 'V', record=[0,1])
#state_mon_isyn = StateMonitor(neurons_i, 'Isyn', record=[0,1])

# Put everything into the network
network = Network(neurons_e, neurons_i, spike_mon_e, conn_ii, conn_ee_clusters,
                  conn_ie, conn_ei, spike_mon_i)  #, state_mon_v_i, state_mon_isyn, state_mon_v_e,)

# Let's run our simulation
network.run(duration, report='text')

#plt.figure()
#plt.plot(state_mon_isyn.times,state_mon_isyn.values[0,:])

# Plot spike raster plots, blue exc neurons, red inh neurons
plt.figure()
gs = gridspec.GridSpec(2, 1, height_ratios=[1, 3])
from brian import (NeuronGroup, Network, StateMonitor, second, ms, volt, mV)
import numpy as np
import matplotlib.pyplot as plt

network = Network()
XT = -50*mV
DeltaT = 0.05*mV/ms
eqs = "dX/dt = DeltaT*exp((X-XT)/DeltaT) : volt"
neuron = NeuronGroup(1, eqs, threshold="X>=XT", reset=-65*mV)
neuron.X = -65*mV
network.add(neuron)
vmon = StateMonitor(neuron, "X", record=True)
network.add(vmon)
network.run(1*second)
plt.figure("Voltage")
plt.plot(vmon.times, vmon[0])
plt.show()