def setup(timestep=DEFAULT_TIMESTEP, min_delay=DEFAULT_MIN_DELAY,
          max_delay=DEFAULT_MAX_DELAY, **extra_params):
    """
    Should be called at the very beginning of a script.

    `extra_params` contains any keyword arguments that are required by a given
    simulator but not by others.

    NEST-specific extra_params:

    `spike_precision`: should be "on_grid" (default) or "off_grid"
    `verbosity`: NEST message verbosity level, e.g. "all", "debug", "info",
                 "warning", "error" or "quiet"
    `recording_precision`: number of decimal places used for recorded data
    `threads`: number of threads to use
    `grng_seed`: one seed for the global random number generator of NEST
    `rng_seeds`: a list of seeds, one for each thread on each MPI process
    `rng_seeds_seed`: a single seed that will be used to generate random values
                      for `rng_seeds`
    """
    common.setup(timestep, min_delay, max_delay, **extra_params)
    simulator.state.clear()
    for key in ("verbosity", "spike_precision", "recording_precision", "threads"):
        if key in extra_params:
            setattr(simulator.state, key, extra_params[key])
    # set kernel RNG seeds
    simulator.state.num_threads = extra_params.get('threads') or 1
    if 'grng_seed' in extra_params:
        simulator.state.grng_seed = extra_params['grng_seed']
    if 'rng_seeds' in extra_params:
        simulator.state.rng_seeds = extra_params['rng_seeds']
    else:
        rng = NumpyRNG(extra_params.get('rng_seeds_seed', 42))
        n = simulator.state.num_processes * simulator.state.threads
        simulator.state.rng_seeds = rng.next(n, 'uniform_int',
                                             {'low': 0, 'high': 100000}).tolist()
    # set resolution
    simulator.state.dt = timestep
    # Set min_delay and max_delay for all synapse models
    simulator.state.set_delays(min_delay, max_delay)
    nest.SetDefaults('spike_generator', {'precise_times': True})
    return rank()
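# Usage sketch (hedged): assuming this function is exposed as pyNN.nest.setup,
# the NEST-specific keyword arguments documented above are passed straight
# through `extra_params`; the numeric values below are illustrative only.
import pyNN.nest as sim

node_id = sim.setup(timestep=0.1, min_delay=0.1, max_delay=10.0,
                    spike_precision="off_grid",  # "on_grid" is the default
                    threads=2,
                    rng_seeds_seed=12345)        # generates one seed per thread/process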
def fixed_number_pre_with_replacement_heterogeneous_parameters(sim): sim.setup() p1 = sim.Population(5, sim.IF_cond_exp()) p2 = sim.Population(7, sim.IF_cond_exp()) connector1 = sim.FixedNumberPreConnector(n=3, with_replacement=True, rng=NumpyRNG()) synapse_type2 = sim.TsodyksMarkramSynapse(weight=lambda d: d, delay=0.5, U=lambda d: 0.02 * d + 0.1) #synapse_type2 = sim.TsodyksMarkramSynapse(weight=0.001, delay=0.5, U=lambda d: 2*d+0.1) prj2 = sim.Projection(p1, p2, connector1, synapse_type2) print("Projection 2") x = prj2.get(['weight', 'delay', 'U'], format='list', gather=False) from pprint import pprint pprint(x) i, j, w, d, u = numpy.array(x).T assert_arrays_equal(w, abs(i - j)) assert_arrays_equal(d, 0.5 * numpy.ones(p2.size * connector1.n)) assert_arrays_equal(u, 0.02 * abs(i - j) + 0.1) sim.end()
def initialize(): global sim global options global extra global rngseed global parallel_safe global rng global n_ext global n_exc global n_inh sim, options = get_simulator( ("--plot-figure", "Plot the connections to a file.")) init_logging(None, debug=True) # === General parameters ================================================= threads = 1 rngseed = 98765 parallel_safe = True rng = NumpyRNG(seed=rngseed, parallel_safe=parallel_safe) # === general network parameters (except connections) ==================== n_ext = 60 # number of external stimuli n_exc = 60 # number of excitatory cells n_inh = 60 # number of inhibitory cells # === Options ============================================================ extra = { 'loglevel': 2, 'useSystemSim': True, 'maxNeuronLoss': 0., 'maxSynapseLoss': 0.4, 'hardwareNeuronSize': 8, 'threads': threads, 'filename': "connections.xml", 'label': 'VA' } if sim.__name__ == "pyNN.hardware.brainscales": extra['hardware'] = sim.hardwareSetup['small'] if options.simulator == "neuroml": extra["file"] = "connections.xml"
def scenario4(sim): """ Network with spatial structure """ init_logging(logfile=None, debug=True) sim.setup() rng = NumpyRNG(seed=76454, parallel_safe=False) input_layout = RandomStructure(boundary=Cuboid(width=500.0, height=500.0, depth=100.0), origin=(0, 0, 0), rng=rng) inputs = sim.Population(100, sim.SpikeSourcePoisson(rate=RandomDistribution('uniform', [3.0, 7.0], rng=rng)), structure=input_layout, label="inputs") output_layout = Grid3D(aspect_ratioXY=1.0, aspect_ratioXZ=5.0, dx=10.0, dy=10.0, dz=10.0, x0=0.0, y0=0.0, z0=200.0) outputs = sim.Population(200, sim.EIF_cond_exp_isfa_ista(), initial_values = {'v': RandomDistribution('normal', [-65.0, 5.0], rng=rng), 'w': RandomDistribution('normal', [0.0, 1.0], rng=rng)}, structure=output_layout, # 10x10x2 grid label="outputs") logger.debug("Output population positions:\n %s", outputs.positions) DDPC = sim.DistanceDependentProbabilityConnector input_connectivity = DDPC("0.5*exp(-d/100.0)", rng=rng) recurrent_connectivity = DDPC("sin(pi*d/250.0)**2", rng=rng) depressing = sim.TsodyksMarkramSynapse(weight=RandomDistribution('normal', (0.1, 0.02), rng=rng), delay="0.5 + d/100.0", U=0.5, tau_rec=800.0, tau_facil=0.0) facilitating = sim.TsodyksMarkramSynapse(weight=0.05, delay="0.2 + d/100.0", U=0.04, tau_rec=100.0, tau_facil=1000.0) input_connections = sim.Projection(inputs, outputs, input_connectivity, receptor_type='excitatory', synapse_type=depressing, space=Space(axes='xy'), label="input connections") recurrent_connections = sim.Projection(outputs, outputs, recurrent_connectivity, receptor_type='inhibitory', synapse_type=facilitating, space=Space(periodic_boundaries=((-100.0, 100.0), (-100.0, 100.0), None)), # should add "calculate_boundaries" method to Structure classes label="recurrent connections") outputs.record('spikes') outputs.sample(10, rng=rng).record('v') sim.run(1000.0) data = outputs.get_data() sim.end() return data
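# Usage sketch (hedged): inspecting the Neo Block returned by scenario4; `sim`
# is assumed to be an already imported pyNN backend module (e.g. pyNN.nest).
def summarise_scenario4(sim):
    data = scenario4(sim)
    segment = data.segments[0]
    vm = segment.filter(name='v')[0]   # membrane traces of the sampled output cells
    print("recorded %d spike trains; Vm array shape %s"
          % (len(segment.spiketrains), vm.shape))
    return segment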
def do_run(split, seed=None): p.setup(1.0) if split: p.set_number_of_neurons_per_core(p.SpikeSourcePoisson, 27) p.set_number_of_neurons_per_core(p.IF_curr_exp, 22) inp = p.Population(100, p.SpikeSourcePoisson(rate=100), label="input", additional_parameters={"seed": seed}) pop = p.Population(100, p.IF_curr_exp, {}, label="pop") p.Projection(inp, pop, p.OneToOneConnector(), synapse_type=p.StaticSynapse(weight=5)) pop.record("spikes") inp.record("spikes") p.run(100) inp.set(rate=10) # pop.set("cm", 0.25) pop.set(tau_syn_E=1) p.run(100) pop_spikes1 = pop.spinnaker_get_data('spikes') inp_spikes1 = inp.spinnaker_get_data('spikes') p.reset() inp.set(rate=0) pop.set(i_offset=1.0) vs = p.RandomDistribution( "uniform", [-65.0, -55.0], rng=NumpyRNG(seed=seed)) pop.initialize(v=vs) p.run(100) pop_spikes2 = pop.spinnaker_get_data('spikes') inp_spikes2 = inp.spinnaker_get_data('spikes') p.end() return (pop_spikes1, inp_spikes1, pop_spikes2, inp_spikes2)
def __init__(self, mean, stdev, dt=None, start=0.0, stop=None, rng=None):
    """Construct the current source.

    Required arguments:
        mean  -- mean current amplitude in nA
        stdev -- standard deviation of the current amplitude in nA

    Optional arguments:
        dt    -- interval between updates of the current amplitude. Must be a
                 multiple of the simulation time step. If not specified, the
                 simulation time step will be used.
        start -- onset of the current injection in ms. If not specified, the
                 current will begin at the start of the simulation.
        stop  -- end of the current injection in ms. If not specified, the
                 current will continue until the end of the simulation.
        rng   -- an RNG object from the `pyNN.random` module. For speed, this
                 should be a `NativeRNG` instance (uses the simulator's internal
                 random number generator). For reproducibility across
                 simulators, use one of the other RNG types. If not specified,
                 a NumpyRNG is used.
    """
    self.rng = rng or NumpyRNG()
    self.dt = dt or state.dt
    if dt:
        # the update interval must be a multiple of the simulation time step
        assert self.dt % state.dt == 0
    self.start = start
    self.stop = stop
    self.mean = mean
    self.stdev = stdev
    if isinstance(rng, NativeRNG):
        self._device = nest.Create('noise_generator')
        nest.SetStatus(self._device, {'mean': mean * 1000.0,
                                      'std': stdev * 1000.0,
                                      'start': self.delay_correction(start),
                                      'dt': self.dt})
        if stop:
            nest.SetStatus(self._device, {'stop': self.delay_correction(stop)})
    else:
        raise NotImplementedError(
            "Only using a NativeRNG is currently supported.")
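# Usage sketch (hedged): the constructor above only supports NEST's own
# noise_generator, so a caller passes a NativeRNG explicitly. The class name
# NoisyCurrentSource is assumed here (it is not shown in the excerpt), `cells`
# is assumed to be an existing Population, and the amplitudes are illustrative.
from pyNN.random import NativeRNG

def inject_noise(cells, mean=0.5, stdev=0.2, dt=1.0, start=50.0, stop=450.0):
    noise = NoisyCurrentSource(mean=mean, stdev=stdev, dt=dt,
                               start=start, stop=stop, rng=NativeRNG())
    noise.inject_into(cells)   # standard pyNN CurrentSource method
    return noise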
def unif(n_spikes, T, rng=NumpyRNG(), rounding=False, t_min=1.0, min_isi=10.0):
    """
    Generate uniformly distributed spikes in [t_min, T), with a minimum
    inter-spike separation and optional rounding.

    pyNN.nest is generally unstable with rounding for input spikes:
    pyNN.nest errors if the lowest spike time is exactly equal to dt, and
    input spikes between 0.0 and dt are not integrated over.

    Parameters
    ----------
    n_spikes : int
    T : float
        Time interval (ms)
    rng : pyNN.random.NumpyRNG
    rounding : bool
        If True, spike times are rounded down to whole milliseconds
    t_min : float
        Lower bound on generated time values (ms)
    min_isi : float
        Minimum inter-spike separation; n_spikes * min_isi << T (default 10 ms)

    Returns
    -------
    spike_times : pyNN.parameters.Sequence (float64)
    """
    spike_times = np.empty([0], dtype=float)
    while spike_times.size < n_spikes:
        timing = rng.uniform(t_min, T)
        # Ensure minimum separation w.r.t. existing spikes
        if (spike_times.size > 0 and
                np.min(np.abs(timing - spike_times)) < min_isi):
            continue
        else:
            spike_times = np.append(spike_times, timing)
    spike_times.sort()
    if rounding:
        return Sequence(np.floor(spike_times))
    else:
        return Sequence(spike_times)
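# Usage sketch (hedged): wiring the generated Sequences into a SpikeSourceArray,
# one spike train per source cell; `sim` is assumed to be an already set-up
# pyNN backend module and the sizes are illustrative.
from pyNN.random import NumpyRNG

def make_uniform_inputs(sim, n_sources=5, n_spikes=10, T=500.0, seed=42):
    rng = NumpyRNG(seed=seed)
    spike_trains = [unif(n_spikes, T, rng=rng) for _ in range(n_sources)]
    return sim.Population(n_sources,
                          sim.SpikeSourceArray(spike_times=spike_trains),
                          label="uniform_inputs")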
def fixed_number_post_with_replacement(sim): sim.setup() p1 = sim.Population(5, sim.IF_cond_exp()) p2 = sim.Population(7, sim.IF_cond_exp()) synapse_type1 = sim.StaticSynapse(weight=0.5, delay=0.5) connector1 = sim.FixedNumberPostConnector(n=9, with_replacement=True, rng=NumpyRNG()) prj1 = sim.Projection(p1, p2, connector1, synapse_type1) print("Projection #1\n", connection_plot(prj1)) delays = prj1.get('delay', format='list', gather=False) assert_equal(len(delays), connector1.n * p1.size) weights = prj1.get('weight', format='array', gather=False) for row in weights: row[np.isnan(row)] = 0 assert_equal(row.sum(), 4.5) weights2 = prj1.get('weight', format='array', gather=False, multiple_synapses='min') for row in weights2: n_nan = np.isnan(row).sum() row[np.isnan(row)] = 0 assert_equal(row.sum(), (row.size - n_nan)*0.5)
def _generate_random_values( self, values, n_connections, pre_vertex_slice, post_vertex_slice): """ :param ~pyNN.random.NumpyRNG values: :param int n_connections: :param ~pacman.model.graphs.common.Slice pre_vertex_slice: :param ~pacman.model.graphs.common.Slice post_vertex_slice: :rtype: ~numpy.ndarray """ key = (id(pre_vertex_slice), id(post_vertex_slice), id(values)) seed = self.__param_seeds.get(key, None) if seed is None: seed = int(values.rng.next() * 0x7FFFFFFF) self.__param_seeds[key] = seed new_rng = NumpyRNG(seed) copy_rd = RandomDistribution( values.name, parameters_pos=None, rng=new_rng, **values.parameters) if n_connections == 1: return numpy.array([copy_rd.next(1)], dtype="float64") return copy_rd.next(n_connections)
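# Sketch (hedged): the caching above derives one child seed per
# (pre-slice, post-slice, distribution) key, so repeated expansions of the same
# slice pair draw identical values. A standalone illustration of that idea,
# with illustrative parameters:
from pyNN.random import NumpyRNG, RandomDistribution

parent = NumpyRNG(seed=1234)
child_seed = int(parent.next() * 0x7FFFFFFF)      # drawn once per cache key
child_rd = RandomDistribution("uniform", low=0.0, high=1.0,
                              rng=NumpyRNG(child_seed))
print(child_rd.next(5))                           # reproducible for a fixed child_seed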
def __init__(self, connector, pre_population, post_population, prepop_is_view, postpop_is_view, rng, synapse_dynamics, synapse_type, is_virtual_machine, weights=None, delays=None): """ :param AbstractConnector connector: The connector connected to the synapse :param pre_population: The population sending spikes to the synapse :type pre_population: ~spynnaker.pyNN.models.populations.Population or ~spynnaker.pyNN.models.populations.PopulationView :param post_population: The population hosting the synapse :type post_population: ~spynnaker.pyNN.models.populations.Population or ~spynnaker.pyNN.models.populations.PopulationView :param bool prepop_is_view: Whether the ``pre_population`` is a view :param bool postpop_is_view: Whether the ``post_population`` is a view :param rng: Seeded random number generator :type rng: ~pyNN.random.NumpyRNG or None :param AbstractSynapseDynamics synapse_dynamics: The dynamic behaviour of the synapse :param int synapse_type: The type of the synapse :param bool is_virtual_machine: Whether the machine is virtual :param weights: The synaptic weights :type weights: float or list(float) or ~numpy.ndarray(float) or None :param delays: The total synaptic delays :type delays: float or list(float) or ~numpy.ndarray(float) or None """ self.__connector = connector self.__pre_population = pre_population self.__post_population = post_population self.__prepop_is_view = prepop_is_view self.__postpop_is_view = postpop_is_view self.__rng = (rng or NumpyRNG()) self.__synapse_dynamics = synapse_dynamics self.__synapse_type = synapse_type self.__weights = weights self.__delays = delays self.__is_virtual_machine = is_virtual_machine # Make a list of holders to be updated self.__pre_run_connection_holders = list()
class param:
    seed = 8658764              # Seed for reproduction of random numbers
    rng = NumpyRNG(seed=seed)   # Use the seed so runs are reproducible
    input_nr = 9                # Number of input neurons
    readout_nr = 2              # Number of readout neurons
    reservoir_nr = 50           # Number of reservoir neurons
    simulation_time = 19.0      # Simulation time for each input
    dt = 1                      # Timestep in simulation
    res_pconn = 0.1             # Sparse connection probability for the reservoir

    images_train_nr = 9         # Number of training images to train with,
                                # must be a factor of 3
    images_test_nr = 9          # Number of test images
    images_train = generate_labeledImages(images_train_nr)
    images_test = generate_labeledImages(images_test_nr)

    # If the network uses excitatory and inhibitory neurons
    res_exc_nr = int(math.ceil(reservoir_nr * 0.8))    # Number of excitatory neurons
    res_inh_nr = int(math.floor(reservoir_nr * 0.2))   # Number of inhibitory neurons
    print('exc:', res_exc_nr)
def test_initial_value(self): sim.setup(timestep=1.0) pop = sim.Population(5, sim.IF_curr_exp(), label="pop_1") self.assertEqual([-65, -65, -65, -65, -65], pop.get_initial_value("v")) view = PopulationView(pop, [1, 3], label="Odds") view2 = PopulationView(pop, [1, 2], label="OneTwo") view_iv = view.initial_values self.assertEqual(1, len(view_iv)) self.assertEqual([-65, -65], view_iv["v"]) view.initialize(v=-60) self.assertEqual([-65, -60, -65, -60, -65], pop.get_initial_value("v")) self.assertEqual([-60, -60], view.initial_values["v"]) self.assertEqual([-60, -65], view2.initial_values["v"]) rand_distr = RandomDistribution("uniform", parameters_pos=[-65.0, -55.0], rng=NumpyRNG(seed=85524)) view.initialize(v=rand_distr) self.assertEqual([-64.43349869042906, -63.663421790102184], view.initial_values["v"]) view.initialize(v=lambda i: -65 + i / 10.0) self.assertEqual([-64.9, -64.7], view.initial_values["v"]) sim.end()
def obtain_synapses(wiring_plan): rng = NumpyRNG(seed=64754) delay_distr = RandomDistribution('normal', [2, 1e-1], rng=rng) weight_distr = RandomDistribution('normal', [45, 1e-1], rng=rng) flat_iter = [ (i,j,k,xaxis) for i,j in enumerate(filtered) for k,xaxis in enumerate(j) ] index_exc = list(set( source for (source,j,target,xaxis) in flat_iter if xaxis==1 or xaxis == 2 )) index_inh = list(set( source for (source,j,target,xaxis) in flat_iter if xaxis==-1 or xaxis == -2 )) EElist = [] IIlist = [] EIlist = [] IElist = [] for (source,j,target,xaxis) in flat_iter: delay = delay_distr.next() weight = 1.0 # will be updated later. if xaxis==1 or xaxis == 2: if target in index_inh: EIlist.append((source,target,delay,weight)) else: EElist.append((source,target,delay,weight)) if xaxis==-1 or xaxis == -2: if target in index_exc: IElist.append((source,target,delay,weight)) else: IIlist.append((source,target,delay,weight)) conn_ee = sim.FromListConnector(EElist) conn_ie = sim.FromListConnector(IElist) conn_ei = sim.FromListConnector(EIlist) conn_ii = sim.FromListConnector(IIlist) return (conn_ee, conn_ie, conn_ei, conn_ii,index_exc,index_inh)
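# Usage sketch (hedged): the connector lists returned above would typically be
# turned into four Projections on a single cell population; `sim` is the same
# backend module used inside obtain_synapses, `all_cells` is assumed to exist,
# and the synapse parameters are illustrative.
def build_projections(all_cells, conn_ee, conn_ie, conn_ei, conn_ii):
    exc_syn = sim.StaticSynapse(weight=0.5, delay=1.0)
    inh_syn = sim.StaticSynapse(weight=0.5, delay=1.0)
    return [
        sim.Projection(all_cells, all_cells, conn_ee, exc_syn, receptor_type='excitatory'),
        sim.Projection(all_cells, all_cells, conn_ei, exc_syn, receptor_type='excitatory'),
        sim.Projection(all_cells, all_cells, conn_ie, inh_syn, receptor_type='inhibitory'),
        sim.Projection(all_cells, all_cells, conn_ii, inh_syn, receptor_type='inhibitory'),
    ]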
dfEE.loc[0].keys() dfm = dfEE.as_matrix() rcls = dfm[:, :1] # real cell labels. rcls = rcls[1:] rcls = {k: v for k, v in enumerate(rcls)} # real cell labels, cast to dictionary import pickle with open('cell_names.p', 'wb') as f: pickle.dump(rcls, f) import pandas as pd pd.DataFrame(rcls).to_csv('cell_names.csv', index=False) filtered = dfm[:, 3:] filtered = filtered[1:] rng = NumpyRNG(seed=64754) delay_distr = RandomDistribution('normal', [45, 1e-1], rng=rng) index_exc = [i for i, d in enumerate(dfm) if '+' in d[0]] index_inh = [i for i, d in enumerate(dfm) if '-' in d[0]] EElist = [] IIlist = [] EIlist = [] IElist = [] for i, j in enumerate(filtered): for k, xaxis in enumerate(j): if xaxis == 1 or xaxis == 2: source = i target = k
def setupNetwork(): node = pynn.setup(timestep=0.1, min_delay=1.0, max_delay=1.0, debug=True, quit_on_end=False) print "Process with rank %d running on %s" % (node, socket.gethostname()) rng = NumpyRNG(seed=seed, parallel_safe=True) print "[%d] Creating populations" % node # 1) create excitatory populations l0_exc_population = pynn.Population(num['l0_exc_neurons'], native_cell_type('aeif_cond_exp'), cell_params_adex, label="exc0") l0_exc_population.record() l0_exc_population.record_v() l1_exc_population = pynn.Population(num['l1_exc_neurons'], pynn.IF_cond_exp, cell_params, label="exc1") l1_exc_population.record() # 2) create inhibitory population l0_inh_population = pynn.Population(num['l0_inh_neurons'], pynn.IF_cond_exp, cell_params, label="inh0") l0_inh_population.record() l1_inh_population = pynn.Population(num['l1_inh_neurons'], pynn.IF_cond_exp, cell_params, label="inh1") l1_inh_population.record() # 3) exc0 -> inh0 inh_connector = pynn.FixedProbabilityConnector(p_exc0_inh0, weights=w_exc0_inh0) l0_exc_inh_projection = pynn.Projection(l0_exc_population, l0_inh_population, inh_connector) # 4) exc1 -> inh1 inh_connector = pynn.FixedProbabilityConnector(p_exc1_inh1, weights=w_exc1_inh1) l1_exc_inh_projection = pynn.Projection(l1_exc_population, l1_inh_population, inh_connector) # 5) exc0 -> exc0 exc_connector = pynn.AllToAllConnector(weights=0.0) l0_exc_exc_projection = pynn.Projection(l0_exc_population, l0_exc_population, exc_connector) exc0_exc0_weights = l0_exc_exc_projection.getWeights() exc0_exc0_weights = connect_gauss(num['l0_exc_neurons'], num['l0_exc_neurons'], sigma_exc0_exc0, w_exc0_exc0_max, num['l0_exc_maxneighbors'], exc0_exc0_weights, True) l0_exc_exc_projection.setWeights(exc0_exc0_weights) # 6) exc1 -> exc1 exc_connector = pynn.AllToAllConnector(weights=0.0) l1_exc_exc_projection = pynn.Projection(l1_exc_population, l1_exc_population, exc_connector) exc1_exc1_weights = l1_exc_exc_projection.getWeights() exc1_exc1_weights = connect_gauss(num['l1_exc_neurons'], num['l1_exc_neurons'], sigma_exc1_exc1, w_exc1_exc1_max, num['l1_exc_maxneighbors'], exc1_exc1_weights, True) l1_exc_exc_projection.setWeights(exc1_exc1_weights) # 7) inh0 -> exc0 connector = pynn.FixedProbabilityConnector(p_inh0_exc0, weights=w_inh0_exc0) l0_inh_exc_projection = pynn.Projection(l0_inh_population, l0_exc_population, connector, target="inhibitory") # 8) inh1 -> exc1 connector = pynn.FixedProbabilityConnector(p_inh1_exc1, weights=w_inh1_exc1) l1_inh_exc_projection = pynn.Projection(l1_inh_population, l1_exc_population, connector, target="inhibitory") # 9) create input population input_population = pynn.Population(num['inputs'], pynn.SpikeSourcePoisson, {'rate': input_rate}, label="input") input_population.record() # 10) input -> exc0 stdp_model = pynn.STDPMechanism( timing_dependence=pynn.SpikePairRule(tau_plus=10.0, tau_minus=15.0), weight_dependence=pynn.AdditiveWeightDependence(w_min=0, w_max=w_inp_exc0_max, A_plus=0.012, A_minus=0.012)) connector = pynn.AllToAllConnector(weights=0.0) input_projection = pynn.Projection( input_population, l0_exc_population, connector, rng=rng, synapse_dynamics=pynn.SynapseDynamics(slow=stdp_model)) input_weights = input_projection.getWeights() print "[%d] Creating input projections" % node input_weights = connect_gauss(num['inputs'], num['l0_exc_neurons'], sigma_inp_exc0, w_inp_exc0_peak, num['inputs_maxneighbors'], input_weights, False) input_projection.setWeights(input_weights) # 11) exc0 -> exc1 stdp_model = pynn.STDPMechanism( 
timing_dependence=pynn.SpikePairRule(tau_plus=20.0, tau_minus=20.0), weight_dependence=pynn.AdditiveWeightDependence(w_min=0, w_max=w_exc0_exc1_max, A_plus=0.012, A_minus=0.012)) connector = pynn.AllToAllConnector(weights=0.0) l1_projection = pynn.Projection( l0_exc_population, l1_exc_population, connector, rng=rng, synapse_dynamics=pynn.SynapseDynamics(slow=stdp_model)) exc0_exc1_weights = l1_projection.getWeights() print "[%d] Creating input projections" % node exc0_exc1_weights = connect_gauss(num['l0_exc_neurons'], num['l1_exc_neurons'], sigma_exc0_exc1, w_exc0_exc1_peak, num['l0_l1_maxneighbors'], exc0_exc1_weights, False) l1_projection.setWeights(exc0_exc1_weights) return node, l0_exc_population, l1_exc_population, l0_inh_population, l1_inh_population, input_population, input_projection, l1_projection
import pyNN.brian as sim
from pyNN.random import RandomDistribution, NumpyRNG

Exc_in = 32
Inh_in = 32
noSpikes = 20  # number of spikes per channel per simulation run

# generate uniformly distributed spike times with Exc_in + Inh_in channels
# and noSpikes spikes for each channel
stimSpikes = RandomDistribution(
    'uniform', low=0, high=500.0, rng=NumpyRNG(seed=72386)
).next([Exc_in + Inh_in, noSpikes])
# print stimSpikes

for i in range(Exc_in):
    if i == 0:
        Excinp = sim.Population(
            1, sim.SpikeSourceArray(spike_times=stimSpikes[i, :]))
    else:
        spike_source = sim.Population(
            1, sim.SpikeSourceArray(spike_times=stimSpikes[i, :]))
        Excinp = Excinp + spike_source

for i in range(Inh_in):
    if i == 0:
        Inhinp = sim.Population(
            1, sim.SpikeSourceArray(spike_times=stimSpikes[i + Exc_in, :]))
    else:
        spike_source = sim.Population(
            1, sim.SpikeSourceArray(spike_times=stimSpikes[i + Exc_in, :]))
        Inhinp = Inhinp + spike_source
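# Equivalent sketch (hedged): the per-channel loops above can usually be written
# as one Population per group by giving SpikeSourceArray one spike-time list per
# neuron; this is assumed to behave like the Assemblies built above.
Excinp_alt = sim.Population(
    Exc_in, sim.SpikeSourceArray(spike_times=[stimSpikes[i, :] for i in range(Exc_in)]))
Inhinp_alt = sim.Population(
    Inh_in, sim.SpikeSourceArray(spike_times=[stimSpikes[i + Exc_in, :] for i in range(Inh_in)]))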
celltype(**cell_params), label="Excitatory_Cells") inh_cells = sim.Population(n_inh, celltype(**cell_params), label="Inhibitory_Cells") if options.benchmark == "COBA": ext_stim = sim.Population(20, sim.SpikeSourcePoisson(rate=rate, duration=stim_dur), label="expoisson") rconn = 0.01 ext_conn = sim.FixedProbabilityConnector(rconn) ext_syn = sim.StaticSynapse(weight=0.1) print "%s Initialising membrane potential to random values..." % node_id rng = NumpyRNG(seed=rngseed, parallel_safe=parallel_safe) uniformDistr = RandomDistribution('uniform', [v_reset, v_thresh], rng=rng) exc_cells.initialize(v=uniformDistr) inh_cells.initialize(v=uniformDistr) print "%s Connecting populations..." % node_id progress_bar = ProgressBar(width=20) connector = sim.FixedProbabilityConnector(pconn, rng=rng, callback=progress_bar) exc_syn = sim.StaticSynapse(weight=w_exc, delay=delay) inh_syn = sim.StaticSynapse(weight=w_inh, delay=delay) connections = {} connections['e2e'] = sim.Projection(exc_cells, exc_cells,
import spynnaker8 as sim import spynnaker8.spynnaker_plotting as splot import pyNN.utility.plotting as plot import matplotlib.pyplot as plt from pyNN.random import RandomDistribution, NumpyRNG # Remove random effect for testing # Set to None to randomize rng = NumpyRNG(seed=1) # Choose the number of neurons to be simulated in the network. n_neurons = 100 n_exc = int(round(n_neurons * 0.8)) n_inh = int(round(n_neurons * 0.2)) simtime = 5000 # Set up the simulation to use 0.1ms timesteps. sim.setup(timestep=0.1) # Create an excitatory population with 80% of the neurons and # an inhibitory population with 20% of the neurons. pop_exc = sim.Population(n_exc, sim.IF_curr_exp(), label="Excitatory") pop_inh = sim.Population(n_inh, sim.IF_curr_exp(), label="Inhibitory") # Create excitatory poisson stimulation population with 80% # of the neurons and # an inhibitory poisson stimulation population with 20% of the neurons, # both with a rate of 1000Hz # TODO RATE? if rng is None: seed = None
def do_run(plot): p.setup(timestep=1.0) cell_params_lif = { 'cm': 0.25, 'i_offset': 0.0, 'tau_m': 20.0, 'tau_refrac': 2.0, 'tau_syn_E': 5.0, 'tau_syn_I': 5.0, 'v_reset': -70.0, 'v_rest': -65.0, 'v_thresh': -50.0 } def create_grid(n, label, dx=1.0, dy=1.0): grid_structure = p.Grid2D(dx=dx, dy=dy, x0=0.0, y0=0.0) return p.Population(n * n, p.IF_curr_exp(**cell_params_lif), structure=grid_structure, label=label) # Parameters n = 5 weight_to_spike = 2.0 delay = 2 runtime = 1000 p.set_number_of_neurons_per_core(p.IF_curr_exp, 100) # Network population small_world = create_grid(n, 'small_world') # SpikeInjector injectionConnection = [(0, 0)] spikeArray = {'spike_times': [[0]]} inj_pop = p.Population(1, p.SpikeSourceArray(**spikeArray), label='inputSpikes_1') # Injector projection p.Projection(inj_pop, small_world, p.FromListConnector(injectionConnection), p.StaticSynapse(weight=weight_to_spike, delay=delay)) # Connectors degree = 2.0 rewiring = 0.4 rng = NumpyRNG(seed=1) small_world_connector = p.SmallWorldConnector(degree, rewiring, rng=rng) # Projection for small world grid sw_pro = p.Projection(small_world, small_world, small_world_connector, p.StaticSynapse(weight=2.0, delay=5)) small_world.record(['v', 'spikes']) p.run(runtime) v = small_world.get_data('v') spikes = small_world.get_data('spikes') weights = sw_pro.get('weight', 'list') if plot: # pylint: disable=no-member Figure( # raster plot of the presynaptic neuron spike times Panel(spikes.segments[0].spiketrains, yticks=True, markersize=0.2, xlim=(0, runtime), xticks=True), # membrane potential of the postsynaptic neuron Panel(v.segments[0].filter(name='v')[0], ylabel="Membrane potential (mV)", data_labels=[small_world.label], yticks=True, xlim=(0, runtime), xticks=True), title="Simple small world connector", annotations="Simulated with {}".format(p.name())) plt.show() p.end() return v, spikes, weights
def setupNetwork(): node = pynn.setup(timestep=0.1, min_delay=1.0, max_delay=1.0, debug=True, quit_on_end=False) print "Process with rank %d running on %s" % (node, socket.gethostname()) rng = NumpyRNG(seed=seed, parallel_safe=True) print "[%d] Creating populations" % node # 1) create excitatory populations l0_exc_population = pynn.Population(num['l0_exc_neurons'], pynn.IF_cond_exp, cell_params, label="exc0") l0_exc_population.record() #l0_exc_population.record_v() l1_exc_population = pynn.Population(num['l1_exc_neurons'], pynn.IF_cond_exp, cell_params, label="exc1") l1_exc_population.record() # 2) create inhibitory population l0_inh_population = pynn.Population(num['l0_inh_neurons'], pynn.IF_cond_exp, cell_params, label="inh0") l0_inh_population.record() l1_inh_population = pynn.Population(num['l1_inh_neurons'], pynn.IF_cond_exp, cell_params, label="inh1") l1_inh_population.record() # 3) exc0 -> inh0 inh_connector = pynn.FixedProbabilityConnector(p_exc0_inh0, weights=w_exc0_inh0) l0_exc_inh_projection = pynn.Projection(l0_exc_population, l0_inh_population, inh_connector) # 4) exc1 -> inh1 inh_connector = pynn.FixedProbabilityConnector(p_exc1_inh1, weights=w_exc1_inh1) l1_exc_inh_projection = pynn.Projection(l1_exc_population, l1_inh_population, inh_connector) # 5) exc0 -> exc0 exc_connector = pynn.FixedProbabilityConnector( p_exc0_exc0, weights=w_exc0_exc0, allow_self_connections=False) l0_exc_exc_projection = pynn.Projection(l0_exc_population, l0_exc_population, exc_connector) # 6) exc1 -> exc1 #l1_exc_exc_projection = pynn.Projection(l1_exc_population,l1_exc_population,exc_connector) #for i in range(num['nodes']): # exc_inh_projections.append(Projection(exc_populations[i],inh_population,inh_connector)) # for j in range(i-num['neighbours'],i+num['neighbours']+1): # if j != i: # exc_connector = OneToOneConnector(weights=1.0/abs(j-i)) # if j<0: # j+=num['nodes'] # if j> num['nodes']-1: # j-=num['nodes'] # exc_exc_projections.append(Projection(exc_populations[i],exc_populations[j],exc_connector)) # 7) inh0 -> exc0 connector = pynn.FixedProbabilityConnector(p_inh0_exc0, weights=w_inh0_exc0) l0_inh_exc_projection = pynn.Projection(l0_inh_population, l0_exc_population, connector, target="inhibitory") # 8) inh1 -> exc1 connector = pynn.FixedProbabilityConnector(p_inh1_exc1, weights=w_inh1_exc1) l1_inh_exc_projection = pynn.Projection(l1_inh_population, l1_exc_population, connector, target="inhibitory") # 9) create input population input_population = pynn.Population(num['inputs'], pynn.SpikeSourcePoisson, {'rate': input_rate}, label="input") input_population.record() # 10) input -> exc0 stdp_model = pynn.STDPMechanism( timing_dependence=pynn.SpikePairRule(tau_plus=20.0, tau_minus=20.0), weight_dependence=pynn.AdditiveWeightDependence(w_min=0, w_max=w_inp_exc0_max, A_plus=0.01, A_minus=0.012)) connector = pynn.AllToAllConnector(weights=pynn.RandomDistribution( distribution='uniform', parameters=[0.00, w_inp_exc0_max], rng=rng)) #connector = pynn.FixedProbabilityConnector(p_inp_exc0,weights=w_inp_exc0) input_projection = pynn.Projection( input_population, l0_exc_population, connector, rng=rng, #synapse_dynamics=pynn.SynapseDynamics(slow=stdp_model) ) # 11) exc0 -> exc1 stdp_model = pynn.STDPMechanism( timing_dependence=pynn.SpikePairRule(tau_plus=20.0, tau_minus=20.0), weight_dependence=pynn.AdditiveWeightDependence(w_min=0, w_max=w_exc0_exc1_max, A_plus=0.01, A_minus=0.012)) connector = pynn.AllToAllConnector(weights=pynn.RandomDistribution( distribution='uniform', parameters=[0.00, 
w_exc0_exc1_max], rng=rng)) #connector = pynn.FixedProbabilityConnector(0.05, weights=0.01) l1_projection = pynn.Projection( l0_exc_population, l1_exc_population, connector, rng=rng, #synapse_dynamics=pynn.SynapseDynamics(slow=stdp_model) ) return node, l0_exc_population, l1_exc_population, l0_inh_population, l1_inh_population, input_population, input_projection, l1_projection
def __init__(self, boundary, origin=(0.0, 0.0, 0.0), rng=None): assert isinstance(boundary, Shape) assert len(origin) == 3 self.boundary = boundary self.origin = origin self.rng = rng or NumpyRNG()
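# Usage sketch (hedged): assuming this is pyNN.space.RandomStructure, a layout
# with a reproducible RNG is built and handed to a Population like so.
from pyNN.random import NumpyRNG
from pyNN.space import RandomStructure, Cuboid

layout = RandomStructure(boundary=Cuboid(width=500.0, height=500.0, depth=100.0),
                         origin=(0.0, 0.0, 0.0),
                         rng=NumpyRNG(seed=76454))
# e.g. sim.Population(100, sim.SpikeSourcePoisson(rate=5.0), structure=layout)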
def setup(self, sim): # Create matrix of synaptic weights self.w = create_weight_matrix() model = getattr(sim, 'IF_curr_exp') script_rng = NumpyRNG(seed=6508015, parallel_safe=parallel_safe) distr = RandomDistribution('normal', [V0_mean, V0_sd], rng=script_rng) # Create cortical populations self.pops = {} for layer in layers: self.pops[layer] = {} for pop in pops: self.pops[layer][pop] = sim.Population( int(N_full[layer][pop] * N_scaling), model, cellparams=neuron_params) self.pops[layer][pop].initialize(v=distr) # Store whether population is inhibitory or excitatory self.pops[layer][pop].annotate(type=pop) this_pop = self.pops[layer][pop] # Spike recording if record_fraction: num_spikes = int(round(this_pop.size * frac_record_spikes)) else: num_spikes = n_record this_pop[0:num_spikes].record('spikes') # Membrane potential recording if record_v: if record_fraction: num_v = int(round(this_pop.size * frac_record_v)) else: num_v = n_record_v this_pop[0:num_v].record('v') # Create thalamic population if thalamic_input: self.thalamic_population = sim.Population( thal_params['n_thal'], sim.SpikeSourcePoisson, { 'rate': thal_params['rate'], 'start': thal_params['start'], 'duration': thal_params['duration'] }) # Compute DC input before scaling if input_type == 'DC': self.DC_amp = {} for target_layer in layers: self.DC_amp[target_layer] = {} for target_pop in pops: self.DC_amp[target_layer][target_pop] = bg_rate * \ K_ext[target_layer][target_pop] * w_mean * neuron_params['tau_syn_E'] / \ 1000. else: self.DC_amp = { 'L23': { 'E': 0., 'I': 0. }, 'L4': { 'E': 0., 'I': 0. }, 'L5': { 'E': 0., 'I': 0. }, 'L6': { 'E': 0., 'I': 0. } } # Scale and connect # In-degrees of the full-scale model K_full = scaling.get_indegrees() if K_scaling != 1: self.w, self.w_ext, self.K_ext, self.DC_amp = scaling.adjust_w_and_ext_to_K( K_full, K_scaling, self.w, self.DC_amp) else: self.w_ext = w_ext if sim.rank() == 0: print('w: %s' % self.w) for target_layer in layers: for target_pop in pops: target_index = structure[target_layer][target_pop] this_pop = self.pops[target_layer][target_pop] # External inputs if input_type == 'DC' or K_scaling != 1: this_pop.set( i_offset=self.DC_amp[target_layer][target_pop]) if input_type == 'poisson': poisson_generator = sim.Population( this_pop.size, sim.SpikeSourcePoisson, { 'rate': bg_rate * self.K_ext[target_layer][target_pop] }) conn = sim.OneToOneConnector() syn = sim.StaticSynapse(weight=self.w_ext) sim.Projection(poisson_generator, this_pop, conn, syn, receptor_type='excitatory') if thalamic_input: # Thalamic inputs if sim.rank() == 0: print('creating thalamic connections to %s%s' % (target_layer, target_pop)) C_thal = thal_params['C'][target_layer][target_pop] n_target = N_full[target_layer][target_pop] K_thal = round( np.log(1 - C_thal) / np.log( (n_target * thal_params['n_thal'] - 1.) 
/ (n_target * thal_params['n_thal']))) / n_target FixedTotalNumberConnect(sim, self.thalamic_population, this_pop, K_thal, w_ext, w_rel * w_ext, d_mean['E'], d_sd['E']) # Recurrent inputs for source_layer in layers: for source_pop in pops: source_index = structure[source_layer][source_pop] if sim.rank() == 0: print('creating connections from %s%s to %s%s' % (source_layer, source_pop, target_layer, target_pop)) weight = self.w[target_index][source_index] if source_pop == 'E' and source_layer == 'L4' and target_layer == 'L23' and target_pop == 'E': w_sd = weight * w_rel_234 else: w_sd = abs(weight * w_rel) FixedTotalNumberConnect( sim, self.pops[source_layer][source_pop], self.pops[target_layer][target_pop], K_full[target_index][source_index] * K_scaling, weight, w_sd, d_mean[source_pop], d_sd[source_pop])
def sim_runner(wgf): wg = wgf import pyNN.neuron as sim nproc = sim.num_processes() node = sim.rank() print(nproc) import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams.update({'font.size':16}) #import mpi4py #threads = sim.rank() threads = 1 rngseed = 98765 parallel_safe = False #extra = {'threads' : threads} import os import pandas as pd import sys import numpy as np from pyNN.neuron import STDPMechanism import copy from pyNN.random import RandomDistribution, NumpyRNG import pyNN.neuron as neuron from pyNN.neuron import h from pyNN.neuron import StandardCellType, ParameterSpace from pyNN.random import RandomDistribution, NumpyRNG from pyNN.neuron import STDPMechanism, SpikePairRule, AdditiveWeightDependence, FromListConnector, TsodyksMarkramSynapse from pyNN.neuron import Projection, OneToOneConnector from numpy import arange import pyNN from pyNN.utility import get_simulator, init_logging, normalized_filename import random import socket #from neuronunit.optimization import get_neab import networkx as nx sim = pyNN.neuron # Get some hippocampus connectivity data, based on a conversation with # academic researchers on GH: # https://github.com/Hippocampome-Org/GraphTheory/issues?q=is%3Aissue+is%3Aclosed # scrape hippocamome connectivity data, that I intend to use to program neuromorphic hardware. # conditionally get files if they don't exist. path_xl = '_hybrid_connectivity_matrix_20171103_092033.xlsx' if not os.path.exists(path_xl): os.system('wget https://github.com/Hippocampome-Org/GraphTheory/files/1657258/_hybrid_connectivity_matrix_20171103_092033.xlsx') xl = pd.ExcelFile(path_xl) dfEE = xl.parse() dfEE.loc[0].keys() dfm = dfEE.as_matrix() rcls = dfm[:,:1] # real cell labels. rcls = rcls[1:] rcls = { k:v for k,v in enumerate(rcls) } # real cell labels, cast to dictionary import pickle with open('cell_names.p','wb') as f: pickle.dump(rcls,f) import pandas as pd pd.DataFrame(rcls).to_csv('cell_names.csv', index=False) filtered = dfm[:,3:] filtered = filtered[1:] rng = NumpyRNG(seed=64754) delay_distr = RandomDistribution('normal', [2, 1e-1], rng=rng) weight_distr = RandomDistribution('normal', [45, 1e-1], rng=rng) sanity_e = [] sanity_i = [] EElist = [] IIlist = [] EIlist = [] IElist = [] for i,j in enumerate(filtered): for k,xaxis in enumerate(j): if xaxis == 1 or xaxis == 2: source = i sanity_e.append(i) target = k if xaxis ==-1 or xaxis == -2: sanity_i.append(i) source = i target = k index_exc = list(set(sanity_e)) index_inh = list(set(sanity_i)) import pickle with open('cell_indexs.p','wb') as f: returned_list = [index_exc, index_inh] pickle.dump(returned_list,f) import numpy a = numpy.asarray(index_exc) numpy.savetxt('pickles/'+str(k)+'excitatory_nunber_labels.csv', a, delimiter=",") import numpy a = numpy.asarray(index_inh) numpy.savetxt('pickles/'+str(k)+'inhibitory_nunber_labels.csv', a, delimiter=",") for i,j in enumerate(filtered): for k,xaxis in enumerate(j): if xaxis==1 or xaxis == 2: source = i sanity_e.append(i) target = k delay = delay_distr.next() weight = 1.0 if target in index_inh: EIlist.append((source,target,delay,weight)) else: EElist.append((source,target,delay,weight)) if xaxis==-1 or xaxis == -2: sanity_i.append(i) source = i target = k delay = delay_distr.next() weight = 1.0 if target in index_exc: IElist.append((source,target,delay,weight)) else: IIlist.append((source,target,delay,weight)) internal_conn_ee = sim.FromListConnector(EElist) ee = internal_conn_ee.conn_list ee_srcs = ee[:,0] ee_tgs = ee[:,1] 
internal_conn_ie = sim.FromListConnector(IElist) ie = internal_conn_ie.conn_list ie_srcs = set([ int(e[0]) for e in ie ]) ie_tgs = set([ int(e[1]) for e in ie ]) internal_conn_ei = sim.FromListConnector(EIlist) ei = internal_conn_ei.conn_list ei_srcs = set([ int(e[0]) for e in ei ]) ei_tgs = set([ int(e[1]) for e in ei ]) internal_conn_ii = sim.FromListConnector(IIlist) ii = internal_conn_ii.conn_list ii_srcs = set([ int(e[0]) for e in ii ]) ii_tgs = set([ int(e[1]) for e in ii ]) for e in internal_conn_ee.conn_list: assert e[0] in ee_srcs assert e[1] in ee_tgs for i in internal_conn_ii.conn_list: assert i[0] in ii_srcs assert i[1] in ii_tgs ml = len(filtered[1])+1 pre_exc = [] post_exc = [] pre_inh = [] post_inh = [] rng = NumpyRNG(seed=64754) delay_distr = RandomDistribution('normal', [2, 1e-1], rng=rng) plot_EE = np.zeros(shape=(ml,ml), dtype=bool) plot_II = np.zeros(shape=(ml,ml), dtype=bool) plot_EI = np.zeros(shape=(ml,ml), dtype=bool) plot_IE = np.zeros(shape=(ml,ml), dtype=bool) for i in EElist: plot_EE[i[0],i[1]] = int(0) #plot_ss[i[0],i[1]] = int(1) if i[0]!=i[1]: # exclude self connections plot_EE[i[0],i[1]] = int(1) pre_exc.append(i[0]) post_exc.append(i[1]) assert len(pre_exc) == len(post_exc) for i in IIlist: plot_II[i[0],i[1]] = int(0) if i[0]!=i[1]: plot_II[i[0],i[1]] = int(1) pre_inh.append(i[0]) post_inh.append(i[1]) for i in IElist: plot_IE[i[0],i[1]] = int(0) if i[0]!=i[1]: # exclude self connections plot_IE[i[0],i[1]] = int(1) pre_inh.append(i[0]) post_inh.append(i[1]) for i in EIlist: plot_EI[i[0],i[1]] = int(0) if i[0]!=i[1]: plot_EI[i[0],i[1]] = int(1) pre_exc.append(i[0]) post_exc.append(i[1]) plot_excit = plot_EI + plot_EE plot_inhib = plot_IE + plot_II assert len(pre_inh) == len(post_inh) num_exc = [ i for i,e in enumerate(plot_excit) if sum(e) > 0 ] num_inh = [ y for y,i in enumerate(plot_inhib) if sum(i) > 0 ] # the network is dominated by inhibitory neurons, which is unusual for modellers. 
assert num_inh > num_exc assert np.sum(plot_inhib) > np.sum(plot_excit) assert len(num_exc) < ml assert len(num_inh) < ml # # Plot all the Projection pairs as a connection matrix (Excitatory and Inhibitory Connections) import pickle with open('graph_inhib.p','wb') as f: pickle.dump(plot_inhib,f, protocol=2) import pickle with open('graph_excit.p','wb') as f: pickle.dump(plot_excit,f, protocol=2) #with open('cell_names.p','wb') as f: # pickle.dump(rcls,f) import pandas as pd pd.DataFrame(plot_EE).to_csv('ee.csv', index=False) import pandas as pd pd.DataFrame(plot_IE).to_csv('ie.csv', index=False) import pandas as pd pd.DataFrame(plot_II).to_csv('ii.csv', index=False) import pandas as pd pd.DataFrame(plot_EI).to_csv('ei.csv', index=False) from scipy.sparse import coo_matrix m = np.matrix(filtered[1:]) bool_matrix = np.add(plot_excit,plot_inhib) with open('bool_matrix.p','wb') as f: pickle.dump(bool_matrix,f, protocol=2) if not isinstance(m, coo_matrix): m = coo_matrix(m) Gexc_ud = nx.Graph(plot_excit) avg_clustering = nx.average_clustering(Gexc_ud)#, nodes=None, weight=None, count_zeros=True)[source] rc = nx.rich_club_coefficient(Gexc_ud,normalized=False) print('This graph structure as rich as: ',rc[0]) gexc = nx.DiGraph(plot_excit) gexcc = nx.betweenness_centrality(gexc) top_exc = sorted(([ (v,k) for k, v in dict(gexcc).items() ]), reverse=True) in_degree = gexc.in_degree() top_in = sorted(([ (v,k) for k, v in in_degree.items() ])) in_hub = top_in[-1][1] out_degree = gexc.out_degree() top_out = sorted(([ (v,k) for k, v in out_degree.items() ])) out_hub = top_out[-1][1] mean_out = np.mean(list(out_degree.values())) mean_in = np.mean(list(in_degree.values())) mean_conns = int(mean_in + mean_out/2) k = 2 # number of neighbouig nodes to wire. p = 0.25 # probability of instead wiring to a random long range destination. ne = len(plot_excit)# size of small world network small_world_ring_excit = nx.watts_strogatz_graph(ne,mean_conns,0.25) k = 2 # number of neighbouring nodes to wire. p = 0.25 # probability of instead wiring to a random long range destination. 
ni = len(plot_inhib)# size of small world network small_world_ring_inhib = nx.watts_strogatz_graph(ni,mean_conns,0.25) nproc = sim.num_processes() nproc = 8 host_name = socket.gethostname() node_id = sim.setup(timestep=0.01, min_delay=1.0)#, **extra) print("Host #%d is on %s" % (node_id + 1, host_name)) rng = NumpyRNG(seed=64754) #pop_size = len(num_exc)+len(num_inh) #num_exc = [ i for i,e in enumerate(plot_excit) if sum(e) > 0 ] #num_inh = [ y for y,i in enumerate(plot_inhib) if sum(i) > 0 ] #pop_exc = sim.Population(len(num_exc), sim.Izhikevich(a=0.02, b=0.2, c=-65, d=8, i_offset=0)) #pop_inh = sim.Population(len(num_inh), sim.Izhikevich(a=0.02, b=0.25, c=-65, d=2, i_offset=0)) #index_exc = list(set(sanity_e)) #index_inh = list(set(sanity_i)) all_cells = sim.Population(len(index_exc)+len(index_inh), sim.Izhikevich(a=0.02, b=0.2, c=-65, d=8, i_offset=0)) #all_cells = None #all_cells = pop_exc + pop_inh pop_exc = sim.PopulationView(all_cells,index_exc) pop_inh = sim.PopulationView(all_cells,index_inh) #print(pop_exc) #print(dir(pop_exc)) for pe in pop_exc: print(pe) #import pdb pe = all_cells[pe] #pdb.set_trace() #pe = all_cells[i] r = random.uniform(0.0, 1.0) pe.set_parameters(a=0.02, b=0.2, c=-65+15*r, d=8-r**2, i_offset=0) #pop_exc.append(pe) #pop_exc = sim.Population(pop_exc) for pi in index_inh: pi = all_cells[pi] #print(pi) #pi = all_cells[i] r = random.uniform(0.0, 1.0) pi.set_parameters(a=0.02+0.08*r, b=0.25-0.05*r, c=-65, d= 2, i_offset=0) #pop_inh.append(pi) #pop_inh = sim.Population(pop_inh) ''' for pe in pop_exc: r = random.uniform(0.0, 1.0) pe.set_parameters(a=0.02, b=0.2, c=-65+15*r, d=8-r**2, i_offset=0) for pi in pop_inh: r = random.uniform(0.0, 1.0) pi.set_parameters(a=0.02+0.08*r, b=0.25-0.05*r, c=-65, d= 2, i_offset=0) ''' NEXC = len(num_exc) NINH = len(num_inh) exc_syn = sim.StaticSynapse(weight = wg, delay=delay_distr) assert np.any(internal_conn_ee.conn_list[:,0]) < ee_srcs.size prj_exc_exc = sim.Projection(all_cells, all_cells, internal_conn_ee, exc_syn, receptor_type='excitatory') prj_exc_inh = sim.Projection(all_cells, all_cells, internal_conn_ei, exc_syn, receptor_type='excitatory') inh_syn = sim.StaticSynapse(weight = wg, delay=delay_distr) delay_distr = RandomDistribution('normal', [1, 100e-3], rng=rng) prj_inh_inh = sim.Projection(all_cells, all_cells, internal_conn_ii, inh_syn, receptor_type='inhibitory') prj_inh_exc = sim.Projection(all_cells, all_cells, internal_conn_ie, inh_syn, receptor_type='inhibitory') inh_distr = RandomDistribution('normal', [1, 2.1e-3], rng=rng) def prj_change(prj,wg): prj.setWeights(wg) prj_change(prj_exc_exc,wg) prj_change(prj_exc_inh,wg) prj_change(prj_inh_exc,wg) prj_change(prj_inh_inh,wg) def prj_check(prj): for w in prj.weightHistogram(): for i in w: print(i) prj_check(prj_exc_exc) prj_check(prj_exc_inh) prj_check(prj_inh_exc) prj_check(prj_inh_inh) #print(rheobase['value']) #print(float(rheobase['value']),1.25/1000.0) '''Old values that worked noise = sim.NoisyCurrentSource(mean=0.85/1000.0, stdev=5.00/1000.0, start=0.0, stop=2000.0, dt=1.0) pop_exc.inject(noise) #1000.0 pA noise = sim.NoisyCurrentSource(mean=1.740/1000.0, stdev=5.00/1000.0, start=0.0, stop=2000.0, dt=1.0) pop_inh.inject(noise) #1750.0 pA ''' noise = sim.NoisyCurrentSource(mean=0.74/1000.0, stdev=4.00/1000.0, start=0.0, stop=2000.0, dt=1.0) pop_exc.inject(noise) #1000.0 pA noise = sim.NoisyCurrentSource(mean=1.440/1000.0, stdev=4.00/1000.0, start=0.0, stop=2000.0, dt=1.0) pop_inh.inject(noise) ## # Setup and run a simulation. 
Note there is no current injection into the neuron. # All cells in the network are in a quiescent state, so its not a surprise that xthere are no spikes ## sim = pyNN.neuron arange = np.arange import re all_cells.record(['v','spikes']) # , 'u']) all_cells.initialize(v=-65.0, u=-14.0) # === Run the simulation ===================================================== tstop = 2000.0 sim.run(tstop) data = None data = all_cells.get_data().segments[0] #print(len(data.analogsignals[0].times)) with open('pickles/qi'+str(wg)+'.p', 'wb') as f: pickle.dump(data,f) # make data none or else it will grow in a loop all_cells = None data = None noise = None
'e_rev_I': e_rev_I, 'v_reset': v_reset, 'tau_refrac': tau_refrac, 'i_offset': i_offset } cell_type = sim.IF_cond_exp(**neuronParameters) input = sim.Population(20, sim.SpikeSourcePoisson(rate=50.0)) output = sim.Population(25, cell_type) rand_distr = RandomDistribution('uniform', (v_reset, v_thresh), rng=NumpyRNG(seed=85524)) output.initialize(v=rand_distr) stdp = sim.STDPMechanism(weight_dependence=sim.AdditiveWeightDependence(w_min=0.0, w_max=0.1), timing_dependence=sim.Vogels2011Rule(eta=0.0, rho=1e-3), weight=0.005, delay=0.5) fpc = sim.FixedProbabilityConnector(0.02, rng=NumpyRNG(seed=854))
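# Sketch (hedged): one possible continuation, combining the connector and the
# Vogels2011 plasticity rule above into a Projection; wiring the stimulus onto
# the cells through an inhibitory receptor is an assumption here.
inh_plastic = sim.Projection(input, output, fpc, stdp,
                             receptor_type='inhibitory')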
v_rest = list() v_thresh = list() for atom in range(0, nNeurons): cm.append(0.25) i_off.append(0.0) tau_m.append(10.0) tau_re.append(2.0) tau_syn_e.append(0.5) tau_syn_i.append(0.5) v_reset.append(-65.0) v_rest.append(-65.0) v_thresh.append(-64.4) gbar_na_distr = RandomDistribution('normal', (20.0, 2.0), rng=NumpyRNG(seed=85524)) cell_params_lif = { 'cm': cm, # nF 'i_offset': i_off, 'tau_m': tau_m, 'tau_refrac': tau_re, 'tau_syn_E': tau_syn_e, 'tau_syn_I': tau_syn_i, 'v_reset': v_reset, 'v_rest': v_rest, 'v_thresh': v_thresh } populations = list() projections = list()
timer = Timer() seed = 764756387 tstop = 1000.0 # ms input_rate = 100.0 # Hz cell_params = {'tau_refrac': 2.0, # ms 'v_thresh': -50.0, # mV 'tau_syn_E': 2.0, # ms 'tau_syn_I': 2.0} # ms n_record = 5 node = setup(timestep=0.025, min_delay=1.0, max_delay=10.0, debug=True, quit_on_end=False) print("Process with rank %d running on %s" % (node, socket.gethostname())) rng = NumpyRNG(seed=seed, parallel_safe=True) print("[%d] Creating populations" % node) n_spikes = int(2 * tstop * input_rate / 1000.0) spike_times = numpy.add.accumulate(rng.next(n_spikes, 'exponential', {'beta': 1000.0 / input_rate}, mask_local=False)) input_population = Population(100, SpikeSourceArray(spike_times=spike_times), label="input") output_population = Population(10, IF_curr_exp(**cell_params), label="output") print("[%d] input_population cells: %s" % (node, input_population.local_cells)) print("[%d] output_population cells: %s" % (node, output_population.local_cells)) print("[%d] Connecting populations" % node) timer.start() connector = CSAConnector(csa.random(0.5)) syn = StaticSynapse(weight=0.1)
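# Sketch (hedged): completing the snippet above, the CSA connection set is
# applied through a Projection, recording is switched on and the network run;
# Projection and run are assumed to be imported like the other bare pyNN names.
projection = Projection(input_population, output_population, connector, syn)
output_population.record('spikes')
output_population[0:n_record].record('v')
run(tstop)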
def do_run(nNeurons): p.setup(timestep=1.0, min_delay=1.0, max_delay=32.0) p.set_number_of_neurons_per_core("IF_curr_exp", 100) cm = list() i_off = list() tau_m = list() tau_re = list() tau_syn_e = list() tau_syn_i = list() v_reset = list() v_rest = list() v_thresh = list() cell_params_lif = {'cm': cm, 'i_offset': i_off, 'tau_m': tau_m, 'tau_refrac': tau_re, 'tau_syn_E': tau_syn_e, 'tau_syn_I': tau_syn_i, 'v_reset': v_reset, 'v_rest': v_rest, 'v_thresh': v_thresh} for atom in range(0, nNeurons): cm.append(0.25) i_off.append(0.0) tau_m.append(10.0) tau_re.append(2.0) tau_syn_e.append(0.5) tau_syn_i.append(0.5) v_reset.append(-65.0) v_rest.append(-65.0) v_thresh.append(-64.4) gbar_na_distr = RandomDistribution('normal', (20.0, 2.0), rng=NumpyRNG(seed=85524)) cell_params_lif = {'cm': cm, 'i_offset': i_off, 'tau_m': tau_m, 'tau_refrac': tau_re, 'tau_syn_E': tau_syn_e, 'tau_syn_I': tau_syn_i, 'v_reset': v_reset, 'v_rest': v_rest, 'v_thresh': v_thresh} populations = list() projections = list() weight_to_spike = 2 delay = 1 connections = list() for i in range(0, nNeurons): singleConnection = (i, ((i + 1) % nNeurons), weight_to_spike, delay) connections.append(singleConnection) injectionConnection = [(0, 0, weight_to_spike, delay)] spikeArray = {'spike_times': [[0]]} populations.append(p.Population(nNeurons, p.IF_curr_exp, cell_params_lif, label='pop_1')) populations.append(p.Population(1, p.SpikeSourceArray, spikeArray, label='inputSpikes_1')) populations[0].set({'cm': 0.25}) populations[0].set('cm', cm) populations[0].set({'tau_m': tau_m, 'v_thresh': v_thresh}) populations[0].set('i_offset', gbar_na_distr) populations[0].set('i_offset', i_off) projections.append(p.Projection(populations[0], populations[0], p.FromListConnector(connections))) projections.append(p.Projection(populations[1], populations[0], p.FromListConnector(injectionConnection))) populations[0].record_v() populations[0].record_gsyn() populations[0].record() p.run(100) v = populations[0].get_v(compatible_output=True) gsyn = populations[0].get_gsyn(compatible_output=True) spikes = populations[0].getSpikes(compatible_output=True) p.end() return (v, gsyn, spikes)
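# Usage sketch (hedged): a test harness would call do_run and inspect the
# returned voltage, conductance and spike arrays, e.g.
v, gsyn, spikes = do_run(nNeurons=20)
print("v samples: %d, gsyn samples: %d, spikes: %d"
      % (len(v), len(gsyn), len(spikes)))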
def network(para, simulator): if simulator == "-nest": sim = import_module("pyNN.nest") elif simulator == "-neuron": sim = import_module("pyNN.neuron") elif simulator == "-brian": sim = import_module("pyNN.brian") # initialize pyNN simulation sim.setup(timestep=para["dt"]) # Parameters for excitatory neurons E_parameters = { "tau_m": para["taumE"], "cm": para["taumE"] / para["R"], "v_rest": para["Vr"], "v_reset": para["Vreset"], "v_thresh": para["Vt"], "tau_refrac": para["tref"], "tau_syn_E": para["tau_syn_e"], "tau_syn_I": para["tau_syn_e"], "i_offset": (para["VextE"] / para["R"]) } # Paramters for inhibitory neurons I_parameters = { "tau_m": para["taumI"], "cm": para["taumI"] / para["R"], "v_rest": para["Vr"], "v_reset": para["Vreset"], "v_thresh": para["Vt"], "tau_refrac": para["tref"], "tau_syn_E": para["tau_syn_e"], "tau_syn_I": para["tau_syn_e"], "i_offset": (para["VextI"] / para["R"]) } ############ All Excitatory and inhibitory neurons ######################## # number of excitatory neurons in one network NE_net = int(para['N'] * para['Ne']) # Total number of excitatory neurons NE = int(para['NAreas'] * NE_net) # number of excitatory neurons in one network NI_net = int(para['N'] * (1 - para['Ne'])) # Total number of inhibitory neurons NI = int((para['NAreas'] * NI_net)) popE = sim.Population(NE, sim.IF_curr_alpha, E_parameters, label="popE") popI = sim.Population(NI, sim.IF_curr_alpha, I_parameters, label="popI") ################################ Noise #################################### # Noise on excitatory neurons stdNoiseE = (para["sigma"] / para["R"]) * (para["taumE"]** 0.5) / (para["dt"]**0.5) for i in range(NE): noise = sim.NoisyCurrentSource(mean=0, stdev=stdNoiseE, start=0.0, stop=para["duration"]) popE[i].inject(noise) # Noise on inhibitory neurons stdNoiseI = (para["sigma"] / para["R"]) * (para["taumI"]** 0.5) / (para["dt"]**0.5) for i in range(NI): noise = sim.NoisyCurrentSource(mean=0, stdev=stdNoiseI, start=0.0, stop=para["duration"]) popI[i].inject(noise) # paramters for initial conditions kernelseed = 5456532 rng = NumpyRNG(kernelseed, parallel_safe=True) uniformDistr = RandomDistribution('uniform', low=para["Vr"], high=para["Vt"], rng=rng) sim.initialize(popE, v=uniformDistr) sim.initialize(popI, v=uniformDistr) # Separate population in population views popEList = [] popIList = [] # Store projections EE = [] IE = [] EI = [] II = [] EElongRange = [] IElongRange = [] for i in range(para['NAreas']): # store sub populations in lists popEList.append(popE[(i * NE_net):((i + 1) * NE_net)]) popIList.append(popI[(i * NI_net):((i + 1) * NI_net)]) #### Synapses # Weights for recurrent connections wEE_alpha = (((1 + para['alpha'] * para["hier"][i]) * para["wEE"]) / para['coeffE'])[0] #[nA] wIE_alpha = (((1 + para['alpha'] * para["hier"][i]) * para["wIE"]) / para['coeffI'])[0] #[nA] wEI_alpha = (para["wEI"] / para['coeffE']) * -1 #[nA] wII_alpha = (para["wII"] / para['coeffI']) * -1 #[nA] # Connections EE_connections = sim.Projection( popEList[i], popEList[i], sim.FixedProbabilityConnector(p_connect=para["probIntra"]), sim.StaticSynapse(weight=wEE_alpha, delay=para["dlocal"])) IE_connections = sim.Projection( popEList[i], popIList[i], sim.FixedProbabilityConnector(p_connect=para["probIntra"]), sim.StaticSynapse(weight=wIE_alpha, delay=para["dlocal"])) EI_connections = sim.Projection( popIList[i], popEList[i], sim.FixedProbabilityConnector(p_connect=para["probIntra"]), sim.StaticSynapse(weight=wEI_alpha, delay=para["dlocal"])) II_connections = sim.Projection( popIList[i], 
popIList[i], sim.FixedProbabilityConnector(p_connect=para["probIntra"]), sim.StaticSynapse(weight=wII_alpha, delay=para["dlocal"])) # Store projections in lists EE.append(EE_connections) IE.append(IE_connections) EI.append(EI_connections) II.append(II_connections) # Long Range connections for i in range(para['NAreas']): for j in range(para['NAreas']): if i != j: # Weights wEE_alphaLR = ( (1 + para['alpha'] * para["hier"][j]) * para['muEE'] * para["conn"][j, i])[0] / para['coeffE'] wIE_alphaLR = ( (1 + para['alpha'] * para["hier"][j]) * para['muIE'] * para["conn"][j, i])[0] / para['coeffI'] # Delay # Mean for delay distribution meanlr = para["delayMat"][j, i] # Standard deviation for delay distribution varlr = para['lrvar'] * meanlr dLR = RandomDistribution('normal', [meanlr, varlr], rng=NumpyRNG(seed=4242)) # Connections EE_connectionsLR = sim.Projection( popEList[i], popEList[j], sim.FixedProbabilityConnector(p_connect=para["probInter"]), sim.StaticSynapse(weight=wEE_alphaLR, delay=dLR)) IE_connectionsLR = sim.Projection( popEList[i], popIList[j], sim.FixedProbabilityConnector(p_connect=para["probInter"]), sim.StaticSynapse(weight=wIE_alphaLR, delay=dLR)) # Store projections in list EElongRange.append(EE_connectionsLR) IElongRange.append(IE_connectionsLR) # Stimulus amplitude = para['currval'] / para['R'] #[mV] pulse = sim.DCSource(amplitude=amplitude, start=300.0, stop=300.0 + para['currdur']) pulse.inject_into(popEList[0]) # Record data popE.record('spikes') popI.record('spikes') # Run sim.run(para['duration']) # Store spikes spikesE_in = popE.get_data() spikesI_in = popI.get_data() # Generate array with spike data for popE spkpopE = spikeData(popE) spkpopI = spikeData(popI) return spkpopE, spkpopI
print "%d Initialising the simulator with %d threads..." % ( rank, extra['threads']) else: print "%d Initialising the simulator with single thread..." % (rank) # Small function to display information only on node 1 def nprint(s): if (rank == 0): print s timer.start() # start timer on construction print "%d Setting up random number generator" % rank rng = NumpyRNG(kernelseed, parallel_safe=True) print "%d Creating excitatory population with %d neurons." % (rank, NE) celltype = IF_curr_alpha(**cell_params) E_net = Population(NE, celltype, label="E_net") print "%d Creating inhibitory population with %d neurons." % (rank, NI) I_net = Population(NI, celltype, label="I_net") print "%d Initialising membrane potential to random values between %g mV and %g mV." % ( rank, U0, theta) uniformDistr = RandomDistribution('uniform', [U0, theta], rng) E_net.initialize(v=uniformDistr) I_net.initialize(v=uniformDistr) print "%d Creating excitatory Poisson generator with rate %g spikes/s." % (
def do_run(seed=None): random.seed(seed) # SpiNNaker setup sim.setup(timestep=1.0, min_delay=1.0, max_delay=10.0) # +-------------------------------------------------------------------+ # | General Parameters | # +-------------------------------------------------------------------+ # Population parameters model = sim.IF_curr_exp cell_params = { 'cm': 0.25, 'i_offset': 0.0, 'tau_m': 10.0, 'tau_refrac': 2.0, 'tau_syn_E': 2.5, 'tau_syn_I': 2.5, 'v_reset': -70.0, 'v_rest': -65.0, 'v_thresh': -55.4 } # Other simulation parameters e_rate = 200 in_rate = 350 n_stim_test = 5 n_stim_pairing = 10 dur_stim = 20 pop_size = 40 ISI = 150. start_test_pre_pairing = 200. start_pairing = 1500. start_test_post_pairing = 700. simtime = start_pairing + start_test_post_pairing + \ ISI*(n_stim_pairing + n_stim_test) + 550. # let's make it 5000 # Initialisations of the different types of populations IAddPre = [] IAddPost = [] # +-------------------------------------------------------------------+ # | Creation of neuron populations | # +-------------------------------------------------------------------+ # Neuron populations pre_pop = sim.Population(pop_size, model(**cell_params)) post_pop = sim.Population(pop_size, model(**cell_params)) # Test of the effect of activity of the pre_pop population on the post_pop # population prior to the "pairing" protocol : only pre_pop is stimulated for i in range(n_stim_test): IAddPre.append( sim.Population( pop_size, sim.SpikeSourcePoisson(rate=in_rate, start=start_test_pre_pairing + ISI * (i), duration=dur_stim, seed=random.randint(0, 100000000)))) # Pairing protocol : pre_pop and post_pop are stimulated with a 10 ms # difference for i in range(n_stim_pairing): IAddPre.append( sim.Population( pop_size, sim.SpikeSourcePoisson(rate=in_rate, start=start_pairing + ISI * (i), duration=dur_stim, seed=random.randint(0, 100000000)))) IAddPost.append( sim.Population( pop_size, sim.SpikeSourcePoisson(rate=in_rate, start=start_pairing + ISI * (i) + 10., duration=dur_stim, seed=random.randint(0, 100000000)))) # Test post pairing : only pre_pop is stimulated # (and should trigger activity in Post) for i in range(n_stim_test): start = start_pairing + ISI * n_stim_pairing + \ start_test_post_pairing + ISI * i IAddPre.append( sim.Population( pop_size, sim.SpikeSourcePoisson(rate=in_rate, start=start, duration=dur_stim, seed=random.randint(0, 100000000)))) # Noise inputs INoisePre = sim.Population(pop_size, sim.SpikeSourcePoisson(rate=e_rate, start=0, duration=simtime, seed=random.randint( 0, 100000000)), label="expoisson") INoisePost = sim.Population(pop_size, sim.SpikeSourcePoisson(rate=e_rate, start=0, duration=simtime, seed=random.randint( 0, 100000000)), label="expoisson") # +-------------------------------------------------------------------+ # | Creation of connections | # +-------------------------------------------------------------------+ # Connection parameters JEE = 3. 
# Connection type between noise poisson generator and # excitatory populations ee_connector = sim.OneToOneConnector() # Noise projections sim.Projection(INoisePre, pre_pop, ee_connector, receptor_type='excitatory', synapse_type=sim.StaticSynapse(weight=JEE * 0.05)) sim.Projection(INoisePost, post_pop, ee_connector, receptor_type='excitatory', synapse_type=sim.StaticSynapse(weight=JEE * 0.05)) # Additional Inputs projections for i in range(len(IAddPre)): sim.Projection(IAddPre[i], pre_pop, ee_connector, receptor_type='excitatory', synapse_type=sim.StaticSynapse(weight=JEE * 0.05)) for i in range(len(IAddPost)): sim.Projection(IAddPost[i], post_pop, ee_connector, receptor_type='excitatory', synapse_type=sim.StaticSynapse(weight=JEE * 0.05)) # Plastic Connections between pre_pop and post_pop stdp_model = sim.STDPMechanism( timing_dependence=sim.SpikePairRule(tau_plus=20., tau_minus=50.0, A_plus=0.02, A_minus=0.02), weight_dependence=sim.AdditiveWeightDependence(w_min=0, w_max=0.9)) rng = NumpyRNG(seed=seed, parallel_safe=True) plastic_projection = \ sim.Projection(pre_pop, post_pop, sim.FixedProbabilityConnector( p_connect=0.5, rng=rng), synapse_type=stdp_model) # +-------------------------------------------------------------------+ # | Simulation and results | # +-------------------------------------------------------------------+ # Record spikes and neurons' potentials pre_pop.record(['v', 'spikes']) post_pop.record(['v', 'spikes']) # Run simulation sim.run(simtime) weights = plastic_projection.get('weight', 'list') pre_spikes = neo_convertor.convert_spikes(pre_pop.get_data('spikes')) post_spikes = neo_convertor.convert_spikes(post_pop.get_data('spikes')) # End simulation on SpiNNaker sim.end() return (pre_spikes, post_spikes, weights)
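# Usage sketch (hedged): running the pairing protocol with a fixed seed and
# checking the learnt weights of the plastic projection, one entry per synapse.
pre_spikes, post_spikes, weights = do_run(seed=1)
print("pre spikes: %d, post spikes: %d" % (len(pre_spikes), len(post_spikes)))
print("first plastic weights:", weights[:5])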