    def do_run(self):
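        """
        Check that results are reproducible across a reset: build two STDP
        projections with uniformly distributed initial weights, run, reset,
        run again, and compare the recorded weights and spikes.
        """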
        sim.setup(1.0)
        pop = sim.Population(1, sim.IF_curr_exp(i_offset=5.0), label="pop")
        pop.record("spikes")
        pop_2 = sim.Population(1, sim.IF_curr_exp(), label="pop_2")
        proj = sim.Projection(
            pop, pop_2, sim.AllToAllConnector(),
            sim.STDPMechanism(
                timing_dependence=sim.SpikePairRule(),
                weight_dependence=sim.AdditiveWeightDependence(),
                weight=sim.RandomDistribution("uniform", low=0.3, high=0.7)))
        proj_2 = sim.Projection(
            pop, pop_2, sim.OneToOneConnector(),
            sim.STDPMechanism(
                timing_dependence=sim.SpikePairRule(),
                weight_dependence=sim.AdditiveWeightDependence(),
                weight=sim.RandomDistribution("uniform", low=0.3, high=0.7)))
        sim.run(100)
        weights_1_1 = proj.get("weight", "list")
        weights_1_2 = proj_2.get("weight", "list")
        spikes_1 = pop.get_data("spikes").segments[0].spiketrains

        sim.reset()
        sim.run(100)
        weights_2_1 = proj.get("weight", "list")
        weights_2_2 = proj_2.get("weight", "list")
        spikes_2 = pop.get_data("spikes").segments[1].spiketrains
        sim.end()

        assert numpy.array_equal(weights_1_1, weights_2_1)
        assert numpy.array_equal(weights_1_2, weights_2_2)
        assert numpy.array_equal(spikes_1, spikes_2)

    def do_run(self):
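        """
        Build a population whose LIF cell parameters are each drawn from a
        different RandomDistribution, then check that every parameter can
        be read back with one value per neuron.
        """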
        nNeurons = 2
        p.setup(timestep=1.0, min_delay=1.0)

        rng = NumpyRNG(seed=85524)
        cm = p.RandomDistribution('uniform_int', [1, 4], rng=rng)
        i_off = p.RandomDistribution('poisson', lambda_=3, rng=rng)
        tau_m = p.RandomDistribution('gamma', [1.0, 1.0], rng=rng)
        tau_re = p.RandomDistribution('vonmises', [1.0, 1.0], rng=rng)
        tau_syn_E = p.RandomDistribution('exponential', [0.1], rng=rng)
        tau_syn_I = p.RandomDistribution('binomial', [1, 0.5], rng=rng)
        v_reset = p.RandomDistribution('lognormal', [1.0, 1.0], rng=rng)
        v_rest = p.RandomDistribution('normal_clipped',
                                      [-70.0, 1.0, -72.0, -68.0],
                                      rng=rng)
        v_thresh = p.RandomDistribution('normal_clipped_to_boundary',
                                        [-55.0, 2.0, -57.0, -53.0],
                                        rng=rng)
        v_init = p.RandomDistribution('normal', [-65.0, 1.0], rng=rng)

        cell_params_lif = {
            'cm': cm,
            'i_offset': i_off,
            'tau_m': tau_m,
            'tau_refrac': tau_re,
            'tau_syn_E': tau_syn_E,
            'tau_syn_I': tau_syn_I,
            'v_reset': v_reset,
            'v_rest': v_rest,
            'v_thresh': v_thresh
        }

        pop_1 = p.Population(nNeurons,
                             p.IF_curr_exp,
                             cell_params_lif,
                             label='pop_1')

        pop_1.initialize(v=v_init)

        p.run(1)

        # All this really checks is that the values get copied correctly
        self.assertEqual(nNeurons, len(pop_1.get("cm")))
        self.assertEqual(nNeurons, len(pop_1.get("i_offset")))
        self.assertEqual(nNeurons, len(pop_1.get("tau_m")))
        self.assertEqual(nNeurons, len(pop_1.get("tau_refrac")))
        self.assertEqual(nNeurons, len(pop_1.get("tau_syn_E")))
        self.assertEqual(nNeurons, len(pop_1.get("tau_syn_I")))
        self.assertEqual(nNeurons, len(pop_1.get("v_reset")))
        self.assertEqual(nNeurons, len(pop_1.get("v_rest")))
        self.assertEqual(nNeurons, len(pop_1.get("v_thresh")))
        p.end()

    def test_run(self):
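        """
        Initialise the membrane voltage of a single LIF neuron from a
        uniform RandomDistribution and run for 500 ms.
        """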
        CELL_PARAMS_LIF = {'cm': 0.25, 'i_offset': 0.0, 'tau_m': 20.0,
                           'tau_refrac': 2.0, 'tau_syn_E': 5.0,
                           'tau_syn_I': 5.0, 'v_reset': -70.0, 'v_rest': -65.0,
                           'v_thresh': -50.0}

        p.setup(timestep=1, min_delay=1.0, max_delay=144)
        pop = p.Population(1, p.IF_curr_exp(**CELL_PARAMS_LIF), label='pop_1')
        rng = p.NumpyRNG(seed=28375)
        v_init = p.RandomDistribution('uniform', [-60, -40], rng)
        pop.initialize(v=v_init)
        p.run(500)
Example #4
    def do_run(self):
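        """
        Mix FromListConnector columns with RandomDistribution synapse
        parameters: list weights with random delays, and list delays with
        random weights, then check the resulting connection values.
        """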
        sim.setup(1.0)
        pre = sim.Population(2, sim.IF_curr_exp())
        post = sim.Population(2, sim.IF_curr_exp())
        nrng = sim.NumpyRNG(seed=1)

        # Test case where weights are in list and delays given by random dist
        list1 = [(0, 0, 2.1), (0, 1, 2.6), (1, 1, 3.2)]
        proj1 = sim.Projection(
            pre, post, sim.FromListConnector(list1, column_names=["weight"]),
            sim.StaticSynapse(weight=0.5,
                              delay=sim.RandomDistribution('uniform', [1, 10],
                                                           rng=nrng)))

        # Test case where delays are in list and weights given by random dist
        list2 = [(0, 0, 2), (0, 1, 6), (1, 1, 3)]
        proj2 = sim.Projection(
            pre, post, sim.FromListConnector(list2, column_names=["delay"]),
            sim.StaticSynapse(weight=sim.RandomDistribution('uniform',
                                                            [1.5, 3.5],
                                                            rng=nrng),
                              delay=4))

        sim.run(1)
        conns1 = proj1.get(["weight", "delay"], "list")
        conns2 = proj2.get(["weight", "delay"], "list")
        sim.end()

        target1 = [(0, 0, 2.1, 8), (0, 1, 2.6, 6), (1, 1, 3.2, 4)]
        target2 = [(0, 0, 1.7864, 2), (0, 1, 2.0823, 6), (1, 1, 2.4995, 3)]

        # assertAlmostEqual doesn't work on lists, so loop required
        for i in range(3):
            for j in range(2):
                self.assertAlmostEqual(conns1[i][j + 2],
                                       target1[i][j + 2],
                                       places=3)
                self.assertAlmostEqual(conns2[i][j + 2],
                                       target2[i][j + 2],
                                       places=3)
Example #5
    def test_model_fail_to_set_neuron_param_random_distribution(self):
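        """
        Set i_offset from a uniform RandomDistribution and check that every
        neuron gets a value strictly inside the requested range.
        """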
        n_neurons = 5
        range_low = -70
        range_high = -50
        value = sim.RandomDistribution('uniform', (range_low, range_high))
        label = "pop_1"
        sim.setup(timestep=1.0)
        model = sim.IF_curr_exp(i_offset=value)
        pop_1 = sim.Population(n_neurons, model, label=label)
        values = pop_1.get('i_offset')
        for value in values:
            self.assertGreater(value, range_low)
            self.assertLess(value, range_high)
def do_run(split, seed=None):
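    """
    Drive a LIF population from a Poisson source, change the source rate
    and tau_syn_E between runs, then reset, re-initialise the membrane
    voltage from a uniform RandomDistribution and run again; the recorded
    spikes from each stage are returned.
    """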
    p.setup(1.0)

    if split:
        p.set_number_of_neurons_per_core(p.SpikeSourcePoisson, 27)
        p.set_number_of_neurons_per_core(p.IF_curr_exp, 22)

    inp = p.Population(100,
                       p.SpikeSourcePoisson(rate=100, seed=seed),
                       label="input")
    pop = p.Population(100, p.IF_curr_exp, {}, label="pop")

    p.Projection(inp,
                 pop,
                 p.OneToOneConnector(),
                 synapse_type=p.StaticSynapse(weight=5))

    pop.record("spikes")
    inp.record("spikes")

    p.run(100)

    inp.set(rate=10)
    # pop.set("cm", 0.25)
    pop.set(tau_syn_E=1)

    p.run(100)

    pop_spikes1 = pop.spinnaker_get_data('spikes')
    inp_spikes1 = inp.spinnaker_get_data('spikes')

    p.reset()

    inp.set(rate=0)
    pop.set(i_offset=1.0)
    vs = p.RandomDistribution("uniform", [-65.0, -55.0],
                              rng=NumpyRNG(seed=seed))
    pop.initialize(v=vs)

    p.run(100)

    pop_spikes2 = pop.spinnaker_get_data('spikes')
    inp_spikes2 = inp.spinnaker_get_data('spikes')

    p.end()

    return (pop_spikes1, inp_spikes1, pop_spikes2, inp_spikes2)
def run_bad_normal_clipping():
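    """
    Build a projection whose delays come from a normal_clipped distribution
    whose mean (20) lies outside the clipping bounds [1, 6], i.e. a
    deliberately bad parameterisation.
    """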
    p.setup(timestep=1.0)

    pop_1 = p.Population(4, p.IF_curr_exp(), label="pop_1")
    input = p.Population(4, p.SpikeSourceArray(spike_times=[0]), label="input")

    delays = p.RandomDistribution("normal_clipped",
                                  mu=20,
                                  sigma=1,
                                  low=1,
                                  high=6)

    p.Projection(input,
                 pop_1,
                 p.AllToAllConnector(),
                 synapse_type=p.StaticSynapse(weight=5, delay=delays))

    p.run(10)

    p.end()
Example #8
    def __create_synfire_chain(n_neurons, cell_class, cell_params,
                               use_wrap_around_connections, weight_to_spike,
                               delay, spike_times, spike_times_list,
                               placement_constraint, randomise_v_init, seed,
                               constraint, input_class, rate, start_time,
                               duration, use_spike_connections):
        """ This actually builds the synfire chain. """
        populations = list()
        projections = list()

        loop_connections = list()
        if use_wrap_around_connections:
            for i in range(0, n_neurons):
                single_connection = \
                    (i, ((i + 1) % n_neurons), weight_to_spike, delay)
                loop_connections.append(single_connection)
        else:
            for i in range(0, n_neurons - 1):
                single_connection = (i, i + 1, weight_to_spike, delay)
                loop_connections.append(single_connection)

        injection_connection = [(0, 0, weight_to_spike, 1)]

        run_count = 0
        if spike_times_list is None:
            spike_array = {'spike_times': spike_times}
        else:
            spike_array = {'spike_times': spike_times_list[run_count]}

        populations.append(
            p.Population(n_neurons, cell_class(**cell_params), label='pop_1'))

        if placement_constraint is not None:
            if len(placement_constraint) == 2:
                (x, y) = placement_constraint
                populations[0].add_placement_constraint(x=x, y=y)
            else:
                (x, y, proc) = placement_constraint
                populations[0].add_placement_constraint(x=x, y=y, p=proc)

        if randomise_v_init:
            if seed is None:
                v_init = p.RandomDistribution('uniform', [-60, -40])
            else:
                v_init = p.RandomDistribution('uniform', [-60, -40],
                                              NumpyRNG(seed=seed))
            populations[0].initialize(v=v_init)

        if constraint is not None:
            populations[0].set_constraint(constraint)

        if input_class == SpikeSourceArray:
            populations.append(
                p.Population(1, input_class(**spike_array),
                             label='inputSSA_1'))
        elif seed is None:
            populations.append(
                p.Population(1,
                             input_class(rate=rate,
                                         start=start_time,
                                         duration=duration),
                             label='inputSSP_1'))
        else:
            populations.append(
                p.Population(1,
                             input_class(rate=rate,
                                         start=start_time,
                                         duration=duration),
                             label='inputSSP_1',
                             additional_parameters={"seed": seed}))

        # handle projections
        if use_spike_connections:
            projections.append(
                p.Projection(
                    populations[0], populations[0],
                    p.FromListConnector(loop_connections),
                    p.StaticSynapse(weight=weight_to_spike, delay=delay)))

        projections.append(
            p.Projection(populations[1], populations[0],
                         p.FromListConnector(injection_connection),
                         p.StaticSynapse(weight=weight_to_spike, delay=1)))

        return populations, projections, run_count
Example #9
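# Script fragment: a seeded NumpyRNG, a uniform delay distribution and LIF
# cell-parameter dictionaries (the inhibitory dictionary is truncated in
# this excerpt).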
import numpy as np
from pyNN.utility.plotting import Figure, Panel
import pylab as plt

# The excerpt below uses `sim` as the simulator alias but omits the import;
# something like the following is assumed.
import pyNN.spiNNaker as sim

from functions import intercept_simulator, restore_simulator_from_file

start_time = plt.datetime.datetime.now()
v_reset = -65
v_thresh = -50
rngseed = 98766987
parallel_safe = True
rng = sim.NumpyRNG(seed=rngseed, parallel_safe=parallel_safe)

sim_time = 5000
coupling_multiplier = 2.
delay_distribution = sim.RandomDistribution('uniform', [1, 14], rng=rng)

cell_params_exc = {
    'tau_m': 20.0,
    'cm': 1.0,
    'v_rest': -65.0,
    'v_reset': -65.0,
    'v_thresh': -50.0,
    'tau_syn_E': 5.0,
    'tau_syn_I': 15.0,
    'tau_refrac': 0.3,
    'i_offset': 0
}

cell_params_inh = {
    'tau_m': 20.0,
Example #10
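    # Fragment of a basal-ganglia model (after Humphries et al. 2009):
    # dopamine-modulated AMPA conductances and uniform delay distributions
    # for the cortical projections to striatal, FSI and STN populations.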
    mod_ampa_d2 = 0.2  # 0.156 in the Humphries et al. (2009) network

    phi_max_dop = 5  # scaled within 0 to 5
    phi_msn_dop = 0.55 * phi_max_dop
    phi_fsi_dop = 0.75 * phi_max_dop
    phi_stn_dop = 0.4 * phi_max_dop  # note: this is scaled between 0 and 16.67

    # Setting network conductance parameters

    g_cort2strd1 = g_ampa
    g_cort2strd2 = g_ampa * (1 - (mod_ampa_d2 * phi_msn_dop))
    g_cort2fsi = g_ampa * (1 - (mod_ampa_d2 * phi_fsi_dop))
    g_cort2stn = g_ampa * (1 - (mod_ampa_d2 * phi_stn_dop))

    # Defining the distributions of the delay parameters
    distr_strd1 = p.RandomDistribution("uniform", [9, 12])

    distr_strd2 = p.RandomDistribution("uniform", [9, 12])

    distr_stn = p.RandomDistribution("uniform", [9, 12])

    distr_fsi = p.RandomDistribution("uniform", [9, 12])

    pconn_cort2str = 0.15
    pconn_cort2stn = 0.2

    poplist_ch1 = [strd1_pop1, strd2_pop1, fsi1_pop1, stn_pop1]
    poplist_ch2 = [strd1_pop2, strd2_pop2, fsi1_pop2, stn_pop2]
    poplist_ch3 = [strd1_pop3, strd2_pop3, fsi1_pop3, stn_pop3]

    g_pop = [g_cort2strd1, g_cort2strd2, g_cort2fsi, g_cort2stn]
def do_run(plot):
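    """
    Initialise a LIF population from a uniform membrane-voltage
    distribution, stream its spikes via live output, inject spikes into a
    second population through a SpikeInjector, and return the shapes of
    the recorded spike arrays (optionally plotting them).
    """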
    p.setup(timestep=1.0)

    # n_pop = 2  # 60
    nNeurons = 10  # 100

    rng = p.NumpyRNG(seed=28374)
    # rng1 = p.NumpyRNG(seed=12345)

    # delay_distr = p.RandomDistribution('uniform', [5, 10], rng)
    # weight_distr = p.RandomDistribution('uniform', [0, 2], rng1)

    v_distr = p.RandomDistribution('uniform', [-95, -55], rng)

    v_inits = []
    for i in range(nNeurons):
        v_inits.append(v_distr.next())

    cell_params_lif_in = {
        'tau_m': 32,
        'v_init': -80,
        'v_rest': -75,
        'v_reset': -95,
        'v_thresh': -55,
        'tau_syn_E': 5,
        'tau_syn_I': 10,
        'tau_refrac': 20,
        'i_offset': 1
    }

    cell_params_lif = {
        'tau_m': 32,
        'v_init': -80,
        'v_rest': -75,
        'v_reset': -95,
        'v_thresh': -55,
        'tau_syn_E': 5,
        'tau_syn_I': 10,
        'tau_refrac': 5,
        'i_offset': 0
    }

    cell_params_ext_dev = {'port': 34567}

    populations = list()
    projections = list()

    weight_to_spike = 20

    populations.append(
        p.Population(nNeurons,
                     p.IF_curr_exp(**cell_params_lif_in),
                     label='pop_%d' % 0))
    populations[0].initialize(v=v_distr)

    p.external_devices.activate_live_output_for(populations[0])

    pop_external = p.Population(
        nNeurons,
        p.external_devices.SpikeInjector(**cell_params_ext_dev),
        label='Babel_Dummy')

    populations.append(
        p.Population(nNeurons,
                     p.IF_curr_exp(**cell_params_lif),
                     label='pop_%d' % 1))

    projections.append(
        p.Projection(pop_external, populations[1], p.OneToOneConnector(),
                     p.StaticSynapse(weight=weight_to_spike, delay=10)))

    # populations[0].record_v()
    # at the moment it is only possible to observe one population per core
    populations[1].record(['v'])

    for pop in populations:
        pop.record(['spikes'], to_file=False)
        # sends spikes to the Monitoring application

    #    populations[i].record_variable('rate', save_to='eth')
    #  sends spike to the Monitoring application

    p.run(10000)

    # retrieving spike results and plotting...

    id_accumulator = 0

    shapes = []

    if plot:
        import matplotlib.pyplot as p_plot
    for pop_o in populations:
        data = numpy.asarray(
            neo_convertor.convert_spikes(pop_o.get_data('spikes')))
        print(data.shape)
        shapes.append(data.shape)
        if plot:
            p_plot.scatter(data[:, 0],
                           data[:, 1] + id_accumulator,
                           color='green',
                           s=1)
        id_accumulator = id_accumulator + pop_o.size

    if plot:
        p_plot.show()

    return shapes