def create_empty_input_layers_for_scales(target: np.ndarray, scales: List[float])\
        -> Dict[float, List[Layer]]:
    """
    Creates empty input layers from the target image shape for every scale in
    the passed list.

    Parameters:
        `target`: The target image from which to compute the gabor filters and
                  create the input layers

        `scales`: A list of the scales for which to create input layers
    
    Returns:
        A dictionary which contains for each scale a list of four input layers,
        one for each orientation
    """
    input_layers = {}
    feature_names = cm.get_gabor_feature_names()
    for size in scales:
        print('Creating input layers for size', size)
        n = round(target.shape[0] * size)
        m = round(target.shape[1] * size)
        input_layers[size] = [Layer(sim.Population(n * m,
                                                   sim.IF_curr_exp(),
                                                   label=feature_name),
                                    (n, m))
                              for feature_name in feature_names]
    return input_layers
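# Usage sketch (illustrative, not part of the original module): assuming
# sim.setup() has already been called and `target_img` is a grayscale image
# loaded as a NumPy array, e.g.
#
#     input_layers = create_empty_input_layers_for_scales(target_img,
#                                                         [1.0, 0.71, 0.5])
#
# input_layers[0.5] would then hold four Layer objects, one per Gabor
# orientation, each wrapping an n * m IF_curr_exp population.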
def create_C2_layers(S2_layers: Dict[float, Sequence[Layer]],
                     s2_prototype_cells: int) -> List[sim.Population]:
    """
    Creates the populations of the C2 layer, one for each S2 prototype cell,
    containing only a single cell which max-pools the spikes of all layers of a
    prototype.

    Parameters:
        `S2_layers`: A dictionary containing for each scale a list of S2
                     layers, one for each prototype cell

        `s2_prototype_cells`: The number of S2 prototype cells

    Returns:
        A list of populations of size one, one population for each prototype
        cell
    """
    no_inh_w = 17.15  # synapse weight without S2 inhibitions
    with_inh_w = 4 * no_inh_w  # synapse weight with S2 inhibitions
    C2_populations = [sim.Population(1, sim.IF_curr_exp(),
                                     label=str(prot))\
                        for prot in range(s2_prototype_cells)]
    total_connections = sum(
        map(lambda ll: ll[0].shape[0] * ll[0].shape[1], S2_layers.values()))
    for s2ll in S2_layers.values():
        for prot in range(s2_prototype_cells):

            sim.Projection(
                s2ll[prot].population, C2_populations[prot],
                sim.AllToAllConnector(),
                sim.StaticSynapse(weight=with_inh_w / total_connections))
    return C2_populations
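# Usage sketch (illustrative): the S2 dictionary expected here is the one
# returned by create_S2_layers() further below, e.g.
#
#     C2_populations = create_C2_layers(S2_layers, s2_prototype_cells=3)
#     for population in C2_populations:
#         population.record('spikes')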
Example 3
import pyNN.nest

def test_ticket244():
    nest = pyNN.nest
    nest.setup(threads=4)
    p1 = nest.Population(4, nest.IF_curr_exp())
    p1.record('spikes')
    poisson_generator = nest.Population(3, nest.SpikeSourcePoisson(rate=1000.0))
    conn = nest.OneToOneConnector()
    syn = nest.StaticSynapse(weight=1.0)
    nest.Projection(poisson_generator, p1.sample(3), conn, syn, receptor_type="excitatory")
    nest.run(15)
    p1.get_data()
Example 4
import pyNN.nest

def test_ticket240():
    nest = pyNN.nest
    nest.setup(threads=4)
    parameters = {'Tau_m': 17.0}
    p1 = nest.Population(4, nest.IF_curr_exp())
    p2 = nest.Population(5, nest.native_cell_type("ht_neuron")(**parameters))
    conn = nest.AllToAllConnector()
    syn = nest.StaticSynapse(weight=1.0)
    prj = nest.Projection(p1, p2, conn, syn, receptor_type='AMPA')  # This should be a nonstandard receptor type but I don't know of one to use.
    connections = prj.get(('weight',), format='list')
    assert len(connections) > 0
def create_corner_layer_for(input_layers):
    shape = input_layers[0].shape
    total_output_neurons = np.prod(shape)

    output_population = sim.Population(total_output_neurons,
                                       sim.IF_curr_exp(),
                                       label='corner')
    for layer in input_layers:
        sim.Projection(layer.population, output_population,
                       sim.OneToOneConnector(),
                       sim.StaticSynapse(weight=1., delay=0.5))

    return Layer(output_population, shape)
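# Usage sketch (illustrative): assuming `edge_layers` is a list of equally
# shaped Layer objects (for instance the four orientation layers of one scale),
# e.g.
#
#     corner_layer = create_corner_layer_for(edge_layers)
#     corner_layer.population.record('spikes')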
Example 6
    def setUp(self):
        """
        Instantiates the PyNN communication and control adapter
        """
        brainconfig.rng_seed = 123456
        with LogCapture(('hbp_nrp_cle.brainsim.pynn.PyNNControlAdapter',
                        'hbp_nrp_cle.brainsim.pynn.PyNNCommunicationAdapter',
                         'hbp_nrp_cle.brainsim.common.__AbstractCommunicationAdapter')) as log_capt:
            self.control = PyNNControlAdapter(sim)
            self.assertEqual(self.control.is_alive(), False)
            self.control.initialize(timestep=0.1,
                                    min_delay=0.1,
                                    max_delay=4.0,
                                    num_threads=1)
            self.control.initialize(timestep=0.1,
                                    min_delay=0.1,
                                    max_delay=4.0,
                                    num_threads=1)
            self.communicator = PyNNNestCommunicationAdapter()
            self.neurons_cond = sim.Population(10, sim.IF_cond_exp())
            self.neurons_curr = sim.Population(10, sim.IF_curr_exp())
            self.two_neurons_pop_cond = [sim.Population(10, sim.IF_cond_exp()),
                                         sim.Population(10, sim.IF_cond_exp())]
            self.two_neurons_pop_curr = [sim.Population(10, sim.IF_curr_exp()),
                                         sim.Population(10, sim.IF_curr_exp())]
            self.three_neurons_pop_cond = [sim.Population(10, sim.IF_cond_exp()),
                                           sim.Population(10, sim.IF_cond_exp()),
                                           sim.Population(10, sim.IF_cond_exp())]

            self.assertEqual(self.communicator.is_initialized, False)
            self.assertEqual(self.communicator.detector_devices, [])
            self.assertEqual(self.communicator.generator_devices, [])
        log_capt.check(('hbp_nrp_cle.brainsim.pynn.PyNNControlAdapter', 'INFO',
                        'neuronal simulator initialized'),
                       ('hbp_nrp_cle.brainsim.pynn.PyNNControlAdapter', 'WARNING',
                        'trying to initialize an already initialized controller'))
def recognizer_weights_from(feature_np_array):
    """
    Builds a network whose input neurons fire at the rates given by
    feature_np_array and uses STDP to learn the weights with which a single
    output neuron recognizes the image.
    """
    in_p = create_spike_source_layer_from(feature_np_array).population
    out_p = sim.Population(1, sim.IF_curr_exp(i_offset=5))
    synapse = sim.STDPMechanism(
        weight=-0.2,
        timing_dependence=sim.SpikePairRule(tau_plus=20.0,
                                            tau_minus=20.0,
                                            A_plus=0.01,
                                            A_minus=0.005),
        weight_dependence=sim.AdditiveWeightDependence(w_min=0, w_max=0.4))
    proj = sim.Projection(in_p, out_p, sim.AllToAllConnector(), synapse)
    sim.run(500)
    return proj.get('weight', 'array')
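# Usage sketch (illustrative): assuming sim.setup() has been called and
# `feature_patch` is a small 2D NumPy array of firing rates, e.g.
#
#     learned_weights = recognizer_weights_from(feature_patch)
#
# The result is the (n_input, 1) weight array returned by
# Projection.get('weight', 'array').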
def create_output_layer(input_layer, weights_tuple, delta, layer_name, refrac):
    """
    Builds a layer which connects to the input_layer according to the given
    parameters.

    Parameters:
        `input_layer`: The input layer

        `weights_tuple`: A tuple of the form (weights, weights_shape)

        `delta`: The vertical and horizontal offset of the output layer's squares

        `layer_name`: The label given to the output layer

        `refrac`: The refractory period of the output layer neurons

    Returns:
        An output layer which is connected to the given input layer according
        to the given parameters
    """
    #    print('Number of output neurons {} for size {}x{}'.format(\
    #                                            total_output_neurons, t_n, t_m))
    n, m = how_many_squares_in_shape(input_layer.shape, weights_tuple[1],
                                     delta)
    total_output_neurons = n * m
    print('Layer:', layer_name)
    print('Output layer has shape', n, m)
    output_layer = Layer(
        sim.Population(total_output_neurons,
                       sim.IF_curr_exp(tau_refrac=refrac),
                       structure=space.Grid2D(aspect_ratio=m / n),
                       label=layer_name), (n, m))

    connect_layer_to_layer(input_layer, output_layer, weights_tuple[1], delta,
                           weights_tuple[0])

    return output_layer
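# Usage sketch (illustrative): assuming `c1_layer` is a Layer and
# `weights_tuple` is a (weights, weights_shape) pair as described in the
# docstring, e.g.
#
#     output_layer = create_output_layer(c1_layer, weights_tuple, delta=2,
#                                        layer_name='pooled', refrac=.1)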
def create_S2_layers(C1_layers: Dict[float, Sequence[Layer]], feature_size,
                     s2_prototype_cells, refrac_s2=.1, stdp=True,
                     inhibition=True)\
        -> Dict[float, List[Layer]]:
    """
    Creates all prototype S2 layers for all sizes.

    Parameters:
        `C1_layers`: A dictionary containing for each size a list of C1
                     layers, one for each feature

        `feature_size`: The side length of the square patch of C1 cells from
                        which each S2 cell receives input

        `s2_prototype_cells`: The number of S2 prototype cells to create

        `refrac_s2`: The refractory period of the S2 neurons

        `stdp`: Whether to make the C1 to S2 connections plastic through STDP

        `inhibition`: Whether to create inhibitory connections between S2 cells

    Returns:
        A dictionary containing for each size a list of S2 layers, one for
        each prototype.
    """
    f_s = feature_size
    initial_weight = 25 / (f_s * f_s)
    weight_rng = rnd.RandomDistribution('normal',
                                        mu=initial_weight,
                                        sigma=initial_weight / 20)
    i_offset_rng = rnd.RandomDistribution('normal', mu=.5, sigma=.45)
    weights = list(
        map(lambda x: weight_rng.next() * 1000, range(4 * f_s * f_s)))
    S2_layers = {}
    i_offsets = list(
        map(lambda x: i_offset_rng.next(), range(s2_prototype_cells)))
    ndicts = list(map(lambda x: {}, range(s2_prototype_cells)))
    ondicts = list(map(lambda x: {}, range(s2_prototype_cells)))
    omdicts = list(map(lambda x: {}, range(s2_prototype_cells)))
    for size, layers in C1_layers.items():
        n, m = how_many_squares_in_shape(layers[0].shape, (f_s, f_s), f_s)
        if stdp:
            l_i_offsets = [list(map(lambda x: rnd.RandomDistribution('normal',
                             mu=i_offsets[i], sigma=.25).next(), range(n * m)))\
                                for i in range(s2_prototype_cells)]
        else:
            l_i_offsets = np.zeros((s2_prototype_cells, n * m))
        print('S2 Shape', n, m)
        layer_list = list(
            map(
                lambda i: Layer(
                    sim.Population(n * m,
                                   sim.IF_curr_exp(i_offset=l_i_offsets[i],
                                                   tau_refrac=refrac_s2),
                                   structure=space.Grid2D(aspect_ratio=m / n),
                                   label=str(i)), (n, m)),
                range(s2_prototype_cells)))
        for S2_layer in layer_list:
            for C1_layer in layers:
                S2_layer.projections[C1_layer.population.label] =\
                    connect_layer_to_layer(C1_layer, S2_layer, (f_s, f_s), f_s,
                                           [[w] for w in weights[:f_s * f_s]],
                                           stdp=stdp,
                                           initial_weight=initial_weight,
                                           ndicts=ndicts, ondicts=ondicts,
                                           omdicts=omdicts)
        S2_layers[size] = layer_list
    # Set the labels of the shared connections
    if stdp:
        t = time.clock()
        print('Set shared labels')
        for s2_label_dicts in [ndicts, ondicts, omdicts]:
            for i in range(s2_prototype_cells):
                w_iter = weights.__iter__()
                for label, (source, target) in s2_label_dicts[i].items():
                    conns = nest.GetConnections(source=source, target=target)
                    nest.SetStatus(conns, {
                        'label': label,
                        'weight': w_iter.__next__()
                    })
        print('Setting labels took', time.clock() - t)
    if inhibition:
        # Create inhibitory connections between the S2 cells
        # First between the neurons of the same layer...
        inh_weight = -10
        inh_delay = .1
        print('Create S2 self inhibitory connections')
        for layer_list in S2_layers.values():
            for layer in layer_list:
                sim.Projection(
                    layer.population, layer.population,
                    sim.AllToAllConnector(allow_self_connections=False),
                    sim.StaticSynapse(weight=inh_weight, delay=inh_delay))
        # ...and between the layers
        print('Create S2 cross-scale inhibitory connections')
        for i in range(s2_prototype_cells):
            for layer_list1 in S2_layers.values():
                for layer_list2 in S2_layers.values():
                    if layer_list1[i] != layer_list2[i]:
                        sim.Projection(
                            layer_list1[i].population,
                            layer_list2[i].population, sim.AllToAllConnector(),
                            sim.StaticSynapse(weight=inh_weight,
                                              delay=inh_delay))
    if stdp:
        # Create the inhibition between different prototype layers
        print('Create S2 cross-prototype inhibitory connections')
        for layer_list in S2_layers.values():
            for layer1 in layer_list:
                for layer2 in layer_list:
                    if layer1 != layer2:
                        sim.Projection(
                            layer1.population, layer2.population,
                            sim.OneToOneConnector(),
                            sim.StaticSynapse(weight=inh_weight - 1,
                                              delay=inh_delay))
    return S2_layers
    mplt.savefig('plots/CLF/{}_{}.png'.format(results_label, appendix))


# Datastructure to store the learned weights from all epochs
all_epochs_weights = []
for training_pair, validation_pair in\
        zip(c2_training_spikes, c2_validation_spikes):

    # ============= Training ============= #
    print('Construct training network')
    sim.setup(threads=args.threads, min_delay=.1)
    # Create the C2 layer and connect it to the single output neuron
    training_spiketrains = [[s for s in st] for st in training_pair[1]]
    C2_populations, compound_C2_population =\
            create_C2_populations(training_spiketrains)
    out_p = sim.Population(1, sim.IF_curr_exp(tau_refrac=.1))
    stdp_weight = 7 / s2_prototype_cells
    stdp = sim.STDPMechanism(
        weight=stdp_weight,
        timing_dependence=sim.SpikePairRule(tau_plus=20.0,
                                            tau_minus=26.0,
                                            A_plus=stdp_weight / 5,
                                            A_minus=stdp_weight / 4.48),
        weight_dependence=sim.AdditiveWeightDependence(w_min=0.0,
                                                       w_max=15.8 * stdp_weight))
    learn_proj = sim.Projection(compound_C2_population, out_p,
                                sim.AllToAllConnector(), stdp)

    epoch = training_pair[0]
    print('Simulating for epoch', epoch)
Example 11
# %% Simulation

spikes_e = make_spikes(50, 100, 10)
spikes_i = make_spikes(70, 72, 3)

# %%COBA
ge_vec, gi_vec, u_vec, s_vec = LIF_COBA(spikes_e=spikes_e, w=0.016)

# %%CUBA
I_vec, u_vec, s_vec  = LIF_CUBA(spikes=spikes_e)

# %% PyNN CUBA
# Setup
sim.setup(timestep=0.1, min_delay=0.1, max_delay=10.0)
IF_sim = sim.Population(1, sim.IF_curr_exp(), label="IF_curr_exp")
IF_sim.record('v')

spike_times = np.arange(50, 100, 10)
spike_input = sim.Population(1, sim.SpikeSourceArray(spike_times=spike_times),
                             label="Input spikes")

# Connections
w = 1
connections = sim.Projection(spike_input, IF_sim,
                             connector=sim.AllToAllConnector(),
                             synapse_type=sim.StaticSynapse(weight=w, delay=0.1),
                             receptor_type="excitatory")

# Run the simulation (times in ms)
IF_sim.record('v')
sim.run(100.0)
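
# Added sketch (not in the original snippet): retrieve the recorded membrane
# potential from the Neo block returned by get_data() and close the simulator.
cuba_block = IF_sim.get_data()
cuba_v = cuba_block.segments[0].analogsignals[0]  # AnalogSignal, shape (timesteps, 1), in mV
sim.end()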
__author__ = 'heberto'

import pyNN.nest as simulator
import pyNN.nest.standardmodels.electrodes as elect

N = 1  # Number of neurons
t = 100.0  #Simulation time

# Has to be called at the beginning of the simulation
simulator.setup(timestep=0.1, min_delay=0.1, max_delay=10)

model = simulator.IF_curr_exp()

neurons = simulator.Population(N, model)

# DC source
current = simulator.DCSource(amplitude=0.5, start=20.0, stop=80.0)
#current = elect.DCSource(amplitude=0.5, start=20.0, stop=80.0)
current.inject_into(neurons)
#neurons.inject(current)

# Record the voltage
neurons.record('v')

simulator.run(t)  # Run the simulations for t ms

simulator.end()

# Extracts the data
data = neurons.get_data()  # Creates a Neo Block
segment = data.segments[0]  # Takes the first segment
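
# Added sketch (not in the original snippet): the membrane trace recorded above
# is the first analog signal of that segment.
vm = segment.analogsignals[0]  # Neo AnalogSignal: one column per neuron, in mV
print(vm.times[0], vm.times[-1], vm.shape)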
Example 13
i_offset = 0
R = 20
tau_m = 20.0
tau_refractory = 1
v_thresh = 0
v_rest = -60
tau_syn_E = 5.0
tau_syn_I = 5.0
cm = tau_m / R
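# Note (added for clarity): with tau_m in ms and R read as the membrane
# resistance in MOhm, cm = tau_m / R = 1.0 comes out in nF, the unit that
# IF_curr_exp expects for its cm parameter.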

# It seems that the resting potential is -65 for every neuron
model = simulator.IF_curr_exp(cm=cm,
                              i_offset=i_offset,
                              tau_m=tau_m,
                              tau_refrac=tau_refractory,
                              tau_syn_E=tau_syn_E,
                              tau_syn_I=tau_syn_I,
                              v_reset=v_rest,
                              v_thresh=v_thresh)

# Spatial structure
retinal_structure = space.Grid2D(aspect_ratio=1, dx=1.0, dy=1.0, z=0)

# Populations
retinal_neurons = simulator.Population(N_retina,
                                       model,
                                       structure=retinal_structure,
                                       label='Retina')
lgn_neurons = simulator.Population(N_lgn,
                                   model,
                                   structure=retinal_structure,