Code Example #1
def testAllToAll(self):
    for srcP in [self.source5, self.source22, self.target33]:
        for tgtP in [self.target6, self.target33]:
            if srcP == tgtP:
                prj = sim.Projection(
                    srcP, tgtP,
                    sim.AllToAllConnector(allow_self_connections=False,
                                          weights=1.234))
            else:
                prj = sim.Projection(srcP, tgtP,
                                     sim.AllToAllConnector(weights=1.234))
            weights = prj._connections.W.toarray().flatten().tolist()
            self.assertEqual(weights, [1.234] * len(prj))
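The test above reads the weights back through the projection's private _connections attribute. As a hedged alternative sketch (assuming the PyNN 0.7 API these snippets appear to use), the public accessor returns the same flat list:

    weights = prj.getWeights(format='list')
    self.assertEqual(weights, [1.234] * len(prj))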
Code Example #2
def setup_2_layers_4_units_ff_net():
    configure_scheduling()
    pynnn.setup()
    Tns.p1 = pynnn.Population(4,
                              pynnn.IF_curr_alpha,
                              structure=pynnn.space.Grid2D())
    Tns.p2 = pynnn.Population(4,
                              pynnn.IF_curr_alpha,
                              structure=pynnn.space.Grid2D())
    Tns.prj1_2 = pynnn.Projection(
        Tns.p1,
        Tns.p2,
        pynnn.AllToAllConnector(allow_self_connections=False),
        target='excitatory')
    Tns.prj1_2.set("weight", 1)
    Tns.max_weight = 34
    Tns.rore1_update_p = 10
    Tns.rore1_win_width = 200
    Tns.rore2_update_p = 10
    Tns.rore2_win_width = 200
    Tns.rore1 = RectilinearOutputRateEncoder(Tns.p1, 2, 2, Tns.rore1_update_p,
                                             Tns.rore1_win_width)
    Tns.rore2 = RectilinearOutputRateEncoder(Tns.p2, 2, 2, Tns.rore2_update_p,
                                             Tns.rore2_win_width)
    common.pynn_utils.POP_ADAPT_DICT[(
        Tns.p1, common.pynn_utils.RectilinearOutputRateEncoder)] = Tns.rore1
    common.pynn_utils.POP_ADAPT_DICT[(
        Tns.p2, common.pynn_utils.RectilinearOutputRateEncoder)] = Tns.rore2
    enable_recording(Tns.p1, Tns.p2)
    schedule_output_rate_calculation(Tns.p1)
    schedule_output_rate_calculation(Tns.p2)
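POP_ADAPT_DICT is keyed by (population, adapter class) tuples, so the encoders registered above can be looked up the same way later. A minimal sketch reusing only names from the snippet (the local name rore1 is illustrative):

    rore1 = common.pynn_utils.POP_ADAPT_DICT[
        (Tns.p1, common.pynn_utils.RectilinearOutputRateEncoder)]
    assert rore1 is Tns.rore1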
Code Example #3
File: pynn_utils_tests.py  Project: GQI7FS6/pycogmo
def setup_pynn_populations_with_full_connectivity():
    pynnn.setup()
    Tns.p1 = pynnn.Population(4,
                              pynnn.IF_curr_alpha,
                              structure=pynnn.space.Grid2D())
    Tns.p2 = pynnn.Population(4,
                              pynnn.IF_curr_alpha,
                              structure=pynnn.space.Grid2D())
    Tns.prj1_2 = pynnn.Projection(
        Tns.p1,
        Tns.p2,
        pynnn.AllToAllConnector(allow_self_connections=False),
        target='excitatory')
Code Example #4
File: pynn_utils_tests.py  Project: GQI7FS6/pycogmo
def setup_pynn_populations():
    pynnn.setup()
    Tns.p1 = pynnn.Population(64,
                              pynnn.IF_curr_alpha,
                              structure=pynnn.space.Grid2D())
    Tns.p2 = pynnn.Population(64,
                              pynnn.IF_curr_alpha,
                              structure=pynnn.space.Grid2D())
    Tns.prj1_2 = pynnn.Projection(
        Tns.p1,
        Tns.p2,
        pynnn.AllToAllConnector(allow_self_connections=False),
        target='excitatory')
    # Weights in nA as IF_curr_alpha uses current-based synapses
    Tns.prj1_2.set("weight", 1)
    Tns.max_weight = 33
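As the comment notes, IF_curr_alpha uses current-based synapses, so the weight of 1 is in nA; with a conductance-based cell type such as IF_cond_alpha (used in Code Example #5) the same calls would interpret weights in µS. A minimal sketch of that variant (the cond_* names and the 0.004 µS value are illustrative only):

cond_p1 = pynnn.Population(64, pynnn.IF_cond_alpha,
                           structure=pynnn.space.Grid2D())
cond_p2 = pynnn.Population(64, pynnn.IF_cond_alpha,
                           structure=pynnn.space.Grid2D())
cond_prj = pynnn.Projection(
    cond_p1, cond_p2,
    pynnn.AllToAllConnector(allow_self_connections=False),
    target='excitatory')
cond_prj.set("weight", 0.004)  # conductance-based synapses: weight in µS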
Code Example #5
def setup_and_fill_adapter():
    setup_adapter()
    Tns.pop_size = 27
    Tns.pynn_pop1 = pynnn.Population(Tns.pop_size, pynnn.IF_cond_alpha)
    Tns.ids1 = [int(u) for u in Tns.pynn_pop1.all()]
    Tns.pynn_pop2 = pynnn.Population(Tns.pop_size, pynnn.IF_cond_alpha,
                                     structure=pynnn.space.Grid3D())
    Tns.ids2 = [int(u) for u in Tns.pynn_pop2.all()]
    A.add_pynn_population(Tns.pynn_pop1)
    Tns.pop2_alias = "testmap"
    A.add_pynn_population(Tns.pynn_pop2, alias=Tns.pop2_alias)
    Tns.pynn_proj1 = pynnn.Projection(Tns.pynn_pop1, Tns.pynn_pop2,
                                      pynnn.OneToOneConnector())
    Tns.pynn_proj2 = pynnn.Projection(Tns.pynn_pop2, Tns.pynn_pop1,
                                      pynnn.AllToAllConnector())
    A.add_pynn_projection(Tns.pynn_pop1, Tns.pynn_pop2,
                          Tns.pynn_proj1)
    A.add_pynn_projection(Tns.pynn_pop2, Tns.pynn_pop1,
                          Tns.pynn_proj2)
Code Example #6
# syn = sim.StaticSynapse(weight = 0.05, delay = 0.5)
depressing_synapse_ee = sim.TsodyksMarkramSynapse(weight=0.05,
                                                  delay=0.2,
                                                  U=0.5,
                                                  tau_rec=800.0,
                                                  tau_facil=0.01)
facilitating_synapse_ee = sim.TsodyksMarkramSynapse(weight=0.05,
                                                    delay=0.5,
                                                    U=0.04,
                                                    tau_rec=100.0,
                                                    tau_facil=1000)
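# In the Tsodyks-Markram model, U is the utilisation of synaptic efficacy per
# spike, tau_rec the recovery time constant from depression (ms), and
# tau_facil the facilitation time constant (ms): the large U and long tau_rec
# above give a depressing synapse, the small U and long tau_facil a
# facilitating one.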
static_synapse = sim.StaticSynapse(weight=0.05, delay=0.5)

Input_E_connection = sim.Projection(Excinp,
                                    Pexc,
                                    sim.AllToAllConnector(),
                                    static_synapse,
                                    receptor_type='excitatory')

E_E_connection = sim.Projection(Pexc,
                                Pexc,
                                sim.FixedProbabilityConnector(p_connect=0.5),
                                depressing_synapse_ee,
                                receptor_type='excitatory')

Excinp.record('spikes')
# Excinp[1].record('v')
Pexc.record('spikes')
Pexc[5:6].record('v')

for i in range(no_run):
    sim.run_until(run_time * (i + 1))  # advance the simulation one chunk per iteration
Code Example #7
Excinp = sim.Population(10, sim.SpikeSourcePoisson(rate = 20.0, start = 0, duration = run_time * no_run))
# cell_type_parameters = {'tau_refrac': 0.1, 'v_thresh': -50.0, 'tau_m': 20.0, 'tau_syn_E': 0.5, 'v_rest': -65.0,\
						# 'cm': 1.0, 'v_reset': -65.0, 'tau_syn_I': 0.5, 'i_offset': 0.0}
# print(sim.IF_curr_alpha.default_parameters)

# cell_type = sim.IF_cond_exp(**cell_type_parameters) # neuron type of population
Pexc = sim.Population(10, sim.EIF_cond_exp_isfa_ista(), label = "excitatory neurons")
# Pexc.set(tau_refrac = 0.1, v_thresh = -50.0, tau_m = 20.0, tau_syn_E = 0.5, v_rest = -65.0, \
# 		cm = 1.0, v_reset = -65, tau_syn_I = 0.5, i_offset = 0.0)
# Pexc.initialize(**cell_type_parameters)
# print Pexc.celltype.default_initial_values
# print Pexc.get('tau_m')
# syn = sim.StaticSynapse(weight = 0.05, delay = 0.5)
# depressing_synapse_ee = sim.TsodyksMarkramSynapse(weight = 0.05, delay = 0.2, U = 0.5, tau_rec = 800.0, tau_facil = 0.01)
facilitating_synapse_ee = sim.TsodyksMarkramSynapse(weight = 0.05, delay = 0.5, U = 0.04, tau_rec = 100.0, tau_facil = 1000)
connection = sim.Projection(Excinp, Pexc, sim.AllToAllConnector(), facilitating_synapse_ee, receptor_type = 'excitatory')


# E_E_connection = sim.Projection(Pexc, Pexc, sim.FixedProbabilityConnector(p_connect = 0.5), depressing_synapse_ee, receptor_type = 'excitatory')

Excinp.record('spikes')
# Excinp[1].record('v')
Pexc.record('spikes')
Pexc[5:6].record('v')

for i in range(no_run):
	sim.run_until(run_time * (i + 1))
	print('the time is %.1f' %(run_time * (i + 1)))
	spikes = Excinp.get_data()
	spike = Pexc.get_data()
	# print connection.get('weight',format = 'array')
Code Example #8
File: simpletask.py  Project: luxizh/spikingNN_pre
def train(label, untrained_weights=None):
    organisedStim = {}
    labelSpikes = []
    spikeTimes = generate_data(label)

    for i in range(output_size):
        labelSpikes.append([])
    labelSpikes[label] = [int(max(max(spikeTimes))) + 1]

    if untrained_weights is None:
        untrained_weights = RandomDistribution('uniform',
                                               low=wMin,
                                               high=wMaxInit).next(input_size *
                                                                   output_size)
        #untrained_weights = RandomDistribution('normal_clipped', mu=0.1, sigma=0.05, low=wMin, high=wMaxInit).next(input_size*output_size)
        untrained_weights = np.around(untrained_weights, 3)
        #saveWeights(untrained_weights, 'untrained_weightssupmodel1traj')
        print("init!")

    print "length untrained_weights :", len(untrained_weights)

    if len(untrained_weights) > input_size:
        training_weights = [[0 for j in range(output_size)]
                            for i in range(input_size)
                            ]  #np array? size 1024x25
        k = 0
        #for i in untrained_weights:
        #    training_weights[i[0]][i[1]]=i[2]
        for i in range(input_size):
            for j in range(output_size):
                training_weights[i][j] = untrained_weights[k]
                k += 1
    else:
        training_weights = untrained_weights

    connections = []
    for n_pre in range(input_size):        # training_weights has input_size rows
        for n_post in range(output_size):  # ...and output_size columns
            # (pre index, post index, weight, delay) tuples for the FromListConnector
            connections.append((n_pre, n_post, training_weights[n_pre][n_post],
                                __delay__))
    runTime = int(max(max(spikeTimes))) + 100
    #####################
    sim.setup(timestep=1)
    #def populations
    layer1 = sim.Population(input_size,
                            sim.SpikeSourceArray, {'spike_times': spikeTimes},
                            label='inputspikes')
    layer2 = sim.Population(output_size,
                            sim.IF_curr_exp,
                            cellparams=cell_params_lif,
                            label='outputspikes')
    supsignal = sim.Population(output_size,
                               sim.SpikeSourceArray,
                               {'spike_times': labelSpikes},
                               label='supersignal')

    #def learning rule
    stdp = sim.STDPMechanism(
        #weight=untrained_weights,
        #weight=0.02,  # this is the initial value of the weight
        #delay="0.2 + 0.01*d",
        timing_dependence=sim.SpikePairRule(tau_plus=tauPlus,
                                            tau_minus=tauMinus,
                                            A_plus=aPlus,
                                            A_minus=aMinus),
        #weight_dependence=sim.MultiplicativeWeightDependence(w_min=wMin, w_max=wMax),
        weight_dependence=sim.AdditiveWeightDependence(w_min=wMin, w_max=wMax),
        dendritic_delay_fraction=0)
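    # SpikePairRule implements pair-based STDP: tau_plus/A_plus set the
    # potentiation for pre-before-post spike pairs, tau_minus/A_minus the
    # depression for the reverse order, and AdditiveWeightDependence clips
    # the resulting weights to [wMin, wMax].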
    #def projections

    stdp_proj = sim.Projection(layer1,
                               layer2,
                               sim.FromListConnector(connections),
                               synapse_type=stdp)
    inhibitory_connections = sim.Projection(
        layer2,
        layer2,
        sim.AllToAllConnector(allow_self_connections=False),
        synapse_type=sim.StaticSynapse(weight=inhibWeight, delay=__delay__),
        receptor_type='inhibitory')
    stim_proj = sim.Projection(supsignal,
                               layer2,
                               sim.OneToOneConnector(),
                               synapse_type=sim.StaticSynapse(
                                   weight=stimWeight, delay=__delay__))

    layer1.record(['spikes'])

    layer2.record(['v', 'spikes'])
    supsignal.record(['spikes'])
    sim.run(runTime)

    print("Weights:{}".format(stdp_proj.get('weight', 'list')))

    weight_list = [
        stdp_proj.get('weight', 'list'),
        stdp_proj.get('weight', format='list', with_address=False)
    ]
    neo = layer2.get_data(["spikes", "v"])
    spikes = neo.segments[0].spiketrains
    v = neo.segments[0].filter(name='v')[0]
    neostim = supsignal.get_data(["spikes"])
    print(label)
    spikestim = neostim.segments[0].spiketrains
    neoinput = layer1.get_data(["spikes"])
    spikesinput = neoinput.segments[0].spiketrains

    plt.close('all')
    pplt.Figure(pplt.Panel(v,
                           ylabel="Membrane potential (mV)",
                           xticks=True,
                           yticks=True,
                           xlim=(0, runTime)),
                pplt.Panel(spikesinput,
                           xticks=True,
                           yticks=True,
                           markersize=2,
                           xlim=(0, runTime)),
                pplt.Panel(spikestim,
                           xticks=True,
                           yticks=True,
                           markersize=2,
                           xlim=(0, runTime)),
                pplt.Panel(spikes,
                           xticks=True,
                           xlabel="Time (ms)",
                           yticks=True,
                           markersize=2,
                           xlim=(0, runTime)),
                title="Training" + str(label),
                annotations="Training" +
                str(label)).save('plot/' + str(trylabel) + str(label) +
                                 '_training.png')
    #plt.hist(weight_list[1], bins=100)
    #plt.show()
    plt.close('all')
    print(wMax)
    '''
    plt.hist([weight_list[1][0:input_size], weight_list[1][input_size:input_size*2], weight_list[1][input_size*2:]], bins=20, label=['neuron 0', 'neuron 1', 'neuron 2'], range=(0, wMax))
    plt.title('weight distribution')
    plt.xlabel('Weight value')
    plt.ylabel('Weight count')
    '''
    #plt.show()
    #plt.show()

    sim.end()
    for i in weight_list[0]:
        #training_weights[int(i[0])][int(i[1])]=float(i[2])
        weight_list[1][int(i[0]) * output_size + int(i[1])] = i[2]
    return weight_list[1]
Code Example #9
spikeSourceStim = sim.Population(1, sim.SpikeSourceArray,
                                 {'spike_times': []})
spikeSourcePlastic = sim.Population(1, sim.SpikeSourceArray,
                                    {'spike_times': stimulusPlastic})
assert spikeSourceStim is not None
assert spikeSourcePlastic is not None

# configure stdp
stdp = sim.STDPMechanism(weight = 0.2,  # this is the initial value of the weight
                         timing_dependence = sim.SpikePairRule(tau_plus = 20.0, tau_minus = 20.0,
                                                               A_plus = 0.01, A_minus = 0.012),
                         weight_dependence = sim.AdditiveWeightDependence(w_min = 0, w_max = 0.04))

# connect stimulus
sim.Projection(spikeSourceStim,
               neuron,
               sim.AllToAllConnector(),
               sim.StaticSynapse(weight=0.04, delay=timingPrePostStim),
               receptor_type='excitatory')

# create plastic synapse
prj = sim.Projection(spikeSourcePlastic, neuron, sim.AllToAllConnector(), stdp)
weightBefore = prj.get('weight', format='list')
prj.set(weight=0.15)
print(weightBefore)
neuron.record('spikes')

lastInputSpike = np.max(np.concatenate((stimulus, stimulusPlastic)))
runtime = lastInputSpike + stimulusOffset

sim.run(runtime)
Code Example #10
stdp = sim.STDPMechanism(
    weight=0.02,  # this is the initial value of the weight
    #delay="0.2 + 0.01*d",
    timing_dependence=sim.SpikePairRule(tau_plus=20.0,
                                        tau_minus=20.0,
                                        A_plus=0.01,
                                        A_minus=0.012),
    weight_dependence=sim.AdditiveWeightDependence(w_min=0, w_max=0.04),
    dendritic_delay_fraction=0)
#Error: The pyNN.brian backend does not currently support dendritic delays:
# for the purpose of STDP calculations all delays are assumed to be axonal
# for the Brian backend, set dendritic_delay_fraction=0 (the default value is 1.0)
'''
Connection algorithms
'''

connector = sim.AllToAllConnector(allow_self_connections=False)  # no autapses
# (allow_self_connections defaults to True)

connector = sim.OneToOneConnector()

#Connecting neurons with a fixed probability
connector = sim.FixedProbabilityConnector(p_connect=0.2)

#Connecting neurons with a position-dependent probability
DDPC = sim.DistanceDependentProbabilityConnector
connector = DDPC("exp(-d)")
connector = DDPC("d<3")
#The constructor requires a string d_expression, which should be a distance expression,
# as described above for delays, but returning a probability (a value between 0 and 1)

#Divergent/fan-out connections
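#A hedged sketch using PyNN's FixedNumber* connectors: FixedNumberPostConnector
#gives divergent/fan-out connectivity, FixedNumberPreConnector convergent/fan-in.
connector = sim.FixedNumberPostConnector(n=5)  # each presynaptic cell contacts 5 postsynaptic cells
connector = sim.FixedNumberPreConnector(n=5)  # each postsynaptic cell receives from 5 presynaptic cells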
Code Example #11
File: attention_net.py  Project: GQI7FS6/pycogmo
def main():
    ## Uninteresting setup, start up the visu process,...
    logfile = make_logfile_name()
    ensure_dir(logfile)
    f_h = logging.FileHandler(logfile)
    f_h.setLevel(SUBDEBUG)
    d_h = logging.StreamHandler()
    d_h.setLevel(INFO)
    utils.configure_loggers(debug_handler=d_h, file_handler=f_h)
    parent_conn, child_conn = multiprocessing.Pipe()
    p = multiprocessing.Process(target=visualisation.visualisation_process_f,
                                name="display_process",
                                args=(child_conn, LOGGER))
    p.start()

    pynnn.setup(timestep=SIMU_TIMESTEP)
    init_logging("logfile", debug=True)
    LOGGER.info("Simulation started with command: %s", sys.argv)

    ## Network setup
    # First population
    p1 = pynnn.Population(100,
                          pynnn.IF_curr_alpha,
                          structure=pynnn.space.Grid2D())
    p1.set({'tau_m': 20, 'v_rest': -65})
    # Second population
    p2 = pynnn.Population(20,
                          pynnn.IF_curr_alpha,
                          cellparams={
                              'tau_m': 15.0,
                              'cm': 0.9
                          })
    # Projection 1 -> 2
    prj1_2 = pynnn.Projection(
        p1,
        p2,
        pynnn.AllToAllConnector(allow_self_connections=False),
        target='excitatory')
    # I may need to write my own PyNN Connector class. Otherwise, this is
    # neat: an exponentially decaying probability of connection as a
    # function of distance. Distance is calculated using only x and y,
    # which lie on a toroidal topology with boundaries at 0 and 500.
    connector = pynnn.DistanceDependentProbabilityConnector(
        "exp(-abs(d))",
        space=pynnn.Space(axes='xy',
                          periodic_boundaries=((0, 500), (0, 500), None)))
    # Alternately, the powerful connection set algebra (python CSA
    # module) can be used.
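    # A hedged sketch of that alternative (assuming the csa package is
    # installed and this PyNN version exposes CSAConnector):
    # import csa
    # csa_connector = pynnn.CSAConnector(csa.random(0.1))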
    weight_distr = pynnn.RandomDistribution(distribution='gamma',
                                            parameters=[1, 0.1])
    prj1_2.randomizeWeights(weight_distr)

    # This one is in NEST but not in Brian:
    # source = pynnn.NoisyCurrentSource(
    #     mean=100, stdev=50, dt=SIMU_TIMESTEP,
    #     start=10.0, stop=SIMU_DURATION, rng=pynnn.NativeRNG(seed=100))
    source = pynnn.DCSource(start=10.0, stop=SIMU_DURATION, amplitude=100)
    source.inject_into(list(p1.sample(50).all()))

    p1.record(to_file=False)
    p2.record(to_file=False)

    ## Build and send the visualizable network structure
    adapter = pynn_to_visu.PynnToVisuAdapter(LOGGER)
    adapter.add_pynn_population(p1)
    adapter.add_pynn_population(p2)
    adapter.add_pynn_projection(p1, p2, prj1_2.connection_manager)
    adapter.commit_structure()

    parent_conn.send(adapter.output_struct)

    # Number of chunks to run the simulation:
    n_chunks = SIMU_DURATION // SIMU_TO_VISU_MESSAGE_PERIOD
    last_chunk_duration = SIMU_DURATION % SIMU_TO_VISU_MESSAGE_PERIOD
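    # e.g. with (hypothetical) SIMU_DURATION = 250 and
    # SIMU_TO_VISU_MESSAGE_PERIOD = 100, this yields 2 full chunks plus a
    # final 50 ms chunk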
    # Run the simulator
    for visu_i in xrange(n_chunks):
        pynnn.run(SIMU_TO_VISU_MESSAGE_PERIOD)
        parent_conn.send(adapter.make_activity_update_message())
        LOGGER.debug("real current p1 spike counts: %s",
                     p1.get_spike_counts().values())
    if last_chunk_duration > 0:
        pynnn.run(last_chunk_duration)
        parent_conn.send(adapter.make_activity_update_message())
    # Cleanup
    pynnn.end()
    # Wait for the visualisation process to terminate
    p.join(VISU_PROCESS_JOIN_TIMEOUT)