def setupLayerInput(params, spikeSourceVrResponsePath, spikeSourceActiveClassPath, populationsInput, learning):
    # Create a population, one neuron per VR, where each neuron will be loaded with
    # the rate-code spikes for the VR response over the training and/or test set
    spikeData = utils.readSpikeSourceDataFile(spikeSourceVrResponsePath)
    numVR = params['NUM_VR']
    numRatecodeNeurons = numVR
    popRateCodeSpikes = spynnaker.Population(numRatecodeNeurons, spynnaker.SpikeSourceArray, spikeData, label='popRateCodeSpikes')
    populationsInput.append(popRateCodeSpikes)

    if learning:
        # Create a population, one neuron per class.
        # During training the neuron representing the current class will be active with significant spikes; the others will be quiet.
        # The purpose is to innervate the relevant output class cluster/population so that
        # fire-together-wire-together Hebbian learning (via STDP) strengthens synapses from active PN clusters.
        # During testing all these neurons will be silent, leaving the strengthened synapses
        # to trigger activity directly from the PN layer in the correct output cluster.
        spikeData = utils.readSpikeSourceDataFile(spikeSourceActiveClassPath)
        numNeurons = params['NUM_CLASSES']
        popClassActivationSpikes = spynnaker.Population(numNeurons, spynnaker.SpikeSourceArray, spikeData, label='popClassActivationSpikes')
        populationsInput.append(popClassActivationSpikes)
    else:
        # Create an orphan dummy population of 1 neuron to take the place of the now unused spike source population used in learning.
        # This ensures the freed-up core does not get co-opted by the PN layer config routine,
        # as this would make the learning and testing configurations different in PN,
        # which would likely make the saved PNAN weight arrays incorrect.
        # (neuronModel and cell_params are assumed to be module-level globals here.)
        popClassActivationSpikes = spynnaker.Population(1, neuronModel, cell_params, label='dummy_popClassActivationSpikes')
        populationsInput.append(popClassActivationSpikes)
def test(spikeTimes, trained_weights, label):
    #spikeTimes = extractSpikes(sample)
    runTime = int(max(max(spikeTimes))) + 100

    ##########################################

    sim.setup(timestep=1)

    pre_pop = sim.Population(input_size, sim.SpikeSourceArray, {'spike_times': spikeTimes}, label="pre_pop")
    post_pop = sim.Population(output_size, sim.IF_curr_exp, cell_params_lif, label="post_pop")

    if len(trained_weights) > input_size:
        weights = [[0 for j in range(output_size)] for i in range(input_size)]  # size input_size x output_size
        k = 0
        for i in range(input_size):
            for j in range(output_size):
                weights[i][j] = trained_weights[k]
                k += 1
    else:
        weights = trained_weights

    connections = []
    for n_pre in range(input_size):  # len(untrained_weights) = input_size
        for n_post in range(output_size):  # len(untrained_weight[0]) = output_size
            #connections.append((n_pre, n_post, weights[n_pre][n_post]*(wMax), __delay__))
            connections.append((n_pre, n_post, weights[n_pre][n_post] * wMax / max(trained_weights), __delay__))

    prepost_proj = sim.Projection(pre_pop, post_pop, sim.FromListConnector(connections),
                                  synapse_type=sim.StaticSynapse(), receptor_type='excitatory')  # no more learning
    #inhib_proj = sim.Projection(post_pop, post_pop, sim.AllToAllConnector(),
    #                            synapse_type=sim.StaticSynapse(weight=inhibWeight, delay=__delay__),
    #                            receptor_type='inhibitory')  # no more lateral inhibition

    post_pop.record(['v', 'spikes'])
    sim.run(runTime)

    neo = post_pop.get_data(['v', 'spikes'])
    spikes = neo.segments[0].spiketrains
    v = neo.segments[0].filter(name='v')[0]

    f1 = pplt.Figure(
        # plot voltage
        pplt.Panel(v, ylabel="Membrane potential (mV)", xticks=True, yticks=True, xlim=(0, runTime + 100)),
        # raster plot
        pplt.Panel(spikes, xlabel="Time (ms)", xticks=True, yticks=True, markersize=2, xlim=(0, runTime + 100)),
        title='Test with label ' + str(label),
        annotations='Test with label ' + str(label)
    )
    f1.save('plot/' + str(trylabel) + str(label) + '_test.png')
    f1.fig.texts = []
    print("Weights:{}".format(prepost_proj.get('weight', 'list')))

    weight_list = [prepost_proj.get('weight', 'list'),
                   prepost_proj.get('weight', format='list', with_address=False)]
    #predict_label =
    sim.end()
    return spikes
def create_inputs(self):
    # two inputs
    spike_array0 = {"spike_times": [[10.0]]}
    spike_array1 = {"spike_times": [[50.0]]}
    spike_gen0 = spinn.Population(1, spinn.SpikeSourceArray, spike_array0, label="inputSpikes_0")
    spike_gen1 = spinn.Population(1, spinn.SpikeSourceArray, spike_array1, label="inputSpikes_1")
    return [spike_gen0, spike_gen1]
def live_spike_receive_translated(self):
    self.stored_data = list()

    db_conn = DatabaseConnection(local_port=None)
    db_conn.add_database_callback(self.database_callback)

    p.setup(1.0)
    p.set_number_of_neurons_per_core(p.SpikeSourceArray, 5)
    pop = p.Population(
        25, p.SpikeSourceArray([[1000 + (i * 10)] for i in range(25)]))
    p.external_devices.activate_live_output_for(
        pop, translate_keys=True,
        database_notify_port_num=db_conn.local_port, tag=1,
        use_prefix=True, key_prefix=self.PREFIX,
        prefix_type=EIEIOPrefix.UPPER_HALF_WORD)
    p.run(1500)
    p.end()
    self.listener.close()
    self.conn.close()

    self.assertGreater(len(self.stored_data), 0)
    for key, time in self.stored_data:
        self.assertEqual(key >> 16, self.PREFIX)
        self.assertEqual(1000 + ((key & 0xFFFF) * 10), time)
def setupLayer_KC():
    '''
            ┌────── KC_cell_0001
            ├────── KC_cell_0002       ┌──────> PN_cell_[i]
    KC ─────┼────── KC_cell_0003  <────┼──────> ...
            ├────── ....               └──────> PN_cell_[k]
            └────── KC_cell_2000

    Each KC neuron maps to around ~6 PN_cells, chosen randomly from all 784.
    Besides, by the properties of the SpiNNaker board (each core holds 256 neurons MAX),
    the 2000 KC neurons will spread over around ~10 cores.
    '''
    NUM_KC_CELLS = 2000
    NEURON_PARAMS = {
        'cm': 0.25,
        'i_offset': 0.0,
        'tau_m': 20.0,
        'tau_refrac': 0.0,
        'tau_syn_E': 10.0,
        'tau_syn_I': 10.0,
        'v_reset': -70.0,
        'v_rest': -65.0,
        'v_thresh': -64.0
    }
    kc_population = spynnaker.Population(NUM_KC_CELLS, spynnaker.IF_curr_exp, NEURON_PARAMS, label='KC_population')
    return kc_population
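# A minimal sketch of the random PN->KC wiring the docstring above describes,
# assuming the populations come from setupLayer_PN / setupLayer_KC defined in this
# file and that `time_space` is available. The weight and delay values are
# illustrative assumptions, not taken from the source; FixedNumberPreConnector is
# the standard PyNN way to give each postsynaptic KC a fixed number of random
# presynaptic PN cells.
pn_population = setupLayer_PN(time_space)
kc_population = setupLayer_KC()

pn_to_kc = spynnaker.Projection(
    pn_population, kc_population,
    spynnaker.FixedNumberPreConnector(6),  # ~6 random PN inputs per KC cell
    synapse_type=spynnaker.StaticSynapse(weight=0.2, delay=1.0),  # assumed values
    receptor_type='excitatory')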
def setupLayerRN(params, neuronModel, cell_params, injectionPopulations, popPoissionNoiseSource, populationsRN):
    # Create a single RN population divided into virtual clusters, one per VR.
    # This will be fed by the noise population and modulated by the relevant
    # rate-coded neuron to create a rate-coded population.
    numVR = params['NUM_VR']
    rnClusterSize = int(params['CLUSTER_SIZE'])  # * params['NETWORK_SCALE']
    rnPopSize = rnClusterSize * numVR
    popName = 'popRN'
    popRN = spynnaker.Population(rnPopSize, neuronModel, cell_params, label=popName)
    populationsRN.append(popRN)

    # Connect one random Poisson neuron to each RN neuron
    weight = params['WEIGHT_POISSON_TO_CLUSTER_RN']
    delay = params['DELAY_POISSON_TO_CLUSTER_RN']
    connections = utils.fromList_OneRandomSrcForEachTarget(popPoissionNoiseSource._size, popRN._size, weight, delay)
    projPoissonToClusterRN = spynnaker.Projection(popPoissionNoiseSource, popRN,
                                                  spynnaker.FromListConnector(connections), target='excitatory')

    vr = 0
    for injectionPopn in injectionPopulations:
        connections = list()
        for fromNeuronIdx in range(injectionPopn._size):
            # Connect the correct VR rate-code neuron in popRateCodeSpikes to the
            # corresponding subsection (cluster) of the RN population
            weight = params['WEIGHT_RATECODE_TO_CLUSTER_RN']
            firstIndex = vr * rnClusterSize
            lastIndex = firstIndex + rnClusterSize - 1
            connections += utils.fromList_SpecificNeuronToRange(fromNeuronIdx, firstIndex, lastIndex, weight,
                                                                params['MIN_DELAY_RATECODE_TO_CLUSTER_RN'],
                                                                params['MAX_DELAY_RATECODE_TO_CLUSTER_RN'])
            vr = vr + 1
        # After the last neuron in the current injection pop, create a projection to the RN
        projRateToClusterRN = spynnaker.Projection(injectionPopn, popRN,
                                                   spynnaker.FromListConnector(connections), target='excitatory')
        print('Added projection to RN of %d connections from injection pop %s (size %d)'
              % (len(connections), injectionPopn.label, injectionPopn._size))
def setupLayerInput(params, rnSpikeInjectionPort, rnSpikeInjectionPopLabel,
                    classActivationSpikeInjectionPort, classActivationSpikeInjectionPopLabel,
                    populationsInput, learning):
    # Create a spike injection population, one neuron per VR, where each neuron will be
    # externally spiked in real time according to the rate code for the VR response
    # over the training and/or test set.
    # Returns the number of spike injection populations that were needed (a max size applies).

    if learning:
        # Create a population, one neuron per class.
        # During training the neuron representing the current class will be actively spiking; the others will be quiet.
        # The purpose is to innervate the relevant output class cluster/population so that
        # fire-together-wire-together Hebbian learning (via STDP) strengthens synapses from active PN clusters.
        # During testing all these neurons will be silent, leaving the strengthened synapses
        # to trigger activity directly from the PN layer in the correct output cluster.
        numNeurons = params['NUM_CLASSES']
        #popClassActivationSpikes = spynnaker.Population(numNeurons, spynnaker.SpikeSourceArray, spikeData, label='popClassActivationSpikes')
        popClassActivationSpikeInjection = spynnaker.Population(numNeurons, ExternalDevices.SpikeInjector,
                                                                {'port': classActivationSpikeInjectionPort},
                                                                label=classActivationSpikeInjectionPopLabel)
        populationsInput.append(popClassActivationSpikeInjection)
    else:
        # Create an orphan dummy population of 1 neuron to take the place of the now unused
        # class activation input used in learning.
        # This ensures the freed-up core does not get co-opted by the PN layer config routine,
        # as this would make the learning and testing configurations different in PN,
        # which would likely make the saved PNAN weight arrays incorrect.
        popClassActivationSpikes = spynnaker.Population(1, neuronModel, cell_params, label='dummy_popClassActivationSpikes')
        populationsInput.append(popClassActivationSpikes)

    sizes = list()
    numVR = params['NUM_VR']
    maxSize = params['MAX_SIZE_SPIKE_INJECTION_POP']
    size = 0
    for vr in range(numVR):
        size = size + 1
        if size == maxSize:
            sizes.append(size)
            size = 0
    if size > 0:  # put remainder in the last one
        sizes.append(size)

    popIdx = 0
    for sz in sizes:
        popRnSpikeInjection = spynnaker.Population(sz, ExternalDevices.SpikeInjector,
                                                   {'port': rnSpikeInjectionPort + popIdx},
                                                   label=(rnSpikeInjectionPopLabel + str(popIdx)))
        #spynnaker.set_number_of_neurons_per_core(SpikeInjector, numRatecodeNeurons)
        populationsInput.append(popRnSpikeInjection)
        popIdx = popIdx + 1

    return len(sizes)  # number of spike injection populations needed
def setupLayerNoiseSource(params, simTime, populationsNoiseSource):
    # Create a single "noise" population that will be used to generate the rate-coded RN populations
    noiseRateHz = params['RN_NOISE_RATE_HZ']
    params_poisson_noise = {'rate': noiseRateHz, 'start': 0, 'duration': simTime}
    numPoissonNeurons = params['RN_NOISE_SOURCE_POP_SIZE'] * params['NETWORK_SCALE']
    popPoissionNoiseSource = spynnaker.Population(numPoissonNeurons, spynnaker.SpikeSourcePoisson,
                                                  params_poisson_noise, label='popPoissionNoiseSource')
    populationsNoiseSource.append(popPoissionNoiseSource)
def setupLayerAN(params, settings, neuronModel, cell_params, popClassActivation, popPoissionNoiseSource,
                 populationsPN, populationsAN, learning, projectionsPNAN):
    # Create an Association Neuron (AN) cluster population per class.
    # This will be fed by:
    # 1) PN clusters via plastic synapses
    # 2) Class activation to innervate the correct AN cluster for a given input
    # 3) Lateral inhibition between AN clusters

    numClasses = params['NUM_CLASSES']
    anClusterSize = int(params['CLUSTER_SIZE'])  # * params['NETWORK_SCALE']

    for an in range(numClasses):
        popName = 'popClusterAN_' + str(an)
        popClusterAN = spynnaker.Population(anClusterSize, neuronModel, cell_params, label=popName)
        populationsAN.append(popClusterAN)

        # Connect neurons in every PN population to x% (e.g. 50%) of neurons in this AN cluster
        for pn in range(len(populationsPN)):
            if learning:
                projLabel = 'Proj_PN' + str(pn) + '_AN' + str(an)
                projClusterPNToClusterAN = connectClusterPNtoAN(params, populationsPN[pn], popClusterAN,
                                                                float(settings['OBSERVATION_EXPOSURE_TIME_MS']), projLabel)
                projectionsPNAN.append(projClusterPNToClusterAN)  # keep handle for saving off weights at end of learning
            else:
                # Without plasticity, create PNAN FromList connectors using weights saved during the learning stage
                connections = utils.loadListFromFile(getWeightsFilename(settings, 'PNAN', pn, an))
                #print 'Loaded weightsList[',pn,',',an,']',connections
                tupleList = utils.createListOfTuples(connections)  # new version only accepts a list of tuples, not a list of lists
                #print 'tupleList[',pn,',',an,']',tupleList
                conn = spynnaker.FromListConnector(tupleList)
                projClusterPNToClusterAN = spynnaker.Projection(populationsPN[pn], popClusterAN, conn, target='excitatory')

        if learning:
            # Use the class activity input neurons to create correlated activity
            # during learning in the corresponding class cluster
            weight = params['WEIGHT_CLASS_EXCITATION_TO_CLUSTER_AN']
            connections = utils.fromList_SpecificNeuronToAll(an, anClusterSize, weight,
                                                             params['MIN_DELAY_CLASS_ACTIVITY_TO_CLUSTER_AN'],
                                                             params['MAX_DELAY_CLASS_ACTIVITY_TO_CLUSTER_AN'])
            projClassActivityToClusterAN = spynnaker.Projection(popClassActivation, popClusterAN,
                                                                spynnaker.FromListConnector(connections), target='excitatory')
        else:  # testing
            # Send spikes on these outputs back to the correct host port; these will be used to determine the winner etc.
            anHostReceivePort = int(settings['AN_HOST_RECEIVE_PORT'])
            ExternalDevices.activate_live_output_for(popClusterAN, port=anHostReceivePort)

    # Connect each AN cluster to inhibit every other AN cluster
    utils.createInterPopulationWTA(populationsAN, params['WEIGHT_WTA_AN_AN'], params['DELAY_WTA_AN_AN'],
                                   float(params['CONNECTIVITY_WTA_AN_AN']))

    # Inhibit other, non-corresponding class clusters
    if learning:
        weight = params['WEIGHT_CLASS_INHIBITION_TO_CLUSTER_AN']
        for activeCls in range(numClasses):
            connections = utils.fromList_SpecificNeuronToAll(activeCls, anClusterSize, weight,
                                                             params['MIN_DELAY_CLASS_ACTIVITY_TO_CLUSTER_AN'],
                                                             params['MAX_DELAY_CLASS_ACTIVITY_TO_CLUSTER_AN'])
            for an in range(numClasses):
                if an != activeCls:
                    projClassActivityToClusterAN = spynnaker.Projection(popClassActivation, populationsAN[an],
                                                                        spynnaker.FromListConnector(connections), target='inhibitory')
def load_assembly(path, filename):
    """Load the populations in an assembly.

    Loads the populations in an assembly that was saved with the
    `save_assembly` function.

    The term "assembly" refers to pyNN internal nomenclature, where
    ``Assembly`` is a collection of layers (``Populations``), which in turn
    consist of a number of neurons (``cells``).

    Parameters
    ----------

    path: str
        Path to directory where to load model from.

    filename: str
        Name of file to load model from.

    Returns
    -------

    layers: list[pyNN.Population]
        List of pyNN ``Population`` objects.
    """
    import sys

    filepath = os.path.join(path, filename)
    assert os.path.isfile(filepath), \
        "Spiking neuron layers were not found at specified location."
    if sys.version_info < (3,):
        s = cPickle.load(open(filepath, 'rb'))
    else:
        s = cPickle.load(open(filepath, 'rb'), encoding='bytes')

    # Iterate over populations in assembly
    layers = []
    for label in s['labels']:
        celltype = getattr(sim, s[label]['celltype'])
        population = sim.Population(s[label]['size'], celltype,
                                    celltype.default_parameters,
                                    structure=s[label]['structure'],
                                    label=label)
        # Set the rest of the specified variables, if any.
        for variable in s['variables']:
            if getattr(population, variable, None) is None:
                setattr(population, variable, s[label][variable])
        if label != 'InputLayer':
            population.set(i_offset=s[label]['i_offset'])
        layers.append(population)

    return layers
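# A minimal usage sketch for load_assembly, assuming the pickle was produced by
# the companion save_assembly function and that `sim` is the already-imported
# pyNN backend. The directory and file names here are made-up placeholders.
layers = load_assembly('saved_model', 'snn_assembly.pkl')
for layer in layers:
    print(layer.label, layer.size)  # e.g. InputLayer first, then the hidden layers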
def generate_data():
    spikesTrain = []
    organisedData = {}
    for i in range(input_class):
        for j in range(input_len):
            neuid = (i, j)
            organisedData[neuid] = []
    for i in range(input_len):
        for j in range(output_size):
            neuid = (j, i)
            organisedData[neuid].append(j * input_len * v_co * 5 + i * v_co)
            organisedData[neuid].append(j * input_len * v_co * 5 + input_len * v_co * 1 + i * v_co)
            organisedData[neuid].append(j * input_len * v_co * 5 + input_len * v_co * 2 + i * v_co)
            organisedData[neuid].append(j * input_len * v_co * 5 + input_len * v_co * 3 + i * v_co)
            organisedData[neuid].append(j * input_len * v_co * 5 + input_len * v_co * 4 + i * v_co)
            organisedData[neuid].append(input_len * v_co * (3 * 5 + j) + i * v_co)
            #organisedData[neuid].append(i*v_co+2)
            # if neuid not in organisedData:
            #     organisedData[neuid] = [i*v_co]
            # else:
            #     organisedData[neuid].append(i*v_co)
    for i in range(input_class):
        for j in range(input_len):
            neuid = (i, j)
            organisedData[neuid].sort()
            spikesTrain.append(organisedData[neuid])

    runTime = int(max(max(spikesTrain)))
    sim.setup(timestep=1)
    noise = sim.Population(input_size, sim.SpikeSourcePoisson(), label='noise')
    noise.record(['spikes'])  # noise
    sim.run(runTime)
    neonoise = noise.get_data(["spikes"])
    spikesnoise = neonoise.segments[0].spiketrains  # noise
    sim.end()
    for i in range(input_size):
        for noisespike in spikesnoise[i]:
            spikesTrain[i].append(noisespike)
            spikesTrain[i].sort()
    return spikesTrain
def setupLayer_PN(time_space):
    '''
    PN ─┬─── pn_neuron_01
        ├─── pn_neuron_02
        ├─── pn_neuron_03
        ├─── ...
        └─── pn_neuron_100

    PN is used as the input layer.
    '''
    input_population = spynnaker.Population(NUM_PN_CELLS, spynnaker.SpikeSourceArray(spike_times=time_space),
                                            label='PN_population')
    return input_population
def allocate_monitoring_cores(db):
    """
    Little PACMAN for monitors. The monitoring core will always be allocated,
    even if it's not used, so monitoring can be switched on at run time.
    The monitoring core is mapped in the processor table with STATUS='MONITORING'.

    Only works with 1 core for now.
    """
    import pyNN.spiNNaker as p  # needed for populations

    p.simulator.set_db(db)  # using db as simulator.db_run

    probes = db.get_probes()
    probes = [i for i in probes if i['save_to'] == 'eth']  # will only get the ethernet probes

    # creating population
    monitoring_pop = p.Population(1, p.Recorder, {}, label='app_monitoring')
    monitoring_pop_id = monitoring_pop.id

    # creating projections
    for probe in probes:
        db.insert_monitoring_projection(probe['population_id'], monitoring_pop_id)

    # creating part_population
    monitoring_part_pop_id = db.insert_part_population(
        monitoring_pop_id, 1, 0)  # insert the population in the part_population table
    monitoring_pop.set_mapping_constraint({'x': 0, 'y': 0})

    # update core_group_id and map
    group_id = db.generic_select('max(processor_group_id) as g', 'part_populations')[0]['g'] + 1
    db.update_part_popoulation_group(monitoring_part_pop_id, group_id, position_in_group=0)
    db.set_part_population_core_offset(group_id)

    monitoring_processor = db.generic_select(
        'id', 'processors WHERE status = \'MONITORING\'')[0]['id']  # will get only the first one
    db.insert_group_into_map(monitoring_processor, group_id)  # will pick the first available processor

    # part_projections
    for projection in db.get_presynaptic_populations(monitoring_pop_id):
        # gets every child part_population from the presynaptic_population
        for pre_part_population in db.get_part_populations(
                population_id=projection['presynaptic_population_id']):
            db.insert_monitoring_part_projection(projection['id'],
                                                 pre_part_population['id'],
                                                 monitoring_part_pop_id)
def setupLayer_PN(time_space):
    '''
    PN ─┬─── pn_neuron_01
        ├─── pn_neuron_02
        ├─── pn_neuron_03
        ├─── ...
        └─── pn_neuron_784
    '''
    NUM_PN_CELLS = 784
    # The 784 PN neurons are placed across several cores (each core holds at most 256 neurons).
    input_population = spynnaker.Population(
        NUM_PN_CELLS,
        spynnaker.SpikeSourceArray(spike_times=time_space),
        label='PN_population')
    return input_population
def setupLayer_KC():
    '''
            ┌────── KC_cell_0001
            ├────── KC_cell_0002       ┌──────> PN_cell_[i]
    KC ─────┼────── KC_cell_0003  <────┼──────> ...
            ├────── ....               └──────> PN_cell_[k]
            └────── KC_cell_2000

    1. Each KC neuron maps to around ~20 PN_cells, chosen randomly from all 100.
    2. By the properties of the SpiNNaker board, each core contains at most 256 neurons,
       hence the 2000 KC neurons will spread over around ~10 cores.
    '''
    kc_population = spynnaker.Population(NUM_KC_CELLS, spynnaker.IF_curr_exp(), label='KC_population')
    return kc_population
def setupLayerAN(params, settings, neuronModel, cell_params, popClassActivation, popPoissionNoiseSource,
                 populationsPN, populationsAN, learning, projectionsPNAN):
    # Create an Association Neuron (AN) cluster population per class.
    # This will be fed by:
    # 1) PN clusters via plastic synapses
    # 2) Class activation to innervate the correct AN cluster for a given input
    # 3) Lateral inhibition between AN clusters

    numClasses = params['NUM_CLASSES']
    anClusterSize = params['CLUSTER_SIZE'] * params['NETWORK_SCALE']

    for an in range(numClasses):
        popName = 'popClusterAN_' + str(an)
        popClusterAN = spynnaker.Population(anClusterSize, neuronModel, cell_params, label=popName)
        populationsAN.append(popClusterAN)

        # Connect neurons in every PN population to x% (e.g. 50%) of neurons in this AN cluster
        for pn in range(len(populationsPN)):
            if learning:
                projLabel = 'Proj_PN' + str(pn) + '_AN' + str(an)
                projClusterPNToClusterAN = connectClusterPNtoAN(params, populationsPN[pn], popClusterAN, projLabel)
                projectionsPNAN.append(projClusterPNToClusterAN)  # keep handle for saving off weights at end of learning
            else:
                # Without plasticity, create PNAN FromList connectors using weights saved during the learning stage
                connections = utils.loadListFromFile(getWeightsFilename(settings, 'PNAN', pn, an))
                #print 'Loaded weightsList[',pn,',',an,']',connections
                projClusterPNToClusterAN = spynnaker.Projection(populationsPN[pn], popClusterAN,
                                                                spynnaker.FromListConnector(connections), target='excitatory')

        if learning:
            # Use the class activity input neurons to create correlated activity
            # during learning in the corresponding class cluster
            weight = params['WEIGHT_CLASS_ACTIVITY_TO_CLUSTER_AN']
            connections = utils.fromList_SpecificNeuronToAll(an, anClusterSize, weight,
                                                             params['MIN_DELAY_CLASS_ACTIVITY_TO_CLUSTER_AN'],
                                                             params['MAX_DELAY_CLASS_ACTIVITY_TO_CLUSTER_AN'])
            projClassActivityToClusterAN = spynnaker.Projection(popClassActivation, popClusterAN,
                                                                spynnaker.FromListConnector(connections), target='excitatory')

    # Connect each AN cluster to inhibit every other AN cluster
    utils.createInterPopulationWTA(populationsAN, params['WEIGHT_WTA_AN_AN'], params['DELAY_WTA_AN_AN'],
                                   float(params['CONNECTIVITY_WTA_AN_AN']))
import pyNN.spiNNaker as sim
import pyNN.utility.plotting as plot
import matplotlib.pyplot as plt
import threading
from random import uniform
from time import sleep
from pykeyboard import PyKeyboard

sim.setup(timestep=1.0)
sim.set_number_of_neurons_per_core(sim.IF_curr_exp, 100)

input1 = sim.Population(6, sim.external_devices.SpikeInjector(), label="stateSpikeInjector")
pre_pop = sim.Population(6, sim.IF_curr_exp(tau_syn_E=100, tau_refrac=50), label="statePopulation")
post_pop = sim.Population(1, sim.IF_curr_exp(), label="actorPopulation")

sim.external_devices.activate_live_output_for(pre_pop, database_notify_host="localhost",
                                              database_notify_port_num=19996)
sim.external_devices.activate_live_output_for(input1, database_notify_host="localhost",
                                              database_notify_port_num=19998)

timing_rule = sim.SpikePairRule(tau_plus=20.0, tau_minus=20.0, A_plus=0.5, A_minus=0.5)
    'i_offset': 0.0,
    'tau_m': 20.0,
    'tau_refrac': 2.0,
    'tau_syn_E': 5.0,
    'tau_syn_I': 5.0,
    'v_reset': -70.0,
    'v_rest': -65.0,
    'v_thresh': -60.0
}

#extpop = p.Population(1, p.ExternalSpikeSource, {'virtual_chip_coords': {'x': 254, 'y': 255}}, label='ext spikes')
# fefffe80.00000100
# set setpoint: @FEFFFE80.00000100
# set setpoint: @FEFFFE80.00000000

testpop = p.Population(200, p.IF_curr_exp, cell_params_lif, label='ifcurr')
testpop.record()

pois1 = p.Population(
    32, p.SpikeSourceRemote, {
        'max_rate': 100,
        'min_rate': 0.1,
        'overlap': 2e-8,
        'sensormin': 0,
        'sensormax': 8191,
        'src_type': 'rbf_pois'
    })
pois1.record()

#errorprop = p.Projection(myopop, pois1, p.OneToOneConnector(weights=1.0, delays=1.0))
    'e_rev_I': -80.0,
    'v_thresh': -52.0,
    'tau_syn_E': 1.54919333848,
    'v_rest': -70.0,
    'tau_syn_I': 12.0062483732,
    'v_reset': -70.0,
}
cell_params = cellparams_pclayer

#noisepop = p.Population(nn, p.SpikeSourcePoisson, {'rate': 100., 'start': 5., 'duration': 100.})
#prepop = p.Population(nn, p.SpikeSourceArray, {'spike_times': [[i for i in arange(0, duration, 800)],
#                                                               [i for i in arange(0, duration, 80)]]*(nn/2)})
#prepop = p.Population(nn, p.SpikeSourceArray, {'spike_times': [[i for i in arange(0, duration*j/(nn+1), 100)] for j in range(nn)]})
prepop = p.Population(nn_pre, p.SpikeSourcePoisson, {
    'rate': 3.3,
    'duration': duration
})
#prepop.record()

#teachpop = p.Population(nn, p.SpikeSourceArray, {'spike_times': [[i for i in arange(250, 0.8*duration, 100)],
#                                                                 [i for i in arange(250, 0.8*duration, 100)]]*(nn/2)})
teachpop = p.Population(nn_teach, p.SpikeSourcePoisson, {
    'rate': 100,
    'duration': duration
})
#teachpop.record()

postpop = p.Population(nn_post, p.IF_cond_exp, cell_params)
#postpop.record()
def train(spikeTimes, untrained_weights=None):
    organisedStim = {}
    labelSpikes = []
    #spikeTimes = generate_data()
    #for j in range(5):
    #    labelSpikes
    #labelSpikes[label] = [(input_len-1)*v_co+1, (input_len-1)*v_co*2+1, (input_len-1)*v_co*3+1]

    if untrained_weights is None:
        untrained_weights = RandomDistribution('uniform', low=wMin, high=wMaxInit).next(input_size * output_size)
        #untrained_weights = RandomDistribution('normal_clipped', mu=0.1, sigma=0.05, low=wMin, high=wMaxInit).next(input_size*output_size)
        untrained_weights = np.around(untrained_weights, 3)
        #saveWeights(untrained_weights, 'untrained_weightssupmodel1traj')
        print("init!")

    print("length untrained_weights :", len(untrained_weights))

    if len(untrained_weights) > input_size:
        training_weights = [[0 for j in range(output_size)] for i in range(input_size)]  # size input_size x output_size
        k = 0
        for i in range(input_size):
            for j in range(output_size):
                training_weights[i][j] = untrained_weights[k]
                k += 1
    else:
        training_weights = untrained_weights

    connections = []
    for n_pre in range(input_size):  # len(untrained_weights) = input_size
        for n_post in range(output_size):  # len(untrained_weight[0]) = output_size
            connections.append((n_pre, n_post, training_weights[n_pre][n_post], __delay__))  # index

    runTime = int(max(max(spikeTimes)) / 3) + 100

    #####################

    sim.setup(timestep=1)

    # populations
    layer1 = sim.Population(input_size, sim.SpikeSourceArray, {'spike_times': spikeTimes}, label='inputspikes')
    layer2 = sim.Population(output_size, sim.IF_curr_exp, cellparams=cell_params_lif, label='outputspikes')
    #supsignal = sim.Population(output_size, sim.SpikeSourceArray, {'spike_times': labelSpikes}, label='supersignal')

    # learning rule
    stdp = sim.STDPMechanism(
        weight=untrained_weights,
        #weight=0.02,  # this is the initial value of the weight
        #delay="0.2 + 0.01*d",
        timing_dependence=sim.SpikePairRule(tau_plus=tauPlus, tau_minus=tauMinus, A_plus=aPlus, A_minus=aMinus),
        #weight_dependence=sim.MultiplicativeWeightDependence(w_min=wMin, w_max=wMax),
        weight_dependence=sim.AdditiveWeightDependence(w_min=wMin, w_max=wMax),
        #weight_dependence=sim.AdditiveWeightDependence(w_min=0, w_max=0.4),
        dendritic_delay_fraction=1.0)

    # projections
    #stdp_proj = sim.Projection(layer1, layer2, sim.FromListConnector(connections), synapse_type=stdp)
    stdp_proj = sim.Projection(layer1, layer2, sim.AllToAllConnector(), synapse_type=stdp)
    inhibitory_connections = sim.Projection(layer2, layer2, sim.AllToAllConnector(allow_self_connections=False),
                                            synapse_type=sim.StaticSynapse(weight=inhibWeight, delay=__delay__),
                                            receptor_type='inhibitory')
    #stim_proj = sim.Projection(supsignal, layer2, sim.OneToOneConnector(),
    #                           synapse_type=sim.StaticSynapse(weight=stimWeight, delay=__delay__))

    layer1.record(['spikes'])
    layer2.record(['v', 'spikes'])
    #supsignal.record(['spikes'])
    sim.run(runTime)

    print("Weights:{}".format(stdp_proj.get('weight', 'list')))

    weight_list = [stdp_proj.get('weight', 'list'),
                   stdp_proj.get('weight', format='list', with_address=False)]
    neo = layer2.get_data(["spikes", "v"])
    spikes = neo.segments[0].spiketrains
    v = neo.segments[0].filter(name='v')[0]
    #neostim = supsignal.get_data(["spikes"])
    #spikestim = neostim.segments[0].spiketrains
    neoinput = layer1.get_data(["spikes"])
    spikesinput = neoinput.segments[0].spiketrains

    plt.close('all')
    pplt.Figure(
        pplt.Panel(spikesinput, xticks=True, yticks=True, markersize=2, xlim=(0, runTime),
                   xlabel='(a) Spikes of Input Layer'),
        #pplt.Panel(spikestim, xticks=True, yticks=True, markersize=2, xlim=(0, runTime),
        #           xlabel='(c) Spikes of Supervised Layer'),
        pplt.Panel(spikes, xticks=True, xlabel="(b) Spikes of Output Layer", yticks=True,
                   markersize=2, xlim=(0, runTime)),
        pplt.Panel(v, ylabel="Membrane potential (mV)", xticks=True, yticks=True, xlim=(0, runTime),
                   xlabel='(c) Membrane Potential of Output Layer\nTime (ms)'),
        title="Two Training",
        annotations="Twoway Training"
    ).save('SNN_DVS_un/plot_for_twoway/' + str(trylabel) + '_training.png')

    #plt.hist(weight_list[1], bins=100)
    plt.close('all')
    plt.hist([weight_list[1][0:input_size], weight_list[1][input_size:input_size * 2],
              weight_list[1][input_size * 2:]], bins=20,
             label=['neuron 0', 'neuron 1', 'neuron 2'], range=(0, wMax))
    plt.title('weight distribution')
    plt.xlabel('Weight value')
    plt.ylabel('Weight count')
    #plt.show()
    sim.end()
    return weight_list[1]
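# A minimal end-to-end sketch of how the helpers above appear intended to fit
# together, assuming generate_data, train, and test are the functions defined in
# this file; the label value 0 passed to test() is an illustrative assumption.
spike_times = generate_data()                  # build input spike trains (plus Poisson noise)
trained = train(spike_times)                   # STDP training pass; returns the learned weights
output_spikes = test(spike_times, trained, 0)  # replay with static synapses and plot the result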
try:
    import pyNN.spiNNaker as p
except Exception:
    import spynnaker8 as p

# set up the tools
p.setup(timestep=1.0, min_delay=1.0, max_delay=32.0)

# set up the virtual chip coordinates for the motor
connected_chip_coords = {'x': 0, 'y': 0}
link = 4

populations = list()
projections = list()

input_population = p.Population(6, p.SpikeSourcePoisson(rate=10))
control_population = p.Population(6, p.IF_curr_exp())
motor_device = p.Population(
    6, p.external_devices.MunichMotorDevice(spinnaker_link_id=0))

p.Projection(input_population, control_population, p.OneToOneConnector(),
             synapse_type=p.StaticSynapse(weight=5.0))

p.external_devices.activate_live_output_to(control_population, motor_device)

p.run(1000)
p.end()
sim.setup(timestep=1.0, min_delay=1.0)

# create cells
cell_params = {
    'cm': 0.25, 'tau_m': 10.0, 'tau_refrac': 2.0,
    'tau_syn_E': 2.5, 'tau_syn_I': 2.5,
    'v_reset': -70.0, 'v_rest': -65.0, 'v_thresh': -55.0
}
neurons = sim.Population(100, sim.IF_cond_exp(**cell_params))
inputs = sim.Population(100, sim.SpikeSourcePoisson(rate=0.0))

# set input firing rates as a linear function of cell index
input_firing_rates = np.linspace(0.0, 1000.0, num=inputs.size)
inputs.set(rate=input_firing_rates)

# create one-to-one connections
wiring = sim.OneToOneConnector()
static_synapse = sim.StaticSynapse(weight=0.1, delay=2.0)
connections = sim.Projection(inputs, neurons, wiring, static_synapse)

# configure recording
neurons.record('spikes')

# run simulation
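# The snippet above stops at the run step; a minimal continuation sketch using
# the standard PyNN run/retrieval API. The 1000 ms duration is an assumption,
# not a value from the source.
sim_duration = 1000.0  # ms; illustrative value
sim.run(sim_duration)

data = neurons.get_data('spikes')           # Neo Block, one segment per run
spiketrains = data.segments[0].spiketrains
print("total spikes:", sum(len(st) for st in spiketrains))

sim.end()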
}

populations = list()
projections = list()

weight_to_spike = 2.0
delay = 17

loopConnections = list()
for i in range(0, nNeurons):
    singleConnection = (i, ((i + 1) % nNeurons), weight_to_spike, delay)
    loopConnections.append(singleConnection)

injectionConnection = [(0, 0, weight_to_spike, 1)]
spikeArray = {'spike_times': [[0, 1050]]}
populations.append(p.Population(nNeurons, p.IF_curr_exp, cell_params_lif, label='pop_1'))
populations.append(p.Population(1, p.SpikeSourceArray, spikeArray, label='inputSpikes_1'))

projections.append(p.Projection(populations[0], populations[0], p.FromListConnector(loopConnections)))
projections.append(p.Projection(populations[1], populations[0], p.FromListConnector(injectionConnection)))

populations[0].record_v()
populations[0].record_gsyn()
populations[0].record()

p.run(runtime)

v = None
        pre_phase = 1
    # Otherwise, take into account axonal delay
    else:
        # Pre after post
        if t > 0:
            post_phase = 1
            pre_phase = t
        # Post after pre
        else:
            post_phase = 1 - t
            pre_phase = 0

    sim_time = max(sim_time, (num_pairs * time_between_pairs) + abs(t))

    # Neuron populations
    pre_pop = sim.Population(1, model(**cell_params))
    post_pop = sim.Population(1, model, cell_params)

    # Stimulating populations
    pre_times = [i for i in range(pre_phase, sim_time, time_between_pairs)]
    post_times = [i for i in range(post_phase, sim_time, time_between_pairs)]
    pre_stim = sim.Population(
        1, sim.SpikeSourceArray(spike_times=[pre_times]))
    post_stim = sim.Population(
        1, sim.SpikeSourceArray(spike_times=[post_times]))

    weight = 0.035

    # Connections between spike sources and neuron populations
    ee_connector = sim.OneToOneConnector()
    sim.Projection(
sample_time = 10e-3

extparams = {
    'virtual_chip_coords': virtual_chip_coords,
    'connected_chip_coords': connected_chip_coords,
    'connected_chip_edge': link,
    'kernel_amplitude': sqrt(2e-3 / sample_time),
    'output_scale': 1.,
    'decay_factor': exp(-sample_time / tau),
    'sample_time': sample_time * 1e3,
    'threshold': 0,
    'motorID': 0x110,
    'monitorID': 0x120
}
myopop = p.Population(100, p.MyoRobotMotorControl, extparams.copy(), label='myoext')

extparams['motorID'] = 0x115
extparams['monitorID'] = 0x125
myopop2 = p.Population(100, p.MyoRobotMotorControl, extparams.copy(), label='myoext')

#extpop = p.Population(1, p.ExternalSpikeSource, {'virtual_chip_coords': {'x': 254, 'y': 255}}, label='ext spikes')
# fefffe80.00000100
# set setpoint: @FEFFFE80.00000100
# set setpoint: @FEFFFE80.00000000
def test_snn(randomness=False,
             data_dir="data/X_test_zied.npy",
             cls_dir="data/y_test_zied.npy",
             data="load",   # pass data as argument
             cls="load"):   # pass labels as argument

    ###############################################################################
    ## Function Definitions
    ###############################################################################
    def gaussian(x, mu, sig):
        return np.float16(np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.))))

    def calc_pop_code(feature, rng1, rng2, num):
        interval = float(rng2 - rng1) / num
        means = np.arange(rng1 + interval, rng2 + interval, interval)
        pop_code = [gaussian(feature, mu, 0.025) for mu in means]
        return pop_code

    def PoissonTimes2(t_str=0., t_end=100., rate=10., seed=1.):
        times = [t_str]
        rng = np.random.RandomState(seed=seed)
        cont = True
        while cont:
            t_next = np.floor(times[-1] + 1000. * next_spike_times(rng, rate))
            if t_next < t_end - 30:
                times.append(t_next[0])
            else:
                cont = False
        return times[1:]

    def PoissonTimes(t_str=0., t_end=100., rate=10., seed=1.):
        if rate > 0:
            interval = (t_end - t_str + 0.) / rate
            times = np.arange(t_str + 30, t_end - 40, interval)
            return list(times)
        else:
            return []

    def next_spike_times(rng, rate):
        return -np.log(1.0 - rng.rand(1)) / rate

    def ismember(a, b):
        b = [b]
        bind = {}
        for i, elt in enumerate(b):
            if elt not in bind:
                bind[elt] = i
        aa = [bind.get(itm, -1) for itm in a]
        return sum(np.array(aa) + 1.)

    ###############################################################################
    ## Parameters
    ###############################################################################
    # Load parameters
    parameters = np.load("output_files/parameters1.npy")
    parameters = parameters.item()

    # Load test data
    if data == "load" and cls == "load":
        data = np.load(data_dir)
        cls = np.load(cls_dir)

    # Simulation parameters
    trial_num = parameters["trial_num"]  # How many samples (trials) from data will be presented
    n_trials = len(cls)  # Total trials
    time_int_trials = parameters["time_int_trials"]  # (ms) Time to present each trial data
    SIM_TIME = n_trials * time_int_trials  # Total simulation time (ms)
    ts = parameters["ts"]  # Timestep of SpiNNaker (ms)
    min_del = ts
    max_del = 144 * ts
    p.setup(timestep=ts, min_delay=min_del, max_delay=max_del)

    ## Neuron numbers
    n_feature = parameters["n_feature"]  # Number of features
    n_pop = parameters["n_pop"]          # Number of neurons in one population
    n_cl = parameters["n_cl"]            # Number of classes at the output

    ## Connection parameters
    # Weights
    wei_src_enc = parameters["wei_src_enc"]    # From Source Array at input to Encoding Layer (Exc)
    wei_enc_filt = parameters["wei_enc_filt"]  # From Encoding Layer to Filtering Layer Exc neurons (Exc)
    wei_filt_inh = parameters["wei_filt_inh"]  # From Filtering Layer Inh neurons to Exc neurons (Inh)
    wei_cls_exc = parameters["wei_cls_exc"]    # From Output Layer Exc neurons to Inh neurons (Exc)
    wei_cls_inh = parameters["wei_cls_inh"]    # From Output Layer Inh neurons to Exc neurons (Inh)
    wei_noise_poi = parameters["wei_noise_poi"]

    # Delays
    del_src_enc = np.load("output_files/parameters2.npy")
    del_enc_filt = parameters["del_enc_filt"]
    del_init_stdp = parameters["del_init_stdp"]
    del_cls_exc = parameters["del_cls_exc"]
    del_cls_inh = parameters["del_cls_inh"]
    del_noise_poi = parameters["del_noise_poi"]

    # Firing rates
    noise_poi_rate = parameters["noise_poi_rate"]
    max_fr_input = parameters["max_fr_input"]  # Maximum firing rate at the input layer
    max_fr_rate_output = parameters["max_fr_rate_output"]  # Maximum firing rate at output (supervisory signal)

    ## Connection probabilities
    prob_filt_inh = parameters["prob_filt_inh"]  # Prob. of inhibitory connections at Filtering Layer
    prob_stdp = parameters["prob_stdp"]          # Probability of STDP connections
    prob_output_inh = parameters["prob_output_inh"]  # Prob. of inhibitory connections at Output Layer
    prob_noise_poi_conn = parameters["prob_noise_poi_conn"]

    ## STDP parameters
    tau_pl = parameters["tau_pl"]
    tau_min = tau_pl
    stdp_w_max = parameters["stdp_w_max"]
    stdp_w_min = parameters["stdp_w_min"]
    stdp_A_pl = parameters["stdp_A_pl"]
    stdp_A_min = -stdp_A_pl  # minus in order to get a symmetric curve

    ## Neuron parameters
    cell_params_lif = {'cm': 1.,
                       'i_offset': 0.0,
                       'tau_m': 20.,
                       'tau_refrac': 2.0,
                       'tau_syn_E': 5.0,
                       'tau_syn_I': 5.0,
                       'v_reset': -70.0,
                       'v_rest': -65.0,
                       'v_thresh': -65.0
                       }

    ###############################################################################
    ## Data Extraction
    ###############################################################################
    ## Extract feature data
    scale_data = parameters["scale_data"]  # Scale features into [0-scale_data] range
    #data = np.load("features_without_artifact.npy")
    #data = np.load('X_test.npy')
    r, c = np.shape(data)

    # Threshold (to keep spike amplitudes in range)
    thr_data_plus = parameters["thr_data_plus"]
    thr_data_minus = parameters["thr_data_minus"]
    data_rates = np.reshape(data, (1, r * c))[0]

    # Shift and normalize the data
    #dd = [d if d < thr_data_plus else thr_data_plus for d in data_rates]
    #dd = [d if d > thr_data_minus else thr_data_minus for d in dd]
    #dd2 = np.array(dd) - min(dd)
    #dd2 = dd2 / max(dd2) * 2
    dd2 = np.array(data_rates) - min(data_rates)
    dd2 = dd2 / max(dd2) * 2
    new_data_rates = []
    for r in dd2:
        new_data_rates += calc_pop_code(r, 0., scale_data, n_feature / (n_pop + 0.0))
    data_rates = list(max_fr_input * np.array(new_data_rates))

    ## Extract class data
    #cls = np.load("classes_without_artifact.npy")
    #cls = np.load("y_test.npy")
    cls = cls.reshape(len(cls), 1)
    r_cl, c_cl = np.shape(cls)
    cls = list(np.reshape(cls, (1, r_cl * c_cl))[0])
    outputs = cls[:n_trials]
    poi_rate = data_rates[:n_feature * n_trials]
    t1 = 0
    t2 = int(t1 + n_trials)
    outputs = cls[t1:t2]
    poi_rate = data_rates[t1 * n_feature:n_feature * t2]

    ###############################################################################
    ## Create populations for different layers
    ###############################################################################
    poi_layer = []
    enc_layer = []
    filt_layer_exc = []
    out_layer_exc = []
    out_layer_inh = []

    # Calculate Poisson spike times for features
    spike_times = [[] for i in range(n_feature)]
    for i in range(n_trials):
        t_st = i * time_int_trials
        t_end = t_st + time_int_trials
        ind = i * n_feature
        for j in range(n_feature):
            times = PoissonTimes(t_st, t_end, poi_rate[ind + j], np.random.randint(100))
            for t in times:
                spike_times[j].append(t)

    if randomness:
        # if True: calculate "spike_times" (randomly) anew
        # if False: load previously saved "spike_times"
        np.save('output_files/spike_times_test.npy', spike_times)
    else:
        spike_times = np.load('output_files/spike_times_test.npy')

    # Spike source of input layer
    spike_source = p.Population(n_feature, p.SpikeSourceArray,
                                {'spike_times': spike_times}, label='spike_source')

    enc_layer = p.Population(n_feature * n_pop, p.IF_curr_exp, cell_params_lif, label='enc_layer')
    filt_layer = p.Population(n_feature * n_pop, p.IF_curr_exp, cell_params_lif, label='filt_layer')
    #filt_layer_inh = p.Population(n_feature*n_pop, p.IF_curr_exp, cell_params_lif, label='filt_layer_inh')

    for i in range(n_cl):
        out_layer_exc.append(p.Population(n_pop, p.IF_curr_exp, cell_params_lif,
                                          label='out_layer_exc{}'.format(i)))
        out_layer_inh.append(p.Population(n_pop, p.IF_curr_exp, cell_params_lif,
                                          label='out_layer_inh{}'.format(i)))
        out_layer_exc[i].record()

    poisson_input = p.Population(n_pop * 2, p.SpikeSourcePoisson, {"rate": noise_poi_rate})

    enc_layer.record()
    filt_layer.record()

    ###############################################################################
    ## Projections
    ###############################################################################
    ## Connection list from Spike Source Array to Encoding Layer
    conn_inp_enc = np.load("output_files/conn_inp_enc.npy")

    ## Connection list for Filtering Layer inhibitory connections
    conn_filt_inh = np.load("output_files/conn_filt_inh.npy")

    ## STDP connection list
    conn_stdp_list = np.load("output_files/conn_stdp_list.npy")
    diff_ind = np.load("output_files/diff_ind_filt.npy")
    diff_ind2 = np.load("output_files/diff_ind_filt2.npy")
    diff_thr2 = np.load("output_files/diff_thr2.npy")
    c1 = 0
    for cls_list in conn_stdp_list:
        c2 = 0
        cls_wei = np.load("output_files/stdp_weights{}.npy".format(c1))
        mx = max(cls_wei)
        for conn in cls_list:
            #if ismember(diff_ind, conn[0]):
            if (ismember(diff_ind2, conn[0]) and
                    np.sign(c1 - 0.5) * np.sign(diff_thr2[int(conn[0])]) == -1.):
                #conn[2] = 0.08 * cls_wei[c2] / mx
                conn[2] = 0.08  # * diff_thr2[conn[0]] / 36.
                #conn[2] = 2. * cls_wei[c2]
            c2 += 1
        c1 += 1
    conn_stdp_list = list(conn_stdp_list)

    ## Output Layer inhibitory connection list
    conn_output_inh = np.load("output_files/conn_output_inh.npy")

    ## Spike Source to Encoding Layer
    p.Projection(spike_source, enc_layer, p.FromListConnector(conn_inp_enc))
    ## Encoding Layer to Filtering Layer
    p.Projection(enc_layer, filt_layer, p.OneToOneConnector(weights=wei_enc_filt, delays=del_enc_filt))
    ## Filtering Layer inhibitory
    p.Projection(filt_layer, filt_layer, p.FromListConnector(conn_filt_inh), target="inhibitory")

    ## STDP connections between Filtering Layer and Output Layer
    stdp_proj = []
    for j in range(n_cl):
        stdp_proj.append(p.Projection(filt_layer, out_layer_exc[j],
                                      p.FromListConnector(conn_stdp_list[j])))

    ## Connections between Output Layer neurons
    c = 0
    for i in range(n_cl):
        p.Projection(out_layer_exc[i], out_layer_inh[i],
                     p.OneToOneConnector(weights=wei_cls_exc, delays=del_cls_exc))
        iter_array = [j for j in range(n_cl) if j != i]
        for j in iter_array:
            p.Projection(out_layer_inh[i], out_layer_exc[j],
                         p.FromListConnector(conn_output_inh[c]), target="inhibitory")
            c += 1

    ## Noisy Poisson connection to Encoding Layer
    if randomness:
        # if True: connect noise to network; if False: don't use noise in network
        p.Projection(poisson_input, enc_layer,
                     p.FixedProbabilityConnector(p_connect=prob_noise_poi_conn,
                                                 weights=wei_noise_poi,
                                                 delays=del_noise_poi))

    ###############################################################################
    ## Simulation
    ###############################################################################
    p.run(SIM_TIME)

    Enc_Spikes = enc_layer.getSpikes()
    Filt_Exc_Spikes = filt_layer.getSpikes()
    #Filt_Inh_Spikes = filt_layer_inh.getSpikes()

    Out_Spikes = [[] for i in range(n_cl)]
    for i in range(n_cl):
        Out_Spikes[i] = out_layer_exc[i].getSpikes()

    p.end()

    ###############################################################################
    ## Plot
    ###############################################################################
    ## Plot 1: Encoding Layer raster
    if 0:
        pylab.figure()
        pylab.xlabel('Time (ms)')
        pylab.ylabel('Neuron ID')
        pylab.title('Encoding Layer Raster Plot')
        pylab.hold(True)
        pylab.plot([i[1] for i in Enc_Spikes], [i[0] for i in Enc_Spikes], ".b")
        pylab.hold(False)
        #pylab.axis([-10, c*SIM_TIME+100, -1, numInp+numOut+numInp+3])
        pylab.show()

    ## Plot 2-1: Filtering Layer raster
    if 0:
        pylab.figure()
        pylab.xlabel('Time (ms)')
        pylab.ylabel('Neuron ID')
        pylab.title('Filtering Layer Raster Plot')
        pylab.plot([i[1] for i in Filt_Exc_Spikes], [i[0] for i in Filt_Exc_Spikes], ".b")
        #pylab.axis([-10, c*SIM_TIME+100, -1, numInp+numOut+numInp+3])
        pylab.show()

    ## Plot 2-2: Filtering Layer raster with trial boundaries
    pylab.figure()
    pylab.xlabel('Time (ms)')
    pylab.ylabel('Neuron ID')
    pylab.title('Filtering Layer Raster Plot')
    pylab.hold(True)
    pylab.plot([i[1] for i in Filt_Exc_Spikes], [i[0] for i in Filt_Exc_Spikes], ".b")
    time_ind = [i * time_int_trials for i in range(len(outputs))]
    for i in range(len(time_ind)):
        pylab.plot([time_ind[i], time_ind[i]], [0, 2000], "r")
    pylab.hold(False)
    #pylab.axis([-10, c*SIM_TIME+100, -1, numInp+numOut+numInp+3])
    pylab.show()

    ## Plot 3-1: Association Layer raster
    if 0:
        pylab.figure()
        pylab.xlabel('Time (ms)')
        pylab.ylabel('Neuron ID')
        pylab.title('Association Layer Raster Plot\nTest for Trial Numbers {}-{}'.format(t1, t2))
        pylab.hold(True)
        c = 0
        for array in Out_Spikes:
            pylab.plot([i[1] for i in array], [i[0] + c for i in array], ".b")
            c += 0.2
        time_cls = [j * time_int_trials + i for j in range(len(outputs))
                    for i in range(int(time_int_trials))]
        cls_lb = [outputs[j] + 0.4 for j in range(len(outputs))
                  for i in range(int(time_int_trials))]
        time_ind = [i * time_int_trials for i in range(len(outputs))]
        for i in range(len(time_ind)):
            pylab.plot([time_ind[i], time_ind[i]], [0, 10], "r")
        #pylab.plot(time_cls, cls_lb, ".")
        pylab.hold(False)
        pylab.axis([-10, SIM_TIME + 100, -1, n_pop + 2])
        pylab.show()

    ## Plot 3-2: Association Layer raster, both classes
    pylab.figure()
    pylab.xlabel('Time (ms)')
    pylab.ylabel('Neuron ID')
    pylab.title('Association Layer Raster Plot\nTest for Samples {}-{}'.format(t1, t2))
    pylab.hold(True)
    pylab.plot([i[1] for i in Out_Spikes[0]], [i[0] for i in Out_Spikes[0]], ".b")
    pylab.plot([i[1] for i in Out_Spikes[1]], [i[0] + 0.2 for i in Out_Spikes[1]], ".r")
    time_ind = [i * time_int_trials for i in range(len(outputs))]
    for i in range(len(time_ind)):
        pylab.plot([time_ind[i], time_ind[i]], [0, n_pop], "k")
    #pylab.plot(time_cls, cls_lb, ".")
    pylab.hold(False)
    pylab.axis([-10, SIM_TIME + 100, -1, n_pop + 2])
    pylab.legend(["AN1", "AN2"])
    pylab.show()

    # Count spikes per AN population per trial
    sum_output = [[] for i in range(n_cl)]
    for i in range(n_trials):
        t_st = i * time_int_trials
        t_end = t_st + time_int_trials
        for j in range(n_cl):
            sum_output[j].append(np.sum([1 for n, t in Out_Spikes[j]
                                         if t >= t_st and t < t_end]))

    ## Plot 4: total spikes at each AN population per trial
    if 0:
        # pylab.figure()
        # pylab.hold(True)
        # pylab.plot(sum_output[0], "b.")
        # pylab.plot(sum_output[1], "r.")
        # out_cl0 = [i for i in range(len(outputs)) if outputs[i] == 0]
        # out_cl1 = [i for i in range(len(outputs)) if outputs[i] == 1]
        # pylab.plot(out_cl0, [-2 for i in range(len(out_cl0))], "xb")
        # pylab.plot(out_cl1, [-2 for i in range(len(out_cl1))], "xr")
        # pylab.hold(False)
        # pylab.title("Total spikes at each AN population for each trial")
        # pylab.xlabel("Trials")
        # pylab.ylabel("Firing Rate")
        # pylab.legend(["Cl0", "Cl1", "Winning Cl 0", "Winning Cl 1"])
        # pylab.axis([-2, n_trials + 2, -4, max(max(sum_output)) + 30])
        # pylab.show()

        pylab.figure()
        pylab.hold(True)
        pylab.plot(sum_output[0], "b^")
        pylab.plot(sum_output[1], "r^")
        #pylab.plot(sum_output[0], "b")
        #pylab.plot(sum_output[1], "r")
        ppp0 = np.array(sum_output[0])
        ppp1 = np.array(sum_output[1])
        out_cl0 = [i for i in range(len(outputs)) if outputs[i] == 0]
        out_cl1 = [i for i in range(len(outputs)) if outputs[i] == 1]
        pylab.plot(out_cl0, ppp0[out_cl0], "bs")
        pylab.plot(out_cl1, ppp1[out_cl1], "rs")
        pylab.hold(False)
        pylab.title("Total spikes at each AN population for each trial")
        pylab.xlabel("Trials")
        pylab.ylabel("Spike Count for Each Trial")
        pylab.legend(["Cls 0", "Cls 1", "Actual Winner Cls 0", "Actual Winner Cls 1"])
        pylab.axis([-2, n_trials + 2, -4, max(max(sum_output)) + 30])
        pylab.show()

    ## Check classification rate
    s = np.array(sum_output)
    cl = np.floor((np.sign(s[1] - s[0]) + 1) / 2)
    r_cl = np.array(outputs)
    wrong = np.sum(np.abs(cl - r_cl))
    rate = (n_trials - wrong) / n_trials
    print("success rate: {}%".format(abs(rate) * 100.))
    print("cl:\n", cl)
    print("r_cl:\n", r_cl)

    ## Plot 5: STDP weight comparison between classes
    if 0:
        pylab.figure()
        cf = 0.1
        pylab.hold(True)
        cls_wei0 = np.load("output_files/stdp_weights{}.npy".format(0))
        mx = max(cls_wei0)
        cls_wei0 = cf * cls_wei0 / mx
        cls_wei1 = np.load("output_files/stdp_weights{}.npy".format(1))
        mx = max(cls_wei1)
        cls_wei1 = cf * cls_wei1 / mx
        l = min(len(cls_wei0), len(cls_wei1))
        new_array0 = [cls_wei0[i] for i in range(l) if cls_wei0[i] > cls_wei1[i]]
        x0 = [i for i in range(l) if cls_wei0[i] > cls_wei1[i]]
        new_array1 = [cls_wei1[i] for i in range(l) if cls_wei1[i] > cls_wei0[i]]
        x1 = [i for i in range(l) if cls_wei1[i] > cls_wei0[i]]
        pylab.plot(x0, new_array0, "gx")
        pylab.plot(x1, new_array1, "bx")
        #for i in range(2):
        #    cls_wei = np.load("stdp_weights{}.npy".format(i))
        #    mx = max(cls_wei)
        #    cls_wei = 0.05 * cls_wei / mx
        #    pylab.plot(cls_wei, "x")
        pylab.axis([-10, 2000, -0.1, 0.15])
        pylab.hold(False)
        pylab.show()

    ## Plot 7: total spiking activity at Decomposition Layer per class
    if 0:
        sum_filt = np.array([[0 for i in range(n_feature * n_pop)] for j in range(n_cl)])
        for i in range(n_trials):
            t_st = i * time_int_trials
            t_end = t_st + time_int_trials
            cl = outputs[i]
            for n, t in Filt_Exc_Spikes:
                if t >= t_st and t < t_end:
                    sum_filt[int(cl), int(n)] += 1
        a4 = sum_filt[0]
        b4 = sum_filt[1]
        pylab.figure()
        pylab.hold(True)
        pylab.plot(a4, "b.")
        pylab.plot(b4, "r.")
        pylab.xlabel('Neuron ID')
        pylab.ylabel('Total Firing Rates Through Trials')
        pylab.title("Total Spiking Activity of Neurons at Decomposition Layer for Each Class")
        pylab.hold(False)
        pylab.legend(["Activity to AN1", "Activity to AN2"])
        pylab.show()

    return rate
import pyNN.spiNNaker as sim

sim.setup()
p1 = sim.Population(3, sim.SpikeSourceArray, {"spike_times": [1.0, 2.0, 3.0]})
p2 = sim.Population(3, sim.SpikeSourceArray, {"spike_times": [[10.0], [20.0], [30.0]]})
p3 = sim.Population(4, sim.IF_cond_exp, {})

sim.Projection(p2, p3, sim.FromListConnector([
    (0, 0, 0.1, 1.0), (1, 1, 0.1, 1.0), (2, 2, 0.1, 1.0)]))
#sim.Projection(p1, p3, sim.FromListConnector([(0, 3, 0.1, 1.0)]))  # works if this line is added

sim.run(100.0)
class MySpiNNakerLinkDevice(ApplicationSpiNNakerLinkVertex):
    def __init__(self, n_neurons, spinnaker_link_id, label=None):
        ApplicationSpiNNakerLinkVertex.__init__(
            self, n_neurons, spinnaker_link_id, label=label)


class MySpiNNakerLinkDeviceDataHolder(DataHolder):
    def __init__(self, spinnaker_link_id, label=None):
        DataHolder.__init__(
            self, {"spinnaker_link_id": spinnaker_link_id, "label": label})

    @staticmethod
    def build_model():
        return MySpiNNakerLinkDevice


p.setup(1.0)
poisson = p.Population(1, p.SpikeSourcePoisson(rate=100))
device = p.Population(1, MySpiNNakerLinkDeviceDataHolder(spinnaker_link_id=1))
p.external_devices.activate_live_output_to(poisson, device)
p.run(100)
p.end()
start_pairing = 1500.
start_test_post_pairing = 700.

simtime = (start_pairing + start_test_post_pairing +
           ISI * (n_stim_pairing + n_stim_test) + 550.)

# Initialisations of the different types of populations
IAddPre = []
IAddPost = []

# +-------------------------------------------------------------------+
# | Creation of neuron populations                                    |
# +-------------------------------------------------------------------+

# Neuron populations
pre_pop = sim.Population(pop_size, model(**cell_params))
post_pop = sim.Population(pop_size, model(**cell_params))

# Test of the effect of activity of the pre_pop population on the post_pop
# population prior to the "pairing" protocol: only pre_pop is stimulated
for i in range(n_stim_test):
    IAddPre.append(
        sim.Population(
            pop_size,
            sim.SpikeSourcePoisson(rate=in_rate,
                                   start=start_test_pre_pairing + ISI * i,
                                   duration=dur_stim)))

# Pairing protocol: pre_pop and post_pop are stimulated with a 10 ms
# difference
for i in range(n_stim_pairing):
import pyNN.spiNNaker as p

INJECTOR_LABEL = "injector"
RECEIVER_LABEL = "receiver"


# declare python code to run when spikes are received for a timer tick
def receive_spikes(label, time, neuron_ids):
    for neuron_id in neuron_ids:
        print("Received spike at time {} from {}-{}".format(time, label, neuron_id))


p.setup(timestep=1.0)

p1 = p.Population(1, p.IF_curr_exp(), label=RECEIVER_LABEL)
input_injector = p.Population(1, p.external_devices.SpikeInjector(),
                              label=INJECTOR_LABEL)

# set up python live spike connection
live_spikes_connection = p.external_devices.SpynnakerLiveSpikesConnection(
    receive_labels=[RECEIVER_LABEL])

# register python receiver with live spike connection
live_spikes_connection.add_receive_callback(RECEIVER_LABEL, receive_spikes)

# route live output from p1; the receive label must match a population
# whose live output has been activated
p.external_devices.activate_live_output_for(
    p1, database_notify_port_num=live_spikes_connection.local_port)

input_proj = p.Projection(input_injector, p1, p.OneToOneConnector(),
                          p.StaticSynapse(weight=5, delay=3))

p1.record(["spikes", "v"])
p.run(50)
p.end()
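# To complement the receiver above, a hedged sketch of the sending side, using
# the SpynnakerLiveSpikesConnection send API (send_labels plus send_spike) and
# the INJECTOR_LABEL population defined above. The exact port wiring between
# this connection and the SpikeInjector is an assumption, not from the source.
def send_input(label, sender):
    # called once the simulation starts; inject a spike into neuron 0
    sender.send_spike(label, 0)


send_connection = p.external_devices.SpynnakerLiveSpikesConnection(
    send_labels=[INJECTOR_LABEL], local_port=None)
send_connection.add_start_resume_callback(INJECTOR_LABEL, send_input)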