def setupLayerRN(params, neuronModel, cell_params, injectionPopulations, popPoissionNoiseSource, populationsRN):
    #create a single RN population divided into virtual clusters, one per VR
    #this will be fed by the noise population and modulated by the relevant
    #ratecoded neuron to create a rate coded population
    numVR = params['NUM_VR']
    rnClusterSize = int(params['CLUSTER_SIZE']) #* params['NETWORK_SCALE']
    rnPopSize = rnClusterSize * numVR
    popName = 'popRN'
    popRN = spynnaker.Population(rnPopSize, neuronModel, cell_params, label=popName)
    populationsRN.append(popRN)

    #connect one random poisson neuron to each RN neuron
    weight = params['WEIGHT_POISSON_TO_CLUSTER_RN']
    delay = params['DELAY_POISSON_TO_CLUSTER_RN']
    connections = utils.fromList_OneRandomSrcForEachTarget(popPoissionNoiseSource._size, popRN._size, weight, delay)
    projPoissonToClusterRN = spynnaker.Projection(popPoissionNoiseSource, popRN, spynnaker.FromListConnector(connections), target='excitatory')

    vr = 0
    for injectionPopn in injectionPopulations:
        connections = list()
        for fromNeuronIdx in range(injectionPopn._size):
            #connect the correct VR ratecode neuron in popRateCodeSpikes to the
            #corresponding subsection (cluster) of the RN population
            weight = params['WEIGHT_RATECODE_TO_CLUSTER_RN']
            firstIndex = vr * rnClusterSize
            lastIndex = firstIndex + rnClusterSize - 1
            connections += utils.fromList_SpecificNeuronToRange(fromNeuronIdx, firstIndex, lastIndex, weight, params['MIN_DELAY_RATECODE_TO_CLUSTER_RN'], params['MAX_DELAY_RATECODE_TO_CLUSTER_RN'])
            vr = vr + 1
        #after the last neuron in the current injection pop, create a projection to the RN
        projRateToClusterRN = spynnaker.Projection(injectionPopn, popRN, spynnaker.FromListConnector(connections), target='excitatory')
        print 'Added projection to RN of', len(connections), 'connections from injection pop', injectionPopn.label, '(size', injectionPopn._size, ')'
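# --- Illustrative sketch (not part of the original module) ---
# The utils.fromList_* helpers build the (src, tgt, weight, delay) tuple
# lists that spynnaker.FromListConnector expects. ModellingUtils is not
# shown in this section, so the implementation below is a hedged guess
# based only on the call site above: one randomly chosen noise neuron
# drives each RN target neuron.
import random

def fromList_OneRandomSrcForEachTarget_sketch(sizeSrcPop, sizeTgtPop, weight, delay):
    #one random source neuron per target neuron
    connections = list()
    for tgtIdx in range(sizeTgtPop):
        srcIdx = random.randint(0, sizeSrcPop - 1)  #inclusive upper bound
        connections.append((srcIdx, tgtIdx, weight, delay))
    return connections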
def setupLayerInput(params, spikeSourceVrResponsePath, spikeSourceActiveClassPath, populationsInput, learning):
    #Create a population, one neuron per VR, where each neuron will be loaded
    #with the rate code spikes for the VR response over the training and/or test set
    spikeData = utils.readSpikeSourceDataFile(spikeSourceVrResponsePath)
    numVR = params['NUM_VR']
    numRatecodeNeurons = numVR
    popRateCodeSpikes = spynnaker.Population(numRatecodeNeurons, spynnaker.SpikeSourceArray, spikeData, label='popRateCodeSpikes')
    populationsInput.append(popRateCodeSpikes)

    if learning:
        #Create a population, one neuron per class.
        #During training the neuron representing the current class will be active
        #with significant spikes, the others will be quiet.
        #The purpose is to innervate the relevant output class cluster/population
        #so that fire-together-wire-together hebbian learning (via STDP)
        #strengthens synapses from active PN clusters.
        #During testing all these neurons will be silent, leaving the strengthened
        #synapses to trigger activity direct from the PN layer in the correct output cluster.
        spikeData = utils.readSpikeSourceDataFile(spikeSourceActiveClassPath)
        numNeurons = params['NUM_CLASSES']
        popClassActivationSpikes = spynnaker.Population(numNeurons, spynnaker.SpikeSourceArray, spikeData, label='popClassActivationSpikes')
        populationsInput.append(popClassActivationSpikes)
    else:
        #create an orphan dummy popn of 1 neuron to take the place of the now unused
        #spike source pop used in learning.
        #This is to ensure that the freed-up core does not get co-opted by the PN layer
        #config routine, as this would make the learning and testing configurations
        #different in PN, which would likely make the saved PNAN weight arrays incorrect
        #(neuronModel and cell_params are assumed to be module-level globals here)
        popClassActivationSpikes = spynnaker.Population(1, neuronModel, cell_params, label='dummy_popClassActivationSpikes')
        populationsInput.append(popClassActivationSpikes)
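# --- Illustrative example (not part of the original module) ---
# utils.readSpikeSourceDataFile is expected to return the parameter dict
# passed to spynnaker.SpikeSourceArray, i.e. a 'spike_times' entry holding
# one list of spike times (ms) per neuron. The on-disk format is not shown
# in this section; the literal below is a hedged example of the returned
# structure for three VR neurons.
exampleSpikeData = {
    'spike_times': [
        [10.0, 20.0, 30.0],  #neuron 0: dense spikes -> strongly responding VR
        [150.0],             #neuron 1: a single spike
        [],                  #neuron 2: silent over this observation set
    ]
}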
def setupLayerInput(params, settings, populationsInput):
    numVR = params["NUM_VR"]
    numRatecodeNeurons = numVR
    spikeSourceVrResponsePath = settings["SPIKE_SOURCE_VR_RESPONSE_TRAIN"]
    spikeSourceVrResponsePathTest = settings["SPIKE_SOURCE_VR_RESPONSE_TEST"]
    spikeSourceActiveClassPath = settings["SPIKE_SOURCE_CLASS_ACTIVATIONS"]
    learning = settings["LEARNING"]

    if learning:
        # Create a population, one neuron per VR,
        # where each neuron will be loaded with the rate code spikes for the
        # VR response over the training set
        spikeDataVR = utils.readSpikeSourceDataFile(spikeSourceVrResponsePath)
        popRateCodeSpikes = spynnaker.Population(
            numRatecodeNeurons, spynnaker.SpikeSourceArray, spikeDataVR,
            label="popRateCodeSpikes"
        )
        populationsInput.append(popRateCodeSpikes)

        # Create a population, one neuron per class.
        # During training the neuron representing the current class will be
        # active with significant spikes, the others will be quiet.
        #
        # The purpose is to innervate the relevant output class cluster/population
        # so that fire-together-wire-together hebbian learning (via STDP)
        # strengthens synapses from active PN clusters.
        #
        # During testing all these neurons will be silent, leaving
        # the strengthened synapses to trigger activity direct from the PN layer
        # in the correct output cluster.
        spikeDataClass = utils.readSpikeSourceDataFile(spikeSourceActiveClassPath)
        numNeurons = params["NUM_CLASSES"]
        popClassActivationSpikes = spynnaker.Population(
            numNeurons, spynnaker.SpikeSourceArray, spikeDataClass,
            label="popClassActivationSpikes"
        )
        populationsInput.append(popClassActivationSpikes)
    else:
        # Create a population, one neuron per VR,
        # where each neuron will be loaded with the rate code spikes for
        # the VR response over the test set
        spikeDataVRTest = utils.readSpikeSourceDataFile(spikeSourceVrResponsePathTest)
        popRateCodeSpikesTest = spynnaker.Population(
            numRatecodeNeurons, spynnaker.SpikeSourceArray, spikeDataVRTest,
            label="popRateCodeSpikes"
        )
        populationsInput.append(popRateCodeSpikesTest)

        # create an orphan dummy popn of 1 neuron to take the place of the now
        # unused spike source pop used in learning.
        # This is to ensure that the freed-up core does not get co-opted by the
        # PN layer config routine, as this would make the learning and testing
        # configurations different in PN, which would likely make the saved
        # PNAN weight arrays incorrect
        # (neuronModel and cell_params are assumed to be module-level globals)
        popClassActivationSpikes = spynnaker.Population(
            1, neuronModel, cell_params, label="dummy_popClassActivationSpikes"
        )
        populationsInput.append(popClassActivationSpikes)
def saveLearntWeightsPNAN(settings, params, projectionsPNAN, numPopsPN, numPopsAN):
    delayPNAN = int(params['DELAY_PN_AN'])
    projections = iter(projectionsPNAN)
    for an in range(numPopsAN):
        for pn in range(numPopsPN):
            weightsMatrix = projections.next().getWeights(format="array")
            #print 'weightsMatrix with NaN', weightsMatrix
            #sets NaN to 0.0; no connection from x to y is specified as a NaN entry, may cause problems on import
            weightsMatrix = np.nan_to_num(weightsMatrix)
            #print 'weightsMatrix without NaN', weightsMatrix
            weightsList = utils.fromList_convertWeightMatrix(weightsMatrix, delayPNAN)
            utils.printSeparator()
            #print 'weightsList[', pn, ',', an, ']', weightsList
            utils.saveListToFile(weightsList, getWeightsFilename(settings, 'PNAN', pn, an))
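# --- Illustrative sketch (not part of the original module) ---
# Neither utils.fromList_convertWeightMatrix nor getWeightsFilename is
# defined in this section. The sketches below are hedged guesses from the
# call sites: the first flattens a pre x post weight array into FromList
# (pre, post, weight, delay) entries, skipping the zero entries that
# np.nan_to_num produced for absent connections; the second builds a
# per-projection cache filename. The zero-skip rule and the filename
# layout are assumptions, not the actual implementations.
def fromList_convertWeightMatrix_sketch(weightsMatrix, delay):
    connections = list()
    numPre, numPost = weightsMatrix.shape
    for pre in range(numPre):
        for post in range(numPost):
            w = weightsMatrix[pre, post]
            if w != 0.0:  #assumed: 0.0 (from nan_to_num) means no connection
                connections.append((pre, post, float(w), delay))
    return connections

def getWeightsFilename_sketch(settings, prefix, pn, an):
    #assumed layout: one CSV of learnt weights per PN-AN projection
    return settings['CACHE_DIR'] + '/' + prefix + '_' + str(pn) + '_' + str(an) + '.csv'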
def calculateScore(winningClassesByObservation, classLabels):
    utils.printSeparator()
    print "Correct Answers", classLabels
    print "Classifier Responses", winningClassesByObservation
    numObservations = len(winningClassesByObservation)
    score = 0.0
    for i in range(numObservations):
        if winningClassesByObservation[i] == classLabels[i]:
            score = score + 1.0
    scorePercent = 100.0 * score / float(numObservations)
    print "Score: ", int(score), "out of ", numObservations, "(", scorePercent, "%)"
    utils.printSeparator()
    return scorePercent
def calculateWinnersAN(settings, populationsAN):
    numClasses = len(populationsAN)
    numObservations = settings['NUM_OBSERVATIONS']
    observationExposureTimeMs = settings['OBSERVATION_EXPOSURE_TIME_MS']
    #set up lists to hold highest spike count and current winning class so far for each observation
    winningSpikeCount = [0] * numObservations
    winningClass = [-1] * numObservations
    for cls in range(numClasses):
        allSpikes = populationsAN[cls].getSpikes(compatible_output=True)
        for observation in range(numObservations):
            startMs = observation * observationExposureTimeMs
            endMs = startMs + observationExposureTimeMs
            observationSpikes = utils.getSpikesBetween(startMs, endMs, allSpikes)
            spikeCount = observationSpikes.shape[0]
            #print 'StartMs:', startMs, 'EndMs:', endMs, 'Observation:', observation, 'Class:', cls, 'Spikes:', spikeCount
            if spikeCount > winningSpikeCount[observation]:
                winningSpikeCount[observation] = spikeCount
                winningClass[observation] = cls
    return winningClass
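# --- Illustrative sketch (not part of the original module) ---
# getSpikes(compatible_output=True) returns an N x 2 array of
# (neuron_id, spike_time_ms) rows. utils.getSpikesBetween is not shown
# here; a plausible implementation, assuming that row layout and a
# half-open [startMs, endMs) window:
import numpy as np

def getSpikesBetween_sketch(startMs, endMs, spikes):
    #keep only the rows whose spike time (column 1) falls in the window
    mask = (spikes[:, 1] >= startMs) & (spikes[:, 1] < endMs)
    return spikes[mask]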
def calculateWinnersAN(settings, populationsAN, classLabels):
    nrObs = len(classLabels)
    numTotClasses = len(populationsAN)
    observationExposureTimeMs = settings['OBSERVATION_EXPOSURE_TIME_MS']
    #set up lists to hold highest spike count and current winning class so far for each observation
    winningSpikeCount = [0] * nrObs
    winningClass = [0] * nrObs
    for cls in range(numTotClasses):
        allSpikes = populationsAN[cls].getSpikes(compatible_output=True)
        for observation in range(nrObs):
            startMs = observation * observationExposureTimeMs
            endMs = startMs + observationExposureTimeMs
            observationSpikes = utils.getSpikesBetween(startMs, endMs, allSpikes)
            spikeCount = observationSpikes.shape[0]
            #print spikeCount
            #print 'StartMs:', startMs, 'EndMs:', endMs, 'Observation:', observation, 'Class:', cls, 'Spikes:', spikeCount
            #a class only wins if it beats the current best AND clears a minimum activity threshold of 500 spikes
            if spikeCount > winningSpikeCount[observation] and spikeCount > 500:
                winningSpikeCount[observation] = spikeCount
                winningClass[observation] = cls
    return winningClass, winningSpikeCount
def setupLayerRN(params, neuronModel, cell_params, popRateCodeSpikes, popPoissionNoiseSource, populationsRN):
    # create a single RN population divided into virtual clusters, one per VR
    # this will be fed by the noise population and modulated by the relevant
    # ratecoded neuron to create a rate coded population
    numVR = params["NUM_VR"]
    rnClusterSize = params["CLUSTER_SIZE"] * params["NETWORK_SCALE"]
    rnPopSize = rnClusterSize * numVR
    popName = "popRN"
    popRN = spynnaker.Population(rnPopSize, neuronModel, cell_params, label=popName)
    populationsRN.append(popRN)

    # connect one random poisson neuron to each RN neuron
    weight = params["WEIGHT_POISSON_TO_CLUSTER_RN"]
    delay = params["DELAY_POISSON_TO_CLUSTER_RN"]
    connections = utils.fromList_OneRandomSrcForEachTarget(popPoissionNoiseSource._size, popRN._size, weight, delay)
    projPoissonToClusterRN = spynnaker.Projection(
        popPoissionNoiseSource, popRN,
        spynnaker.FromListConnector(connections), target="excitatory"
    )

    connections = list()
    for vr in range(numVR):
        # connect the correct VR ratecode neuron in popRateCodeSpikes to the
        # corresponding subsection (cluster) of the RN population
        weight = params["WEIGHT_RATECODE_TO_CLUSTER_RN"]
        firstIndex = vr * rnClusterSize
        lastIndex = firstIndex + rnClusterSize - 1
        connections += utils.fromList_SpecificNeuronToRange(
            vr, firstIndex, lastIndex, weight,
            params["MIN_DELAY_RATECODE_TO_CLUSTER_RN"],
            params["MAX_DELAY_RATECODE_TO_CLUSTER_RN"],
        )
    projRateToClusterRN = spynnaker.Projection(
        popRateCodeSpikes, popRN,
        spynnaker.FromListConnector(connections), target="excitatory"
    )
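# --- Illustrative sketch (not part of the original module) ---
# utils.fromList_SpecificNeuronToRange fans one source neuron out to a
# contiguous range of target indices. The implementation below is a hedged
# guess from the call site, assuming a uniformly random delay per
# connection between the given min and max:
import random

def fromList_SpecificNeuronToRange_sketch(srcIdx, firstTgtIdx, lastTgtIdx, weight, minDelay, maxDelay):
    connections = list()
    for tgtIdx in range(firstTgtIdx, lastTgtIdx + 1):  #lastTgtIdx is inclusive
        delay = random.uniform(minDelay, maxDelay)
        connections.append((srcIdx, tgtIdx, weight, delay))
    return connections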
def recordSpecifiedPopulations(settings, populationsInput, populationsNoiseSource, populationsRN, populationsPN, populationsAN):
    recordPopnInput = settings['RECORD_POP_INPUT_LEARNING']
    recordPopnNoise = settings['RECORD_POP_NOISE_SOURCE_LEARNING']
    recordPopnRN = settings['RECORD_POP_RN_LEARNING']
    recordPopnPN = settings['RECORD_POP_PN_LEARNING']
    recordPopnAN = settings['RECORD_POP_AN_LEARNING']
    if not settings['LEARNING']:
        recordPopnInput = settings['RECORD_POP_INPUT_TESTING']
        recordPopnNoise = settings['RECORD_POP_NOISE_SOURCE_TESTING']
        recordPopnRN = settings['RECORD_POP_RN_TESTING']
        recordPopnPN = settings['RECORD_POP_PN_TESTING']
        recordPopnAN = settings['RECORD_POP_AN_TESTING']
    utils.recordPopulations(populationsInput, recordPopnInput)
    utils.recordPopulations(populationsNoiseSource, recordPopnNoise)
    utils.recordPopulations(populationsRN, recordPopnRN)
    utils.recordPopulations(populationsPN, recordPopnPN)
    utils.recordPopulations(populationsAN, recordPopnAN)
    return (recordPopnInput, recordPopnNoise, recordPopnRN, recordPopnPN, recordPopnAN)
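# --- Illustrative sketch (not part of the original module) ---
# utils.recordPopulations presumably enables spike recording on every
# population in a list when the corresponding settings flag is set. A
# minimal guess, assuming the PyNN 0.7-style record() call that matches
# the getSpikes() usage elsewhere in this file:
def recordPopulations_sketch(populations, enableRecording):
    if enableRecording:
        for pop in populations:
            pop.record()  #record spikes so getSpikes() works after the run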
def printParameters(title, params):
    utils.printSeparator()
    print title
    utils.printSeparator()
    for param in params:
        print param, '=', params[param]
    utils.printSeparator()
def setupLayerAN(params, settings, neuronModel, cell_params, popClassActivation, popPoissionNoiseSource, populationsPN, populationsAN, learning, projectionsPNAN):
    #create an Association Neuron (AN) cluster population per class
    #this will be fed by:
    #1) PN clusters via plastic synapses
    #2) Class activation to innervate the correct AN cluster for a given input
    #3) lateral inhibition between AN clusters
    numClasses = params['NUM_CLASSES']
    anClusterSize = params['CLUSTER_SIZE'] * params['NETWORK_SCALE']

    for an in range(numClasses):
        popName = 'popClusterAN_' + str(an)
        popClusterAN = spynnaker.Population(anClusterSize, neuronModel, cell_params, label=popName)
        populationsAN.append(popClusterAN)

        #connect neurons in every PN popn to x% (e.g. 50%) of neurons in this AN cluster
        for pn in range(len(populationsPN)):
            if learning:
                projLabel = 'Proj_PN' + str(pn) + '_AN' + str(an)
                projClusterPNToClusterAN = connectClusterPNtoAN(params, populationsPN[pn], popClusterAN, projLabel)
                #keep handle to use later for saving off weights at end of learning
                projectionsPNAN.append(projClusterPNToClusterAN)
            else:
                #Without plasticity, create PNAN FromList connectors using weights saved during the learning stage
                connections = utils.loadListFromFile(getWeightsFilename(settings, 'PNAN', pn, an))
                #print 'Loaded weightsList[', pn, ',', an, ']', connections
                projClusterPNToClusterAN = spynnaker.Projection(populationsPN[pn], popClusterAN, spynnaker.FromListConnector(connections), target='excitatory')

        if learning:
            #use the class activity input neurons to create correlated activity during learning in the corresponding class cluster
            weight = params['WEIGHT_CLASS_ACTIVITY_TO_CLUSTER_AN']
            connections = utils.fromList_SpecificNeuronToAll(an, anClusterSize, weight, params['MIN_DELAY_CLASS_ACTIVITY_TO_CLUSTER_AN'], params['MAX_DELAY_CLASS_ACTIVITY_TO_CLUSTER_AN'])
            projClassActivityToClusterAN = spynnaker.Projection(popClassActivation, popClusterAN, spynnaker.FromListConnector(connections), target='excitatory')

    #connect each AN cluster to inhibit every other AN cluster
    utils.createInterPopulationWTA(populationsAN, params['WEIGHT_WTA_AN_AN'], params['DELAY_WTA_AN_AN'], float(params['CONNECTIVITY_WTA_AN_AN']))
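# --- Illustrative sketch (not part of the original module) ---
# utils.createInterPopulationWTA wires the soft winner-take-all used
# between clusters. A hedged guess from the call sites: every population
# inhibits every other population through a fixed-probability connector.
# The connector name follows the PyNN 0.7-era API assumed by the rest of
# this file; the loop structure is an assumption.
def createInterPopulationWTA_sketch(populations, weight, delay, connectivity):
    for src in populations:
        for tgt in populations:
            if src != tgt:  #no self-inhibition of a cluster onto itself
                spynnaker.Projection(
                    src, tgt,
                    spynnaker.FixedProbabilityConnector(p_connect=connectivity, weights=weight, delays=delay),
                    target='inhibitory')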
def setupLayerAN(params, settings, neuronModel, cell_params, popClassActivation, popPoissionNoiseSource, populationsPN, populationsAN, learning, projectionsPNAN):
    #create an Association Neuron (AN) cluster population per class
    #this will be fed by:
    #1) PN clusters via plastic synapses
    #2) Class activation to innervate the correct AN cluster for a given input
    #3) lateral inhibition between AN clusters
    numClasses = params['NUM_CLASSES']
    anClusterSize = int(params['CLUSTER_SIZE']) #* params['NETWORK_SCALE']

    for an in range(numClasses):
        popName = 'popClusterAN_' + str(an)
        popClusterAN = spynnaker.Population(anClusterSize, neuronModel, cell_params, label=popName)
        populationsAN.append(popClusterAN)

        #connect neurons in every PN popn to x% (e.g. 50%) of neurons in this AN cluster
        for pn in range(len(populationsPN)):
            if learning:
                projLabel = 'Proj_PN' + str(pn) + '_AN' + str(an)
                projClusterPNToClusterAN = connectClusterPNtoAN(params, populationsPN[pn], popClusterAN, float(settings['OBSERVATION_EXPOSURE_TIME_MS']), projLabel)
                #keep handle to use later for saving off weights at end of learning
                projectionsPNAN.append(projClusterPNToClusterAN)
            else:
                #Without plasticity, create PNAN FromList connectors using weights saved during the learning stage
                connections = utils.loadListFromFile(getWeightsFilename(settings, 'PNAN', pn, an))
                #print 'Loaded weightsList[', pn, ',', an, ']', connections
                #newer sPyNNaker versions only accept a list of tuples, not a list of lists
                tupleList = utils.createListOfTuples(connections)
                #print 'tupleList[', pn, ',', an, ']', tupleList
                conn = spynnaker.FromListConnector(tupleList)
                projClusterPNToClusterAN = spynnaker.Projection(populationsPN[pn], popClusterAN, conn, target='excitatory')

        if learning:
            #use the class activity input neurons to create correlated activity during learning in the corresponding class cluster
            weight = params['WEIGHT_CLASS_EXCITATION_TO_CLUSTER_AN']
            connections = utils.fromList_SpecificNeuronToAll(an, anClusterSize, weight, params['MIN_DELAY_CLASS_ACTIVITY_TO_CLUSTER_AN'], params['MAX_DELAY_CLASS_ACTIVITY_TO_CLUSTER_AN'])
            projClassActivityToClusterAN = spynnaker.Projection(popClassActivation, popClusterAN, spynnaker.FromListConnector(connections), target='excitatory')
        else:  #testing
            #send spikes on these outputs back to the correct host port; these will be used to determine the winner etc.
            anHostReceivePort = int(settings['AN_HOST_RECEIVE_PORT'])
            ExternalDevices.activate_live_output_for(popClusterAN, port=anHostReceivePort)

    #connect each AN cluster to inhibit every other AN cluster
    utils.createInterPopulationWTA(populationsAN, params['WEIGHT_WTA_AN_AN'], params['DELAY_WTA_AN_AN'], float(params['CONNECTIVITY_WTA_AN_AN']))

    #inhibit the other, non-corresponding class clusters
    if learning:
        weight = params['WEIGHT_CLASS_INHIBITION_TO_CLUSTER_AN']
        for activeCls in range(numClasses):
            connections = utils.fromList_SpecificNeuronToAll(activeCls, anClusterSize, weight, params['MIN_DELAY_CLASS_ACTIVITY_TO_CLUSTER_AN'], params['MAX_DELAY_CLASS_ACTIVITY_TO_CLUSTER_AN'])
            for an in range(numClasses):
                if an != activeCls:
                    projClassActivityToClusterAN = spynnaker.Projection(popClassActivation, populationsAN[an], spynnaker.FromListConnector(connections), target='inhibitory')
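# --- Illustrative sketch (not part of the original module) ---
# connectClusterPNtoAN is defined elsewhere in the classifier; the sketch
# below shows one plausible way the plastic PN->AN projection could be
# built with the PyNN 0.7-era sPyNNaker STDP API that the rest of this
# file assumes. The signature follows the first call site above; every
# params key used here other than DELAY_PN_AN (i.e. CONNECTIVITY_PN_AN,
# WEIGHT_PN_AN and the STDP_* entries) is hypothetical.
def connectClusterPNtoAN_sketch(params, popClusterPN, popClusterAN, projLabel):
    stdpModel = spynnaker.STDPMechanism(
        timing_dependence=spynnaker.SpikePairRule(
            tau_plus=params['STDP_TAU_PLUS'], tau_minus=params['STDP_TAU_MINUS']),
        weight_dependence=spynnaker.AdditiveWeightDependence(
            w_min=0.0, w_max=params['STDP_W_MAX'],
            A_plus=params['STDP_A_PLUS'], A_minus=params['STDP_A_MINUS']))
    return spynnaker.Projection(
        popClusterPN, popClusterAN,
        spynnaker.FixedProbabilityConnector(
            p_connect=params['CONNECTIVITY_PN_AN'],
            weights=params['WEIGHT_PN_AN'], delays=int(params['DELAY_PN_AN'])),
        synapse_dynamics=spynnaker.SynapseDynamics(slow=stdpModel),
        target='excitatory', label=projLabel)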
def plot_spike_sources(filePath, fileName, nrInputNeurons, nrVR, observationTime, totalSimulationTime, classLabels, odourNames):
    '''Plot the Poisson spike source matrix
    Input:
        -path of the spike times file
        -name of the spike times file
        -number of input neurons (= number of sensors)
        -number of virtual receptors
        -length of the Poisson spike train for each sample
        -maximum simulation time for each recording:
         (number of samples for each recording) x (length of Poisson spike train for each sample)
        -class labels
        -names of the odours used
    '''
    #(colors is assumed to be a module-level list of matplotlib colours, one per class)
    bckndNames = [[]] * len(odourNames)
    spikeTimes = utils.readSpikeSourceDataFile(os.path.join(filePath, fileName))['spike_times']
    plt.figure(figsize=(20, 20))
    for idx, line in enumerate(spikeTimes):
        for x in line:
            plt.plot(x, idx, 'ko', markersize=2)
    for j in range(idx, nrVR):
        plt.plot(0, j, 'k,')
    for j, classLabel in enumerate(classLabels):
        plt.axvspan(j * observationTime, j * observationTime + observationTime, facecolor=colors[int(classLabel)], alpha=0.3)
    for idxO, odour in enumerate(odourNames):
        bckndNames[idxO] = mpatches.Patch(color=colors[idxO], label=odour)
    plt.legend(handles=bckndNames, loc='best', prop={'size': 20})
    plt.xlabel('Simulation time[ms]', fontsize=20)
    plt.ylabel('%i Virtual receptors per sensor' % (nrVR / nrInputNeurons), fontsize=20)
    plt.tick_params(labelsize=20)
    plt.title('VR spike times for classes %s' % str(classLabels), fontsize=20)
    plt.savefig(fileName + '.pdf')
    plt.close()
def printModelConfigurationSummary(params, populationsInput, populationsNoiseSource, populationsRN, populationsPN, populationsAN):
    totalPops = len(populationsInput) + len(populationsNoiseSource) + len(populationsRN) + len(populationsPN) + len(populationsAN)
    stdMaxNeuronsPerCore = params['MAX_NEURONS_PER_CORE']
    stdpMaxNeuronsPerCore = params['MAX_STDP_NEURONS_PER_CORE']
    inputCores = utils.coresRequired(populationsInput, stdMaxNeuronsPerCore)
    noiseCores = utils.coresRequired(populationsNoiseSource, stdMaxNeuronsPerCore)
    rnCores = utils.coresRequired(populationsRN, stdMaxNeuronsPerCore)
    pnCores = utils.coresRequired(populationsPN, stdMaxNeuronsPerCore)
    anCores = utils.coresRequired(populationsAN, stdpMaxNeuronsPerCore)
    utils.printSeparator()
    print 'Population(Cores) Summary'
    utils.printSeparator()
    print 'Input: ', len(populationsInput), '(', inputCores, ' cores)'
    print 'Noise: ', len(populationsNoiseSource), '(', noiseCores, ' cores)'
    print 'RN:    ', len(populationsRN), '(', rnCores, ' cores)'
    print 'PN:    ', len(populationsPN), '(', pnCores, ' cores)'
    print 'AN:    ', len(populationsAN), '(', anCores, ' cores)'
    print 'TOTAL: ', totalPops, '(', inputCores + noiseCores + rnCores + pnCores + anCores, ' cores)'
    utils.printSeparator()
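# --- Illustrative sketch (not part of the original module) ---
# utils.coresRequired estimates how many SpiNNaker cores a list of
# populations occupies given the maximum neurons placed per core. A hedged
# guess, assuming each population is partitioned independently:
from math import ceil

def coresRequired_sketch(populations, maxNeuronsPerCore):
    cores = 0
    for pop in populations:
        #each population takes ceil(size / maxNeuronsPerCore) cores
        cores += int(ceil(float(pop._size) / float(maxNeuronsPerCore)))
    return cores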
import matplotlib.pyplot as plt
import Classifier as classifier
import ModellingUtils as utils
import sys
import os.path
import os

params = eval(open("ModelParams-MNISTClassifier.txt").read())
settings = eval(open("Settings-MNISTClassifier.txt").read())

#clear marker file
if utils.fileExists(settings['RUN_COMPLETE_FILE']):
    os.remove(settings['RUN_COMPLETE_FILE'])

#Override default params with any passed args
numArgumentsProvided = len(sys.argv) - 1
if numArgumentsProvided >= 1:
    settings['LEARNING'] = eval(sys.argv[1])
if numArgumentsProvided >= 2:
    params['NUM_VR'] = int(sys.argv[2])
if numArgumentsProvided >= 3:
    params['NUM_CLASSES'] = int(sys.argv[3])
if numArgumentsProvided >= 4:
    settings['SPIKE_SOURCE_VR_RESPONSE_PATH'] = sys.argv[4]
if numArgumentsProvided >= 5:
    settings['SPIKE_SOURCE_ACTIVE_CLASS_PATH'] = sys.argv[5]
if numArgumentsProvided >= 6:
    settings['NUM_OBSERVATIONS'] = int(sys.argv[6])
if numArgumentsProvided >= 7:
    settings['OBSERVATION_EXPOSURE_TIME_MS'] = int(sys.argv[7])
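# --- Illustrative usage (not part of the original script) ---
# The positional overrides above imply invocations of roughly this shape;
# the script name and the concrete values are made up for illustration:
#
#   python MNISTClassifier.py True 2048 10 vrResponses.txt classActivations.txt 100 200
#
# i.e. learning on, 2048 VRs, 10 classes, the two spike source files,
# 100 observations at 200 ms exposure each.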
def setupLayerPN(params, neuronModel, cell_params, populationsRN, populationsPN):
    # create a projection neuron PN cluster population per VR
    # this will be fed by the equivalent RN population and will laterally
    # inhibit between clusters
    numVR = int(params["NUM_VR"])
    # print 'PN layer, no. VR: ', numVR
    pnClusterSize = int(params["CLUSTER_SIZE"] * params["NETWORK_SCALE"])
    maxNeuronsPerCore = int(params["MAX_NEURONS_PER_CORE"])
    maxVrPerPop = maxNeuronsPerCore / pnClusterSize

    # how many cores were needed to accommodate the RN layer (1 pynn pop in this case)
    numCoresRN = utils.coresRequired(populationsRN, maxNeuronsPerCore)
    # print 'The RN layer is taking up ', numCoresRN, ' cores'
    coresAvailablePN = int(params["CORES_ON_BOARD"] - params["NUM_CLASSES"] - numCoresRN - 3)  # 2 x input, 1 x noise source
    # print 'PN layer, no. cores available:', coresAvailablePN

    vrPerPop = int(ceil(float(numVR) / float(coresAvailablePN)))
    if vrPerPop > maxVrPerPop:
        print "The number of VRs and/or the cluster size stipulated for this model is too large for the capacity of this board."
        quit()

    # print 'PN layer, no. VRs per population will be: ', vrPerPop
    pnPopSize = pnClusterSize * vrPerPop
    # print 'PN layer, neurons per population will be: ', pnPopSize
    numPopPN = int(ceil(float(numVR) / float(vrPerPop)))
    # print 'PN layer, number of populations(cores) used will be: ', numPopPN
    # print 'PN layer, spare (unused) cores: ', coresAvailablePN - numPopPN

    weightPNPN = float(params["WEIGHT_WTA_PN_PN"])
    delayPNPN = int(params["DELAY_WTA_PN_PN"])
    connectivityPNPN = float(params["CONNECTIVITY_WTA_PN_PN"])

    for p in range(numPopPN):
        popName = "popPN_" + str(p)
        popPN = spynnaker.Population(pnPopSize, neuronModel, cell_params, label=popName)
        # print 'created population ', popName
        populationsPN.append(popPN)

        # create a FromList to feed each PN neuron in this popn from its
        # corresponding RN neuron in the single monolithic RN popn
        weightRNPN = float(params["WEIGHT_RN_PN"])
        delayRNPN = int(params["DELAY_RN_PN"])
        rnStartIdx = p * pnPopSize
        rnEndIdx = rnStartIdx + pnPopSize - 1

        # The last PN popn will often have unneeded 'ghost' clusters at
        # the end due to imperfect distribution of VRs among cores.
        # As there is no RN cluster that feeds these (RN is one pop of the
        # correct total size), the connections must stop at the end of RN
        rnMaxIdx = populationsRN[0]._size - 1
        if rnEndIdx > rnMaxIdx:
            rnEndIdx = rnMaxIdx  # clamp to the end of the RN population

        pnEndIdx = rnEndIdx - rnStartIdx
        connections = utils.fromList_OneToOne_fromRangeToRange(
            rnStartIdx, rnEndIdx, 0, pnEndIdx, weightRNPN, delayRNPN, delayRNPN
        )
        projClusterRNToClusterPN = spynnaker.Projection(
            populationsRN[0], popPN, spynnaker.FromListConnector(connections), target="excitatory"
        )

        # within this popn only, connect each PN sub-population VR
        # "cluster" to inhibit every other
        if vrPerPop > 1:
            utils.createIntraPopulationWTA(popPN, vrPerPop, weightPNPN, delayPNPN, connectivityPNPN, True)

    # Also connect each PN cluster to inhibit every other cluster
    utils.createInterPopulationWTA(populationsPN, weightPNPN, delayPNPN, connectivityPNPN)
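# --- Illustrative sketch (not part of the original module) ---
# utils.fromList_OneToOne_fromRangeToRange pairs a source index range with
# a target index range one-to-one. A hedged guess from the call site above,
# which passes the same delay twice (presumably a min/max pair); the two
# ranges are assumed inclusive and of equal length, so tgtEnd is implied:
import random

def fromList_OneToOne_fromRangeToRange_sketch(srcStart, srcEnd, tgtStart, tgtEnd, weight, minDelay, maxDelay):
    connections = list()
    for offset in range(srcEnd - srcStart + 1):
        delay = random.uniform(minDelay, maxDelay)
        connections.append((srcStart + offset, tgtStart + offset, weight, delay))
    return connections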
def saveSpikesAN(settings, populationsAN):
    for i in range(len(populationsAN)):
        path = settings['CACHE_DIR'] + '/Spikes_Class' + str(i) + '.csv'
        utils.saveSpikesToFile(populationsAN[i], path)
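# --- Illustrative sketch (not part of the original module) ---
# utils.saveSpikesToFile presumably dumps the (neuron_id, time_ms) rows
# from getSpikes() to CSV. A minimal hedged version:
import csv

def saveSpikesToFile_sketch(population, path):
    spikes = population.getSpikes(compatible_output=True)
    with open(path, 'wb') as f:  #'wb' for the Python 2 csv module used by this codebase
        writer = csv.writer(f)
        for neuronId, timeMs in spikes:
            writer.writerow([neuronId, timeMs])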
def runClassifier(params, settings, fold):
    classifier.printParameters('Model Parameters', params)
    classifier.printParameters('Classifier Settings', settings)

    populationsInput = list()
    populationsNoiseSource = list()
    populationsRN = list()
    populationsPN = list()
    populationsAN = list()
    projectionsPNAN = list()  #keep handle to these for saving learnt weights

    if settings['LEARNING']:
        totalSimulationTime = float(settings['OBSERVATION_EXPOSURE_TIME_MS'] * settings['NUM_OBSERVATIONS'])
    else:
        totalSimulationTime = float(settings['OBSERVATION_EXPOSURE_TIME_MS'] * settings['NUM_OBSERVATIONS_TEST'])
    print 'Total Simulation Time will be', totalSimulationTime

    DT = 1.0  #ms Integration timestep for simulation
    classifier.setupModel(params, settings, DT, totalSimulationTime, populationsInput, populationsNoiseSource, populationsRN, populationsPN, populationsAN, projectionsPNAN)

    utils.recordPopulations(populationsInput, settings['RECORD_POP_INPUT'])
    utils.recordPopulations(populationsNoiseSource, settings['RECORD_POP_NOISE_SOURCE'])
    utils.recordPopulations(populationsRN, settings['RECORD_POP_RN'])
    utils.recordPopulations(populationsPN, settings['RECORD_POP_PN'])
    utils.recordPopulations(populationsAN, settings['RECORD_POP_AN'])

    #run the model for the whole learning or the whole testing period
    classifier.run(totalSimulationTime)

    fig1 = plt.figure(figsize=(20, 20))
    plt.xlabel('Time[ms]', fontsize=16)
    plt.ylabel('Neurons', fontsize=16)
    title = 'Testing'
    if settings['LEARNING']:
        title = 'Training'
    title = title + ' - Odour Classification - ' + str(params['NUM_VR']) + ' Virtual Receptors'
    fig1.suptitle(title, fontsize=18)

    indexOffset = 0
    indexOffset = 1 + utils.plotAllSpikes(populationsInput, totalSimulationTime, indexOffset, settings['RECORD_POP_INPUT'])
    indexOffset = 1 + utils.plotAllSpikes(populationsNoiseSource, totalSimulationTime, indexOffset, settings['RECORD_POP_NOISE_SOURCE'])
    indexOffset = 1 + utils.plotAllSpikes(populationsRN, totalSimulationTime, indexOffset, settings['RECORD_POP_RN'])
    indexOffset = 1 + utils.plotAllSpikes(populationsPN, totalSimulationTime, indexOffset, settings['RECORD_POP_PN'])
    indexOffset = 1 + utils.plotAllSpikes(populationsAN, totalSimulationTime, indexOffset, settings['RECORD_POP_AN'])

    filename = 'RasterPlot-Testing-fold' + str(fold) + '.pdf'
    if settings['LEARNING']:
        filename = 'RasterPlot-Training-fold' + str(fold) + '.pdf'
    plt.savefig(filename)
    plt.close()

    (fig2, (ax1, ax2, ax3)) = plt.subplots(3, 1, figsize=(20, 20), sharex=True)
    plt.axes(ax1)
    utils.plotAllSpikes(populationsRN, totalSimulationTime, 0, settings['RECORD_POP_RN'])
    plt.axes(ax2)
    utils.plotAllSpikes(populationsPN, totalSimulationTime, 0, settings['RECORD_POP_PN'])
    plt.axes(ax3)
    utils.plotAllSpikes(populationsAN, totalSimulationTime, 0, settings['RECORD_POP_AN'])
    ax1.set_title('RN layer spikes', fontsize=30)
    ax2.set_title('PN layer spikes', fontsize=30)
    ax3.set_title('AN layer spikes', fontsize=30)
    ax3.set_xlabel('Simulation time[ms]', fontsize=30)
    ax3.set_ylabel('Neuron indices', fontsize=30)
    ax3.tick_params(labelsize=20)
    ax2.tick_params(labelsize=20)
    ax1.tick_params(labelsize=20)

    filename = 'Separated_RasterPlot-Testing-fold' + str(fold) + '.pdf'
    if settings['LEARNING']:
        filename = 'Separated_RasterPlot-Training-fold' + str(fold) + '.pdf'
    plt.savefig(filename)
    plt.close()

    # fig.add_subplot(2,1,2)
    # utils.plotAllSpikes(populationsAN, totalSimulationTime, 0, settings['RECORD_POP_AN'])

    #if in the learning stage
    if settings['LEARNING']:
        #store the weight values learnt via plasticity; these will be reloaded as
        #static weights for the test stage
        classLabels = utils.loadListFromCsvFile(settings['CLASS_LABELS_TRAIN'], True)
        classifier.saveLearntWeightsPNAN(settings, params, projectionsPNAN, len(populationsPN), len(populationsAN))
        winningClassesByObservation, winningSpikeCounts = classifier.calculateWinnersAN(settings, populationsAN, classLabels)
        scorePercent = classifier.calculateScore(winningClassesByObservation, classLabels)
    else:
        #save the AN layer spike data from the testing run.
        #This data will be interrogated to find the winning class (most active AN pop)
        #during the presentation of each test observation
        #classifier.saveSpikesAN(settings, populationsAN)
        classLabels = utils.loadListFromCsvFile(settings['CLASS_LABELS_TEST'], True)
        winningClassesByObservation, winningSpikeCounts = classifier.calculateWinnersAN(settings, populationsAN, classLabels)
        scorePercent = classifier.calculateScore(winningClassesByObservation, classLabels)
        utils.saveListAsCsvFile(winningClassesByObservation, settings['CLASSIFICATION_RESULTS_PATH'])
        utils.saveListAsCsvFile(winningSpikeCounts, settings['SPIKE_COUNT_RESULTS_PATH'])

    classifier.end()

    #write a marker file to allow invoking programs to know that the Python/PyNN run completed
    utils.saveListToFile(['Pynn Run complete'], settings['RUN_COMPLETE_FILE'])
    print 'PyNN run completed.'
    return scorePercent
    plt.savefig('Crossvalidation_scores.pdf')
    plt.close()
else:
    settingsClassifier['NUM_REPETITIONS'] = 1
    print 'Training classifier...........................................'
    train_classifier(paramsClassifier, settingsClassifier, baselineValues)
    print 'Training completed'
    raw_input("Press Enter to proceed to testing...")
    #baselineValues = make_eNose_baseline.baseline_init_eNose()
    test_classifier(paramsClassifier, settingsClassifier, baselineValues)

    # plot VR spike source for training
    totalSimulationTime = float(observationTime * nrObsTrain)
    classLabels = utils.loadListFromCsvFile(classLabelsTrain, True)
    plot_spike_sources(masterPath, spikeSourceVRTrain, nrInputNeurons, nrVR, observationTime, totalSimulationTime, classLabels, odourNames)

    # plot VR spike source for testing
    totalSimulationTime = float(observationTime * nrObsTest)
    classLabels = utils.loadListFromCsvFile(classLabelsTest, True)
    plot_spike_sources(masterPath, spikeSourceVRTest, nrInputNeurons, nrVR, observationTime, totalSimulationTime, classLabels, odourNames)
import matplotlib.pyplot as plt
import Classifier_LiveSpikingInput as classifier
import ModellingUtils as utils
import sys
import os.path
import os
import time

params = eval(open("ModelParams-MNISTClassifier.txt").read())
settings = eval(open("Settings-MNISTClassifier.txt").read())

#clear marker file
if utils.fileExists(settings['RUN_COMPLETE_FILE']):
    os.remove(settings['RUN_COMPLETE_FILE'])

#Override default params with any passed args
numArgumentsProvided = len(sys.argv) - 1
if numArgumentsProvided >= 1:
    settings['LEARNING'] = eval(sys.argv[1])
if numArgumentsProvided >= 2:
    params['NUM_VR'] = int(sys.argv[2])
if numArgumentsProvided >= 3:
    params['NUM_CLASSES'] = int(sys.argv[3])
if numArgumentsProvided >= 4:
    settings['RN_SPIKE_INJECTION_PORT'] = int(sys.argv[4])
if numArgumentsProvided >= 5:
    settings['RN_SPIKE_INJECTION_POP_LABEL'] = sys.argv[5]
if numArgumentsProvided >= 6:
    settings['CLASS_ACTIVATION_SPIKE_INJECTION_PORT'] = int(sys.argv[6])
if numArgumentsProvided >= 7:
# if run as top-level script
if __name__ == "__main__":
    params = eval(open("ModelParams-eNoseClassifier.txt").read())
    settings = eval(open("Settings-eNoseClassifier.txt").read())

    #clear marker file
    if utils.fileExists(settings['RUN_COMPLETE_FILE']):
        os.remove(settings['RUN_COMPLETE_FILE'])

    #Override default params with any passed args
    numArgumentsProvided = len(sys.argv) - 1
    if numArgumentsProvided >= 1:
        settings['LEARNING'] = eval(sys.argv[1])
    if numArgumentsProvided >= 2:
        params['NUM_VR'] = int(sys.argv[2])
    if numArgumentsProvided >= 3:
        params['NUM_CLASSES'] = int(sys.argv[3])
    if numArgumentsProvided >= 4:
        settings['SPIKE_SOURCE_VR_RESPONSE_TRAIN'] = sys.argv[4]
    if numArgumentsProvided >= 5:
        settings['SPIKE_SOURCE_VR_RESPONSE_TEST'] = sys.argv[5]
def calculateWinnersAN(settings, populationsAN):
    numClasses = len(populationsAN)
    numObservations = settings['NUM_OBSERVATIONS']
    observationExposureTimeMs = float(settings['OBSERVATION_EXPOSURE_TIME_MS'])
    #classActivationExposureFraction = settings['CLASS_ACTIVATION_EXPOSURE_FRACTION']
    #set up lists to hold highest spike count and current winning class so far for each observation
    winningSpikeCount = [0] * numObservations
    winningClass = [-1] * numObservations
    spikeInputStartMs = 10000.0  #set the spike count window to most, not all, of the exposure time
    allSpikesByClass = list()
    '''
    This approach is failing during Test. There are much earlier ghost spikes in AN.
    Try setting up a mini pop fed by all of the spike injection neurons with strong weighting.
    Any true injected spikes will trigger this pop; use it to mark the start of true input.
    '''
    #we don't know when the first live input spikes will appear, as the model takes a varying amount of time to start up.
    #assume that the first spike in the whole AN layer is very close to the start of input from the spike sender.
    #Go through all AN spikes and mark the earliest
    for cls in range(numClasses):
        allSpikes = populationsAN[cls].getSpikes(compatible_output=True)
        allSpikesByClass.append(allSpikes)
        spikeTimeMs = utils.getFirstSpikeTime(allSpikes)
        if spikeTimeMs < spikeInputStartMs:
            spikeInputStartMs = spikeTimeMs
            print 'class', cls, 'spikeInputStartMs updated to', spikeInputStartMs

    presentationStartTimes = utils.loadListFromFile("PresentationTimes.txt")
    print 'Loaded presentation times from file:', presentationStartTimes

    for cls in range(numClasses):
        allSpikes = allSpikesByClass[cls]  #get ptr to spikes extracted for this class over the whole duration
        for observation in range(numObservations):
            observationStartMs = presentationStartTimes[observation]
            '''
            #don't set to the whole observationExposureTimeMs because the first spike may not be
            #right at the start, and we could therefore catch spikes from the next observation
            observationWindowMs = float(0.9 * observationExposureTimeMs)
            offsetMs = spikeInputStartMs + 0.5 * (observationExposureTimeMs - observationWindowMs)
            startMs = offsetMs + (observation * observationExposureTimeMs)
            '''
            startMs = spikeInputStartMs + observationStartMs
            endMs = startMs + observationExposureTimeMs
            observationSpikes = utils.getSpikesBetween(startMs, endMs, allSpikes)
            spikeCount = observationSpikes.shape[0]
            print 'Observation:', observation, 'StartMs:', startMs, 'EndMs:', endMs, 'Class:', cls, 'Spikes:', spikeCount
            if spikeCount > winningSpikeCount[observation]:
                winningSpikeCount[observation] = spikeCount
                winningClass[observation] = cls

    print 'Winning Class for each observation:'
    print winningClass
    return winningClass
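# --- Illustrative sketch (not part of the original module) ---
# utils.getFirstSpikeTime must return the earliest spike time in a
# (neuron_id, time_ms) array, and something large when the population
# never fired (so the 10000.0 ms initial bound above survives). A hedged
# guess:
import numpy as np

def getFirstSpikeTime_sketch(spikes):
    if spikes is None or len(spikes) == 0:
        return float('inf')  #no spikes: never beats the current earliest
    return float(np.min(spikes[:, 1]))  #column 1 holds the spike times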