def check_if_vertex_needs_merging(self, vertex, virtual_vertexs):
    # For each virtual vertex, merge entries which have the same chip ids
    merged = False
    index = 0
    while not merged and index <= len(virtual_vertexs) - 1:
        compare = virtual_vertexs[index]
        compare_coords = compare.virtual_chip_coords
        compare_con_coords = compare.connected_chip_coords
        if (compare_coords['x'] == vertex.virtual_chip_coords['x'] and
                compare_coords['y'] == vertex.virtual_chip_coords['y']):
            if (compare_con_coords['x'] == vertex.connected_chip_coords['x']
                    and compare_con_coords['y'] ==
                    vertex.connected_chip_coords['y']
                    and compare.connected_chip_edge ==
                    vertex.connected_chip_edge):
                merged = True
            else:
                raise exceptions.PacmanException(
                    "cannot merge entries as the connected coords and "
                    "edge are not identical")
        elif (compare_con_coords['x'] == vertex.connected_chip_coords['x']
                and compare_con_coords['y'] ==
                vertex.connected_chip_coords['y']
                and compare.connected_chip_edge ==
                vertex.connected_chip_edge):
            raise exceptions.PacmanException(
                "cannot merge entries as the connected coords and edge "
                "are identical to some other virtual chip with different "
                "coords")
        index += 1
    if not merged:
        virtual_vertexs.append(vertex)
def _check_synapse_dynamics(self, in_edges):
    if self._stdp_checked:
        return True
    self._stdp_checked = True
    for in_edge in in_edges:
        if (isinstance(in_edge, ProjectionEdge)
                and in_edge.synapse_dynamics is not None):
            if in_edge.synapse_dynamics.fast is not None:
                raise exceptions.PacmanException(
                    "Fast synapse dynamics are not supported")
            elif in_edge.synapse_dynamics.slow is not None:
                if self._stdp_mechanism is None:
                    self._stdp_mechanism = in_edge.synapse_dynamics.slow
                elif self._stdp_mechanism != in_edge.synapse_dynamics.slow:
                    raise exceptions.PacmanException(
                        "Different STDP mechanisms on the same"
                        " vertex are not supported")
def getSpikes(self, controller, runtime, compatible_output=False):
    if not controller.dao.has_ran:
        raise exceptions.PacmanException(
            "The simulation has not yet run, "
            "therefore spikes cannot be retrieved")
    # Spike sources store spike vectors optimally, so calculate the
    # minimum number of words needed to represent them
    subVertexOutSpikeBytesFunction = lambda subvertex: OUT_SPIKE_BYTES
    # Use standard behaviour to read spikes
    return self._getSpikes(controller, compatible_output,
                           REGIONS.SPIKE_HISTORY,
                           subVertexOutSpikeBytesFunction, runtime)
def get_gsyn(self, controller, gather=True, compatible_output=False):
    """
    Return a 3-column numpy array containing cell ids, time, and synaptic
    conductances for recorded cells.
    """
    logger.info("Getting gsyn for %s" % self.label)
    if not controller.dao.has_ran:
        raise exceptions.PacmanException(
            "The simulation has not yet run, "
            "therefore gsyn cannot be retrieved")
    return self.get_neuron_parameter(REGIONS.GSYN_HISTORY,
                                     compatible_output, controller)
def get_v(self, controller, gather=True, compatible_output=False):
    """
    Return a 3-column numpy array containing cell ids, time, and Vm for
    recorded cells.

    :param bool gather: not used - inserted to match PyNN specs
    :param bool compatible_output: not used - inserted to match PyNN specs
    """
    logger.info("Getting v for %s" % self.label)
    if not controller.dao.has_ran:
        raise exceptions.PacmanException(
            "The simulation has not yet run, "
            "therefore v cannot be retrieved")
    return self.get_neuron_parameter(REGIONS.POTENTIAL_HISTORY,
                                     compatible_output, controller)
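# A hedged usage sketch for the three readers above (getSpikes, get_gsyn,
# get_v): all of them require that the simulation has already run. `pop`,
# `controller` and `runtime` are placeholders, not names from this module.
#
#   controller = setup(machine="spinn-7")   # see setup() below
#   # ... build the network and run it ...
#   spikes = pop.getSpikes(controller, runtime)
#   gsyn = pop.get_gsyn(controller)    # columns: id, time, conductance
#   v = pop.get_v(controller)          # columns: id, time, Vm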
def test_fourty_eight_board(number_of_dests):
    '''
    Checks all placement combinations for a 48-node board.
    '''
    perms = []
    board48gaps = [[0, 4], [0, 5], [0, 6], [0, 7], [1, 5], [1, 6], [1, 7],
                   [2, 6], [2, 7], [3, 7], [5, 0], [6, 0], [6, 1], [7, 0],
                   [7, 1], [7, 2]]
    for x in range(8):
        for y in range(8):
            if [x, y] not in board48gaps:
                for p in range(1, 17):
                    perms.append([x, y, p])
    # start testing
    fails = ExshastiveRouteGeneration.explore_all_routes(
        perms, "spinn-7", number_of_dests)
    if len(fails) != 0:
        raise exceptions.PacmanException(fails)
def test_four_chip_board(number_of_dests):
    '''
    Checks all placement combinations for a 4-node board.
    '''
    perms = []
    # make permutations for placement locations
    for x in range(2):
        for y in range(2):
            for p in range(1, 17):
                perms.append([x, y, p])
    # start testing
    #source_subvert, dest_subverts, machine, subedges, fails = \
    #    ExshastiveRouteGeneration.try_creating_route(
    #        [0, 0, 1], [[0, 1, 1], [1, 0, 1]], "amu12")
    #fail = ExshastiveRouteGeneration.retrace_route(
    #    source_subvert, dest_subverts, machine, subedges)
    fails = ExshastiveRouteGeneration.explore_all_routes(
        perms, "amu12", number_of_dests)
    if len(fails) != 0:
        raise exceptions.PacmanException(fails)
def __init__(self, presynaptic_population, postsynaptic_population,
             connector, source=None, target='excitatory',
             synapse_dynamics=None, label=None, rng=None):
    """
    Instantiates a :py:object:`Projection`.
    """
    global controller
    self.projection_edge = None
    if issubclass(type(postsynaptic_population.vertex), PopulationVertex):
        # Check that the "target" is an acceptable value
        targets = postsynaptic_population.vertex.get_synapse_targets()
        if target not in targets:
            raise exceptions.PacmanException(
                "Target {} is not available in the post-synaptic "
                "population (choices are {})".format(target, targets))
        synapse_type = postsynaptic_population.vertex.get_synapse_id(target)
    else:
        raise exceptions.ConfigurationException(
            "postsynaptic_population is not a supported receiver of "
            "synaptic projections")

    # Check that the edge doesn't already exist elsewhere.
    # This would be a possible place for a merge at some point,
    # but this needs more thought
    for edge in controller.dao.get_edges():
        if (edge.prevertex == presynaptic_population.vertex and
                edge.postvertex == postsynaptic_population.vertex):
            raise exceptions.PacmanException(
                "More than one connection between the same pair of"
                " vertices is not currently supported")

    synapse_list = connector.generate_synapse_list(
        presynaptic_population.vertex, postsynaptic_population.vertex,
        1000.0 / controller.dao.machineTimeStep, synapse_type)
    self.read_synapse_list = None

    # If there are some negative weights
    if synapse_list.get_min_weight() < 0:
        # If there are mixed negative and positive weights,
        # raise an exception
        if synapse_list.get_max_weight() > 0:
            raise exceptions.PacmanException("Weights must be positive")
        # Otherwise, the weights are all negative, so invert them(!)
        else:
            synapse_list.flip()

    # Check if all requested delays can fit into the delays natively
    # supported by the models
    min_delay, max_delay = synapse_list.get_min_max_delay()
    natively_supported_delay_for_models = MAX_SUPPORTED_DELAY_TICS
    delay_extention_max_supported_delay = \
        MAX_DELAY_BLOCKS * MAX_TIMER_TICS_SUPPORTED_PER_BLOCK

    if max_delay > (natively_supported_delay_for_models +
                    delay_extention_max_supported_delay):
        raise exceptions.ConfigurationException(
            "the max delay for projection {} is not supported by the "
            "pacman toolchain".format(max_delay))

    if conf.config.has_option("Model", "max_delay"):
        # Config values may be stored as strings, so convert before comparing
        user_max_delay = float(conf.config.get("Model", "max_delay"))
        if max_delay > user_max_delay:
            logger.warn("The end user entered a max delay that this "
                        "projection exceeds")

    if max_delay > natively_supported_delay_for_models:
        source_sz = presynaptic_population.vertex.atoms
        self._addDelayExtension(source_sz, max_delay,
                                natively_supported_delay_for_models,
                                connector, synapse_list,
                                presynaptic_population,
                                postsynaptic_population, label,
                                synapse_dynamics)
    else:
        self.projection_edge = ProjectionEdge(
            presynaptic_population.vertex, postsynaptic_population.vertex,
            controller.dao.machineTimeStep, synapse_list=synapse_list,
            synapse_dynamics=synapse_dynamics, label=label)
        self.delay_edge = None
        controller.add_edge(self.projection_edge)
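# A minimal sketch of constructing a Projection, based only on the signature
# above. `pre_pop`, `post_pop` and `OneToOneConnector` are placeholders for
# whatever populations and PyNN connector the script has built; `target`
# must be one of the post-synaptic vertex's synapse targets (as returned by
# get_synapse_targets()), or the PacmanException above is raised.
#
#   proj = Projection(pre_pop, post_pop, OneToOneConnector(),
#                     target='excitatory', label='pre->post')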
def setup(timestep=None, min_delay=None, max_delay=None, **kwargs):
    """
    Should be called at the very beginning of a script.
    extra_params contains any keyword arguments that are required by a
    given simulator but not by others.

    For simulation on SpiNNaker the following parameters are mandatory:

    :param `pacman103.lib.lib_machine` machine:
        A SpiNNaker machine used to run the simulation.

    The setup() call instantiates a
    :py:class:`pacman103.core.control.Controller` object which is used as
    a global variable throughout the whole process.

    It also creates an AppMonitor Object (a vertex with model-type AppMon),
    placing a mapping constraint on it so that it is on chip (0,0).
    This functionality may move elsewhere later.

    NB: timestep, min_delay and max_delay are required by the PyNN API but
    we ignore them because they have no bearing on the on-chip simulation
    code.
    """
    global controller
    logger.info("PACMAN103 (c) 2014 APT Group, University of Manchester")
    logger.info(" Release version 2014.4.1 - April 2014")
    # Raise an exception if no SpiNNaker machine is specified
    if "machine" in kwargs:
        machine_name = kwargs.get("machine")
        logger.warn("The machine name from kwargs is overriding the "
                    "machine name defined in the pacman.cfg file")
    elif conf.config.has_option("Machine", "machineName"):
        machine_name = conf.config.get("Machine", "machineName")
    else:
        raise Exception("A SpiNNaker machine must be specified in "
                        "pacman.cfg.")
    if machine_name == 'None':
        raise Exception("A SpiNNaker machine must be specified in "
                        "pacman.cfg.")

    reload_time = None
    if conf.config.has_option("Execute", "reload_date"):
        reload_time = conf.config.get("Execute", "reload_date")
        if reload_time != 'None':
            logger.warn("The reload parameter was set, "
                        "therefore not recompiling")
        else:
            reload_time = None

    # Deal with the parameters allowed via the setup optionals
    if timestep is not None:
        timestep *= 1000  # convert from ms into microseconds
        conf.config.set("Machine", "machineTimeStep", timestep)
    else:
        timestep = conf.config.getint("Machine", "machineTimeStep")
    if min_delay is not None and float(min_delay * 1000) < 1.0 * timestep:
        raise exceptions.ConfigurationException(
            "Pacman does not support min delays below {} ms with the "
            "current machine time step".format(1.0 * timestep))

    natively_supported_delay_for_models = MAX_SUPPORTED_DELAY_TICS
    delay_extention_max_supported_delay = \
        MAX_DELAY_BLOCKS * MAX_TIMER_TICS_SUPPORTED_PER_BLOCK
    max_delay_tics_supported = \
        natively_supported_delay_for_models + \
        delay_extention_max_supported_delay
    if (max_delay is not None and
            float(max_delay * 1000) > max_delay_tics_supported * timestep):
        # Report the supported bound in ms (timestep is in microseconds)
        raise exceptions.ConfigurationException(
            "Pacman does not support max delays above {} ms with the "
            "current machine time step".format(
                max_delay_tics_supported * timestep / 1000.0))
    if min_delay is not None:
        conf.config.add_section("Model")
        conf.config.set("Model", "min_delay", (min_delay * 1000) / timestep)
    if max_delay is not None:
        if not conf.config.has_section("Model"):
            conf.config.add_section("Model")
        conf.config.set("Model", "max_delay", (max_delay * 1000) / timestep)

    time_scale_factor = None
    if (conf.config.has_option("Machine", "timeScaleFactor") and
            conf.config.get("Machine", "timeScaleFactor") != "None"):
        time_scale_factor = conf.config.getint("Machine",
                                               "timeScaleFactor")
        if timestep * time_scale_factor < 1000:
            logger.warn("the combination of machine time step and the "
                        "machine time scale factor results in a real "
                        "timer tic that is currently not reliably "
                        "supported by the spinnaker machine.")
    else:
        time_scale_factor = max(1, math.ceil(1000.0 / float(timestep)))
        if time_scale_factor > 1:
            logger.warn("A timestep was entered that has forced pacman103 "
                        "to automatically slow the simulation down from "
                        "real time by a factor of {}. To remove this "
                        "automatic behaviour, please enter a "
                        "timescaleFactor value in your "
                        ".pacman.cfg".format(time_scale_factor))

    # Create a new Controller to run PyNN
    controller = control.Controller(sys.modules[__name__], machine_name,
                                    reload_time=reload_time)
    # Set the app ID
    appID = conf.config.getint("Machine", "appID")
    controller.dao.app_id = appID
    logger.info("Setting appID to %d." % appID)
    # Set the machine time step for the simulation
    machineTimeStep = conf.config.getint("Machine", "machineTimeStep")
    controller.dao.machineTimeStep = machineTimeStep
    logger.info("Setting machine time step to %d micro-seconds."
                % machineTimeStep)
    controller.dao.time_scale_factor = time_scale_factor
    logger.info("Setting time scale factor to %d." % time_scale_factor)
    # Set boolean variable writeTextSpecs in the DAO if we are required to
    writeTextSpecs = False
    if conf.config.getboolean("Reports", "reportsEnabled"):
        writeTextSpecs = conf.config.getboolean("Reports",
                                                "writeTextSpecs")
    controller.dao.writeTextSpecs = writeTextSpecs

    if conf.config.has_option("Recording", "send_live_spikes"):
        if conf.config.getboolean("Recording", "send_live_spikes"):
            port = None
            if conf.config.has_option("Recording", "live_spike_port"):
                port = conf.config.getint("Recording", "live_spike_port")
            hostname = "localhost"
            if conf.config.has_option("Recording", "live_spike_host"):
                hostname = conf.config.get("Recording", "live_spike_host")
            tag = None
            if conf.config.has_option("Recording", "live_spike_tag"):
                tag = conf.config.getint("Recording", "live_spike_tag")
            if tag is None:
                raise exceptions.PacmanException(
                    "Target tag for live spikes has not been set")
            # Set up the forwarding so that monitored spikes are sent to
            # the requested location
            controller.set_tag_output(tag, port, hostname, 10)
            # takes the same port for the visualiser if being used
            if (conf.config.getboolean("Visualiser", "enable") and
                    conf.config.getboolean("Visualiser", "have_board")):
                controller.set_visulaiser_port(port)

    # Create the special AppMonitor vertex, to receive spikes for
    # immediate transfer to the host
    monitorVertex = AppMonitor()
    # Add the special vertex to the list of vertices
    controller.add_vertex(monitorVertex)
    # Keep track of this special vertex as it will be used as a target
    # for recorded spikes
    global appMonitorVertex
    appMonitorVertex = monitorVertex

    # The PyNN API says something must be returned,
    # so we return something useful: our controller instance
    return controller
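# A hedged usage sketch for setup(): the import path, machine name and
# timestep here are examples rather than values taken from this codebase.
# Passing machine= via kwargs overrides any machineName in pacman.cfg, as
# the code above warns.
#
#   import pacman103.front.pynn as p    # assumed front-end import path
#   controller = p.setup(timestep=1.0, machine="spinn-7")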
def get_chip(self, x, y):
    if self.chip_exists_at(x, y):
        return self._chips["{}:{}".format(x, y)]
    else:
        raise exceptions.PacmanException(
            "no chip with coords {}:{}".format(x, y))
def add_virtual_chips(self):
    for virtual_chip in self.virtual_chips:
        # Create and add a new chip
        coords = virtual_chip.virtual_chip_coords
        # Check that the virtual chip does not correspond to a real chip
        if self.chip_exists_at(coords['x'], coords['y']):
            raise exceptions.PacmanException(
                "the virtual chip currently corresponds to a real chip, "
                "therefore fails")
        chip = board_chip.Chip(self, coords['x'], coords['y'], -1, 256,
                               virtual=True)
        self._chips[chip.toString()] = chip

        # Connect it to its predefined neighbour
        connected_chip_coords = virtual_chip.connected_chip_coords
        real_chip = self.get_chip(connected_chip_coords['x'],
                                  connected_chip_coords['y'])
        real_chips_router = real_chip.router
        link_id = virtual_chip.connected_chip_edge
        new_link = {'x': coords['x'],
                    'y': coords['y'],
                    '16bit': 256,
                    'object': chip.router}
        if link_id in real_chips_router.linksdownlist:
            real_chips_router.linksdownlist.remove(link_id)
        else:
            raise exceptions.PacmanException(
                "connecting an external device to an up link is not "
                "currently supported in pacman103")
        real_chips_router.neighbourlist[link_id] = new_link

        # Connect and update the fake router's neighbour list.
        # The new link has to be in the opposite direction to the
        # original one
        new_link_id = 0
        if link_id == 0:
            new_link_id = 3
        elif link_id == 1:
            new_link_id = 4
        elif link_id == 2:
            new_link_id = 5
        elif link_id == 3:
            new_link_id = 0
        elif link_id == 4:
            new_link_id = 1
        elif link_id == 5:
            new_link_id = 2
        new_link = {'x': connected_chip_coords['x'],
                    'y': connected_chip_coords['y'],
                    '16bit': 256,
                    'object': real_chips_router}
        fake_chips_router = chip.router
        fake_chips_router.linksuplist.append(new_link_id)
        for index in range(6):
            if index == new_link_id:
                fake_chips_router.neighbourlist.append(new_link)
            else:
                fake_chips_router.neighbourlist.append(None)
                fake_chips_router.linksdownlist.append(index)
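# The if/elif chain above encodes the SpiNNaker convention that link i and
# link (i + 3) % 6 point in opposite directions. A standalone sketch of that
# mapping (`opposite_link` is an illustrative helper, not part of pacman103):

def opposite_link(link_id):
    """Return the id of the link pointing back along link `link_id`."""
    return (link_id + 3) % 6

assert [opposite_link(i) for i in range(6)] == [3, 4, 5, 0, 1, 2]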
def get_neuron_parameter(self, region, compatible_output, controller):
    if not controller.dao.has_ran:
        raise exceptions.PacmanException(
            "The simulation has not yet run, "
            "therefore the parameter cannot be retrieved")
    value = numpy.zeros((0, 3))

    # Find all the sub-vertices that this population exists on
    for subvertex in self.subvertices:
        (x, y, p) = subvertex.placement.processor.get_coordinates()
        controller.txrx.select(x, y)

        # Get the App Data for the core
        appDataBaseAddressOffset = getAppDataBaseAddressOffset(p)
        appDataBaseAddressBuf = controller.txrx.memory_calls.read_mem(
            appDataBaseAddressOffset, scamp.TYPE_WORD, 4)
        appDataBaseAddress = struct.unpack("<I", appDataBaseAddressBuf)[0]

        # Get the position of the value buffer
        vRegionBaseAddressOffset = getRegionBaseAddressOffset(
            appDataBaseAddress, region)
        vRegionBaseAddressBuf = controller.txrx.memory_calls.read_mem(
            vRegionBaseAddressOffset, scamp.TYPE_WORD, 4)
        vRegionBaseAddress = struct.unpack("<I", vRegionBaseAddressBuf)[0]
        vRegionBaseAddress += appDataBaseAddress

        # Read the size
        numberOfBytesWrittenBuf = controller.txrx.memory_calls.read_mem(
            vRegionBaseAddress, scamp.TYPE_WORD, 4)
        numberOfBytesWritten = struct.unpack_from(
            "<I", numberOfBytesWrittenBuf)[0]

        # Read the values
        logger.debug("Reading %d (%s) bytes starting at %s"
                     % (numberOfBytesWritten, hex(numberOfBytesWritten),
                        hex(vRegionBaseAddress + 4)))
        vData = controller.txrx.memory_calls.read_mem(
            vRegionBaseAddress + 4, scamp.TYPE_WORD, numberOfBytesWritten)
        bytesPerTimeStep = subvertex.n_atoms * 4
        numberOfTimeStepsWritten = numberOfBytesWritten / bytesPerTimeStep
        msPerTimestep = controller.dao.machineTimeStep / 1000.0

        logger.debug("Processing %d timesteps" % numberOfTimeStepsWritten)

        # Standard fixed-point 'accum' type scaling
        size = len(vData) / 4
        scale = numpy.zeros(size, dtype=numpy.float)
        scale.fill(float(0x7FFF))

        # Add an array for time and neuron id
        time = numpy.array([int(i / subvertex.n_atoms) * msPerTimestep
                            for i in range(size)], dtype=numpy.float)
        neuronId = numpy.array([int(i % subvertex.n_atoms)
                                + subvertex.lo_atom for i in range(size)],
                               dtype=numpy.uint32)

        # Get the values
        tempValue = numpy.frombuffer(vData, dtype="<i4")
        tempValue = numpy.divide(tempValue, scale)
        tempArray = numpy.dstack((time, neuronId, tempValue))
        tempArray = numpy.reshape(tempArray, newshape=(-1, 3))
        value = numpy.append(value, tempArray, axis=0)

    logger.debug("Arranging parameter output")

    if compatible_output:
        # Change the order to be neuronID : time (don't know why - this
        # is how it was done in the old code, so I am doing it here too)
        value[:, [0, 1, 2]] = value[:, [1, 0, 2]]

    # Both orderings use the same lexsort, so it sorts by the first
    # column: neuron ID when compatible_output is set, otherwise time
    # (as NEST seems to do)
    vIndex = numpy.lexsort((value[:, 2], value[:, 1], value[:, 0]))
    return value[vIndex]
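# A minimal sketch of the fixed-point decode used in get_neuron_parameter
# above: recorded words are little-endian signed 32-bit values that are
# divided by 0x7FFF to recover floats. The sample values are illustrative.

import numpy

raw = numpy.array([0x7FFF, 0, -0x7FFF], dtype=numpy.int32)
decoded = raw / float(0x7FFF)   # -> array([ 1.,  0., -1.])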
def add_processor(self, chip_processor, phyid):
    if phyid in self._processors:
        raise exceptions.PacmanException(
            "trying to add a processor that already exists")
    else:
        self._processors[phyid] = chip_processor
def get_processor(self, idx):
    if idx in self._processors:
        return self._processors[idx]
    else:
        raise exceptions.PacmanException(
            "no processor with id {}".format(idx))