def run(self, mbs, x, y):
    """Build and execute the speed-test graph for one reader/receiver pair.

    Loads the protocol-search binary, places the SDRAM reader on chip
    (x, y), wires it to a gatherer, and runs via self._do_run.  If the
    requested chip does not exist on the booted machine, the simulation
    is stopped and a sentinel result tuple is returned instead.
    """
    # load the C binary for this test
    sim.setup(model_binary_module=(
        speed_tracker_with_protocol_search_c_code_version))

    # create the two machine vertices and pin the reader to (x, y)
    reader = SDRAMReaderAndTransmitterWithProtocol(mbs)
    reader.add_constraint(ChipAndCoreConstraint(x=x, y=y))
    receiver = PacketGathererWithProtocol()

    # register vertices with the machine graph
    sim.add_machine_vertex_instance(reader)
    sim.add_machine_vertex_instance(receiver)

    # connect reader -> receiver on the TRANSMIT partition
    sim.add_machine_edge_instance(MachineEdge(reader, receiver), "TRANSMIT")

    # guard: the requested chip may be absent (dead chip / small machine)
    machine = sim.machine()
    if not machine.is_chip_at(x, y):
        sim.stop()
        return None, False, False, "", 0

    return self._do_run(reader, receiver, mbs)
from pacman.model.graphs.machine import MachineEdge
from pkt_injector_vertex import Pkt_Injector_Vertex
from pkt_extractor_vertex import Pkt_Extractor_Vertex

# number of packet-injector vertices to create
NUM_INJECTORS = 9

# NOTE(review): `gfe` and `os` are used below but not imported in this
# chunk — confirm they are imported earlier in the file.
gfe.setup(machine_time_step=1000000,
          n_chips_required=1,
          model_binary_folder=os.path.dirname(__file__))

# instantiate injector vertices
injectors = []
for i in range(NUM_INJECTORS):
    iv = Pkt_Injector_Vertex(i)
    gfe.add_machine_vertex_instance(iv)
    injectors.append(iv)

# instantiate a single extractor vertex that all injectors feed
ev = Pkt_Extractor_Vertex()
gfe.add_machine_vertex_instance(ev)

# create links from injectors to extractor, one per injector's
# own outgoing partition (iv.inj_lnk)
for iv in injectors:
    gfe.add_machine_edge_instance(MachineEdge(iv, ev), iv.inj_lnk)

# run for 10 s of simulated time, then shut the machine down
gfe.run(10000)

gfe.stop()
def add_machine_edge_instance(source, dest, partition):
    """Register an outgoing partition for *source* and add the edge.

    Records the (source, partition) pair with the module-level
    partition manager, then delegates edge creation to the graph
    front end via the module's __generate_machine_edge helper.

    :param source: pre-vertex of the edge
    :param dest: post-vertex of the edge
    :param partition: identifier of the outgoing partition
    """
    # NOTE: no `global` statement is needed — partition_manager is only
    # read here, never rebound, so the module-level binding is used.
    partition_manager.add_outgoing_partition(source, partition)
    front_end.add_machine_edge_instance(__generate_machine_edge(
        source, dest, partition), partition)
# NOTE(review): fragment — the `positions` block references x/y and so
# presumably sits inside nested loops over the fabric that start before
# this chunk; the run/readback statements below belong after those loops.
# Neighbour coordinates (torus wrap via modulo) with compass labels.
positions = [
    (x, (y + 1) % MAX_Y_SIZE_OF_FABRIC, "N"),
    ((x + 1) % MAX_X_SIZE_OF_FABRIC,
     (y + 1) % MAX_Y_SIZE_OF_FABRIC, "NE"),
    ((x + 1) % MAX_X_SIZE_OF_FABRIC, y, "E"),
    ((x + 1) % MAX_X_SIZE_OF_FABRIC,
     (y - 1) % MAX_Y_SIZE_OF_FABRIC, "SE"),
    (x, (y - 1) % MAX_Y_SIZE_OF_FABRIC, "S"),
    ((x - 1) % MAX_X_SIZE_OF_FABRIC,
     (y - 1) % MAX_Y_SIZE_OF_FABRIC, "SW"),
    ((x - 1) % MAX_X_SIZE_OF_FABRIC, y, "W"),
    ((x - 1) % MAX_X_SIZE_OF_FABRIC,
     (y + 1) % MAX_Y_SIZE_OF_FABRIC, "NW")]

# one edge per neighbour, labelled with the compass direction, all on
# the shared Conway partition
for (dest_x, dest_y, compass) in positions:
    front_end.add_machine_edge_instance(
        MachineEdge(vertices[x][y], vertices[dest_x][dest_y],
                    label=compass),
        ConwayBasicCell.PARTITION_ID)

# run the simulation
front_end.run(runtime)

# get recorded data
recorded_data = dict()

# get the data per vertex, keyed by its (x, y) fabric coordinate
for x in range(0, MAX_X_SIZE_OF_FABRIC):
    for y in range(0, MAX_Y_SIZE_OF_FABRIC):
        recorded_data[(x, y)] = vertices[x][y].get_data(
            front_end.buffer_manager(),
            front_end.placements().get_placement_of_vertex(
                vertices[x][y]))
# NOTE(review): this chunk begins mid-way through a `positions` list
# literal — its opening entries and the enclosing x/y loops are not
# visible here.  Compass labels differ from the Conway example; this
# appears intentional for the lattice orientation — confirm.
            (y + 1) % MAX_Y_SIZE_OF_FABRIC, "SE"),
           ((x + 1) % MAX_X_SIZE_OF_FABRIC, y, "S"),
           ((x + 1) % MAX_X_SIZE_OF_FABRIC,
            (y - 1) % MAX_Y_SIZE_OF_FABRIC, "SW"),
           (x, (y - 1) % MAX_Y_SIZE_OF_FABRIC, "W"),
           ((x - 1) % MAX_X_SIZE_OF_FABRIC,
            (y - 1) % MAX_Y_SIZE_OF_FABRIC, "NW"),
           ((x - 1) % MAX_X_SIZE_OF_FABRIC, y, "N"),
           ((x - 1) % MAX_X_SIZE_OF_FABRIC,
            (y + 1) % MAX_Y_SIZE_OF_FABRIC, "NE")]

# build edges for each direction for this vertex, and also record the
# neighbour on the vertex itself for later use
for (dest_x, dest_y, compass) in positions:
    front_end.add_machine_edge_instance(
        LatticeEdge(
            vertices[x][y], vertices[dest_x][dest_y], compass,
            "edge between {} and {}".format(
                vertices[x][y], vertices[dest_x][dest_y])),
        LatticeBasicCell.PARTITION_ID)
    vertices[x][y].set_direction_vertex(
        direction=compass, vertex=vertices[dest_x][dest_y])

# run the simulation
front_end.run(runtime)

# get recorded data
recorded_data = dict()

# if not front_end.use_virtual_machine():
buffer_manager = front_end.buffer_manager()

# get the data per vertex
def generate_machine_graph (self):
    """ generates a machine graph for the application graph

    Estimates the number of cores/chips/boards needed, requests a
    SpiNNaker machine, creates weight/sum/input/threshold machine
    vertices for every (group, subgroup), and wires the forward,
    backprop, link-delta-summation, criterion, stop and sync edges.

    :return: True on success, False if the machine request failed
    """
    print ("generating machine graph")

    # path to binary files
    binaries_path = os.path.join(os.path.dirname(__file__),
                                 "..", "binaries")

    # estimate number of SpiNNaker boards required
    # number of subgroups
    # NOTE(review): accumulates into self.subgroups — assumes it starts
    # at 0 when this method runs; confirm it is reset elsewhere.
    for grp in self.groups:
        self.subgroups += grp.subgroups

    # number of required cores
    # w: one per subgroup pair; s: a tree of cores per subgroup sized by
    # the fan-in limit MAX_S_CORE_LINKS; i and t: one per subgroup
    w_cores = self.subgroups * self.subgroups
    s_cores = self.subgroups * (((self.subgroups - 2) //
                                 (MLPConstants.MAX_S_CORE_LINKS - 1)) + 1)
    i_cores = self.subgroups
    t_cores = self.subgroups
    cores = w_cores + s_cores + i_cores + t_cores

    s = '' if cores == 1 else 's'
    print (f"need {cores} SpiNNaker core{s}")

    # number of required chips
    chips = ((cores - 1) // MLPConstants.DEF_SPINN_CORES_PER_CHIP) + 1
    s = '' if chips == 1 else 's'
    print (f"estimating {chips} SpiNNaker chip{s}")

    # number of required boards
    boards = ((chips - 1) // MLPConstants.DEF_SPINN_CHIPS_PER_BOARD) + 1
    s = '' if boards == 1 else 's'
    print (f"requesting {boards} SpiNNaker board{s}")

    # request a SpiNNaker machine and setup the machine graph
    try:
        gfe.setup (model_binary_folder = binaries_path,
                   n_boards_required = boards
                   )
    except Exception as err:
        print ("\n--------------------------------------------------")
        print (f"error: {err}")
        print ("--------------------------------------------------\n")
        return False

    # create weight, sum, input and threshold
    # machine vertices associated with every subgroup
    for grp in self.groups:
        for sgrp in range (grp.subgroups):
            # create one weight core for every
            # (from_group/from_subgroup, group/subgroup) pair
            #TODO: all-zero cores can be optimised out
            wvs = []
            for from_grp in self.groups:
                for from_sgrp in range (from_grp.subgroups):
                    wv = WeightVertex (self, grp, sgrp,
                                       from_grp, from_sgrp)
                    gfe.add_machine_vertex_instance (wv)
                    wvs.append (wv)
            grp.w_vertices.append (wvs)

            # create a sum core tree per subgroup
            #NOTE: sum vertices are added during tree building
            svt = SumVertexTree (self, grp, sgrp)
            grp.s_vertex.append (svt)

            # create one input core per subgroup
            iv = InputVertex (self, grp, sgrp)
            grp.i_vertex.append (iv)
            gfe.add_machine_vertex_instance (iv)

            # create one threshold core per subgroup
            tv = ThresholdVertex (self, grp, sgrp)
            grp.t_vertex.append (tv)
            gfe.add_machine_vertex_instance (tv)

    # groups and subgroups with special functions
    # first group/subgroup collects link-delta and sync traffic;
    # the last subgroup of the last output group broadcasts stop
    first_grp = self.groups[0]
    first_subgroup_svt = first_grp.s_vertex[0]
    last_out_grp = self.output_chain[-1]
    last_out_subgroup_t_vertex = (
        last_out_grp.t_vertex[last_out_grp.subgroups - 1]
        )

    # create associated forward, backprop, link delta summation,
    # criterion, stop and sync machine edges for every subgroup
    for grp in self.groups:
        for sgrp in range (grp.subgroups):
            svt = grp.s_vertex[sgrp]
            iv = grp.i_vertex[sgrp]
            tv = grp.t_vertex[sgrp]

            for wv in grp.w_vertices[sgrp]:
                from_grp = wv.from_group
                from_sgrp = wv.from_subgroup
                from_svt = from_grp.s_vertex[from_sgrp]
                from_tv = from_grp.t_vertex[from_sgrp]

                # sum tree leaf to connect to depends on group/subgroup
                svt_leaf = svt.leaf (from_grp, from_sgrp)
                from_svt_leaf = from_svt.leaf (grp, sgrp)

                # forward w to s link
                gfe.add_machine_edge_instance (
                    MachineEdge (wv, svt_leaf),
                    wv.fwd_link
                    )

                # forward t to w (multicast) link
                gfe.add_machine_edge_instance (
                    MachineEdge (from_tv, wv),
                    from_tv.fwd_link
                    )

                # backprop w to s link
                gfe.add_machine_edge_instance (
                    MachineEdge (wv, from_svt_leaf),
                    wv.bkp_link
                    )

                # backprop i to w (multicast) link
                gfe.add_machine_edge_instance (
                    MachineEdge (iv, wv),
                    iv.bkp_link
                    )

                # link delta summation w to s link
                gfe.add_machine_edge_instance (
                    MachineEdge (wv, svt_leaf),
                    wv.lds_link
                    )

                # link delta result (first group) s to w (multicast) link
                gfe.add_machine_edge_instance (
                    MachineEdge (first_subgroup_svt.root, wv),
                    first_subgroup_svt.root.lds_link
                    )

                # stop (last output group/subgroup) t to w (multicast) link
                gfe.add_machine_edge_instance (
                    MachineEdge (last_out_subgroup_t_vertex, wv),
                    last_out_subgroup_t_vertex.stp_link
                    )

                # forward sync generation w to s links
                gfe.add_machine_edge_instance (
                    MachineEdge (wv, svt_leaf),
                    wv.fsg_link
                    )

            # forward s to i link
            gfe.add_machine_edge_instance (
                MachineEdge (svt.root, iv),
                svt.root.fwd_link
                )

            # forward i to t link
            gfe.add_machine_edge_instance (
                MachineEdge (iv, tv),
                iv.fwd_link
                )

            # backprop t to i link
            gfe.add_machine_edge_instance (
                MachineEdge (tv, iv),
                tv.bkp_link
                )

            # backprop s to t link
            gfe.add_machine_edge_instance (
                MachineEdge (svt.root, tv),
                svt.root.bkp_link
                )

            # link delta summation s to s link
            if sgrp != 0:
                # first subgroup collects from all other subgroups
                gfe.add_machine_edge_instance (
                    MachineEdge (svt.root, grp.s_vertex[0].root),
                    svt.root.lds_link
                    )
            elif grp != first_grp:
                # first group collects from all other groups
                gfe.add_machine_edge_instance (
                    MachineEdge (svt.root, first_subgroup_svt.root),
                    svt.root.lds_link
                    )

            # t to t criterion link
            # intra-group criterion link to last subgroup t
            if sgrp < (grp.subgroups - 1):
                gfe.add_machine_edge_instance (
                    MachineEdge (tv, grp.t_vertex[grp.subgroups - 1]),
                    tv.stp_link
                    )
            elif grp != last_out_grp:
                # inter-group criterion link to last output subgroup
                gfe.add_machine_edge_instance (
                    MachineEdge (tv, last_out_subgroup_t_vertex),
                    tv.stp_link
                    )

            # stop (last output group/subgroup) t to s (multicast) link
            # NOTE: rebinds the pluralisation helper name `s` above —
            # harmless here, but worth renaming one of them.
            for s in svt.vertices:
                gfe.add_machine_edge_instance (
                    MachineEdge (last_out_subgroup_t_vertex, s),
                    last_out_subgroup_t_vertex.stp_link
                    )

            # stop (last output group/subgroup) t to i (multicast) link
            gfe.add_machine_edge_instance (
                MachineEdge (last_out_subgroup_t_vertex, iv),
                last_out_subgroup_t_vertex.stp_link
                )

            # stop (last output group/subgroup) t to t (multicast) link
            if tv != last_out_subgroup_t_vertex:
                gfe.add_machine_edge_instance (
                    MachineEdge (last_out_subgroup_t_vertex, tv),
                    last_out_subgroup_t_vertex.stp_link
                    )

            # forward sync generation s to s links
            #NOTE: s cores that are tree internal nodes not involved
            if sgrp != 0:
                # first subgroup collects from all other subgroups
                gfe.add_machine_edge_instance (
                    MachineEdge (svt.root, grp.s_vertex[0].root),
                    svt.root.fsg_link
                    )
            elif grp != first_grp:
                # first group collects from all other groups
                gfe.add_machine_edge_instance (
                    MachineEdge (svt.root, first_subgroup_svt.root),
                    svt.root.fsg_link
                    )

    # forward sync generation first s to last t link
    gfe.add_machine_edge_instance (
        MachineEdge (first_subgroup_svt.root,
                     last_out_subgroup_t_vertex),
        first_subgroup_svt.root.fsg_link
        )

    self._graph_rdy = True

    return True
mbs = 1.0 # setup system sim.setup(model_binary_module=test_retransmission_phase_on_multi_cores) # build verts reader = SDRAMReaderAndTransmitterWithProtocol(mbs) reader.add_constraint(ChipAndCoreConstraint(x=1, y=1)) receiver = PacketGathererWithProtocol() # add verts to graph sim.add_machine_vertex_instance(reader) sim.add_machine_vertex_instance(receiver) # build and add edge to graph sim.add_machine_edge_instance(MachineEdge(reader, receiver), "TRANSMIT") # run forever (to allow better speed testing) sim.run() # get placements for extraction placements = sim.placements() # try getting data via mc transmission start = None end = None data = None sim.transceiver().set_watch_dog(False) try:
def run_mcmc(model, data, n_samples, burn_in=2000, thinning=5,
             degrees_of_freedom=3.0, seed=None, n_chips=None,
             n_boards=None):
    """ Executes an MCMC model, returning the received samples

    :param model: The MCMCModel to be used
    :param data: The data to sample
    :param n_samples: The number of samples to generate
    :param burn_in:\
        no of MCMC transitions to reach apparent equilibrium before\
        generating inference samples
    :param thinning:\
        sampling rate i.e. 5 = 1 sample for 5 generated steps
    :param degrees_of_freedom:\
        The number of degrees of freedom to jump around with
    :param seed: The random seed to use
    :param n_chips: The number of chips to run the model on
    :param n_boards: The number of boards to request for the run
    :return: The samples read
    :rtype: A numpy array with fields for each model state variable
    """
    # Set up the simulation
    g.setup(n_boards_required=n_boards, n_chips_required=n_chips,
            model_binary_module=model_binaries)

    # Get the number of cores available for use
    n_cores = 0
    machine = g.machine()

    # Create a coordinator for each board, placed on that board's
    # Ethernet chip; remember each board's IP address too
    coordinators = dict()
    boards = dict()
    for chip in machine.ethernet_connected_chips:
        # Create a coordinator
        coordinator = MCMCCoordinatorVertex(
            model, data, n_samples, burn_in, thinning,
            degrees_of_freedom, seed)
        g.add_machine_vertex_instance(coordinator)

        # Put the coordinator on the Ethernet chip
        coordinator.add_constraint(ChipAndCoreConstraint(chip.x, chip.y))
        coordinators[chip.x, chip.y] = coordinator
        boards[chip.x, chip.y] = chip.ip_address

    # Go through all the chips and add the workhorses
    n_chips_on_machine = machine.n_chips
    n_workers = 0
    # counters only exist when the corresponding feature is enabled
    if (model.root_finder):
        n_root_finders = 0
    if (model.cholesky):
        n_cholesky = 0
    for chip in machine.chips:

        # Count the cores in the processor
        # (-1 if this chip also has a coordinator)
        n_cores = len([p for p in chip.processors if not p.is_monitor])
        if (chip.x, chip.y) in coordinators:
            n_cores -= 3  # coordinator and extra_monitor_support (2)
            # each worker needs a companion core per enabled feature,
            # so divide the remaining budget accordingly
            if (model.root_finder):
                if (model.cholesky):
                    n_cores = n_cores // 3
                else:
                    n_cores = n_cores // 2
        else:
            n_cores -= 1  # just extra_monitor_support
            if (model.root_finder):
                if (model.cholesky):
                    n_cores = n_cores // 3
                else:
                    n_cores = n_cores // 2

        # Find the coordinator for the board (or 0, 0 if it is missing)
        eth_x = chip.nearest_ethernet_x
        eth_y = chip.nearest_ethernet_y
        coordinator = coordinators.get((eth_x, eth_y))
        if coordinator is None:
            print("Warning - couldn't find {}, {} for chip {}, {}".format(
                eth_x, eth_y, chip.x, chip.y))
            coordinator = coordinators[0, 0]
            print("Using coordinator ", coordinator)

        # hard-code remove some cores (chip power monitor etc.) just
        # to see what happens
        # n_cores -= non_worker_cores_per_chip
        # print 'n_cores: ', n_cores

        # Add a vertex for each core
        for _ in range(n_cores):

            # Create the vertex and add it to the graph
            vertex = MCMCVertex(coordinator, model)
            n_workers += 1
            g.add_machine_vertex_instance(vertex)

            # Put the vertex on the same board as the coordinator
            vertex.add_constraint(ChipAndCoreConstraint(chip.x, chip.y))

            # Add an edge from the coordinator to the vertex,
            # to send the data
            g.add_machine_edge_instance(
                MachineEdge(coordinator, vertex),
                coordinator.data_partition_name)

            # Add an edge from the vertex to the coordinator,
            # to send acknowledgement
            g.add_machine_edge_instance(
                MachineEdge(vertex, coordinator),
                coordinator.acknowledge_partition_name)

            if (model.root_finder):
                # Create a root finder vertex
                rf_vertex = MCMCRootFinderVertex(vertex, model)
                n_root_finders += 1
                g.add_machine_vertex_instance(rf_vertex)

                # put it on the same chip as the standard mcmc vertex?
                # no - put it on a "nearby" chip, however that works
                rf_vertex.add_constraint(
                    ChipAndCoreConstraint(chip.x, chip.y))

                # Add an edge from mcmc vertex to root finder vertex,
                # to "send" the data - need to work this out
                g.add_machine_edge_instance(
                    MachineEdge(vertex, rf_vertex),
                    vertex.parameter_partition_name)

                # Add edge from root finder vertex back to mcmc vertex
                # to send acknowledgement / result - need to work this out
                g.add_machine_edge_instance(
                    MachineEdge(rf_vertex, vertex),
                    vertex.result_partition_name)

            if (model.cholesky):
                # Create a Cholesky vertex
                cholesky_vertex = MCMCCholeskyVertex(vertex, model)
                n_cholesky += 1
                g.add_machine_vertex_instance(cholesky_vertex)

                # put it on the same chip as the standard mcmc vertex?
                # no - put it on a "nearby" chip, however that works
                cholesky_vertex.add_constraint(
                    ChipAndCoreConstraint(chip.x, chip.y))

                # Add an edge from mcmc vertex to Cholesky vertex,
                # to "send" the data - need to work this out
                g.add_machine_edge_instance(
                    MachineEdge(vertex, cholesky_vertex),
                    vertex.cholesky_partition_name)

                # Add edge from Cholesky vertex back to mcmc vertex
                # to send acknowledgement / result - need to work this out
                g.add_machine_edge_instance(
                    MachineEdge(cholesky_vertex, vertex),
                    vertex.cholesky_result_partition_name)

    start_computing_time = time.time()

    logger.info("n_chips_on_machine {}".format(n_chips_on_machine))
    logger.info("Running {} worker cores".format(n_workers))
    if (model.root_finder):
        logger.info("Running {} root finder cores".format(n_root_finders))
    if (model.cholesky):
        logger.info("Running {} Cholesky cores".format(n_cholesky))

    # Run the simulation
    g.run_until_complete()

    mid_computing_time = time.time()

    # Wait for the application to finish
    txrx = g.transceiver()
    app_id = globals_variables.get_simulator()._app_id
    logger.info("Running {} worker cores".format(n_workers))
    if (model.root_finder):
        logger.info("Running {} root finder cores".format(n_root_finders))
    if (model.cholesky):
        logger.info("Running {} Cholesky cores".format(n_cholesky))
    logger.info("Waiting for application to finish...")
    running = txrx.get_core_state_count(app_id, CPUState.RUNNING)
    # there are now cores doing extra_monitor etc.
    non_worker_cores = n_chips_on_machine + (2 * len(boards))
    # poll until only the non-worker (monitor) cores remain running;
    # abort on any RTE or watchdog failure
    while running > non_worker_cores:
        time.sleep(0.5)
        error = txrx.get_core_state_count(
            app_id, CPUState.RUN_TIME_EXCEPTION)
        watchdog = txrx.get_core_state_count(app_id, CPUState.WATCHDOG)
        if error > 0 or watchdog > 0:
            error_msg = "Some cores have failed ({} RTE, {} WDOG)".format(
                error, watchdog)
            raise Exception(error_msg)
        running = txrx.get_core_state_count(app_id, CPUState.RUNNING)
        print('running: ', running)

    finish_computing_time = time.time()

    # Get the data back, keyed by coordinator (board) coordinates
    samples = dict()
    for coord, coordinator in iteritems(coordinators):
        samples[coord[0], coord[1]] = coordinator.read_samples(
            g.buffer_manager())

    # Close the machine
    g.stop()

    finish_time = time.time()

    # Note: this timing appears to be incorrect now; needs looking at
    # NOTE(review): `start_time` is not defined in this function —
    # presumably a module-level timestamp; confirm.
    print("Overhead time is %s seconds"
          % (start_computing_time - start_time))
    print("Computing time is %s seconds"
          % (finish_computing_time - start_computing_time))
    print("run_until_complete takes %s seconds"
          % (mid_computing_time - start_computing_time))
    print("Data collecting time is %s seconds"
          % (finish_time - finish_computing_time))
    print("Overall running time is %s seconds"
          % (finish_time - start_time))

    return samples
def generate_machine_graph (self):
    """ generates a machine graph for the application graph

    Creates one weight vertex per (from_group, group) pair and one
    sum/input/threshold vertex per group, then wires the forward,
    backprop, synchronisation, link-delta-summation and stop edges.

    NOTE: the Python 2 `print` statements in the original were
    converted to print() calls so this runs under Python 3 as well.
    """
    print ("generating machine graph")

    # setup the machine graph
    g.setup ()

    # set the number of write blocks before generating vertices
    self._num_write_blks = len (self.output_chain)

    # create associated weight, sum, input and threshold
    # machine vertices for every network group
    for grp in self.groups:
        # create one weight core per (from_group, group) pair
        # NOTE: all-zero cores can be optimised out
        for from_grp in self.groups:
            wv = WeightVertex (self, grp, from_grp)
            grp.w_vertices.append (wv)
            g.add_machine_vertex_instance (wv)
            self._num_vertices += 1

        # create one sum core per group
        sv = SumVertex (self, grp)
        grp.s_vertex = sv
        g.add_machine_vertex_instance (sv)
        self._num_vertices += 1

        # create one input core per group
        iv = InputVertex (self, grp)
        grp.i_vertex = iv
        g.add_machine_vertex_instance (iv)
        self._num_vertices += 1

        # create one threshold core per group
        tv = ThresholdVertex (self, grp)
        grp.t_vertex = tv
        g.add_machine_vertex_instance (tv)
        self._num_vertices += 1

    # create associated forward, backprop, synchronisation and
    # stop machine edges for every network group
    first = self.groups[0]
    for grp in self.groups:
        for w in grp.w_vertices:
            _frmg = w.from_group

            # create forward w to s links
            g.add_machine_edge_instance (MachineEdge (w, grp.s_vertex),
                                         w.fwd_link)

            # create forward t to w (multicast) links
            g.add_machine_edge_instance (MachineEdge (_frmg.t_vertex, w),
                                         _frmg.t_vertex.fwd_link)

            # create backprop w to s links
            g.add_machine_edge_instance (MachineEdge (w, _frmg.s_vertex),
                                         w.bkp_link)

            # create backprop i to w (multicast) links
            g.add_machine_edge_instance (MachineEdge (grp.i_vertex, w),
                                         grp.i_vertex.bkp_link)

            # create forward synchronisation w to t links
            g.add_machine_edge_instance (MachineEdge (w, _frmg.t_vertex),
                                         w.fds_link)

            # create link delta summation w to s links
            g.add_machine_edge_instance (MachineEdge (w, grp.s_vertex),
                                         w.lds_link)

            # create link delta summation result s (first) to w links
            g.add_machine_edge_instance (MachineEdge (first.s_vertex, w),
                                         first.s_vertex.lds_link)

        # create forward s to i link
        g.add_machine_edge_instance (MachineEdge (grp.s_vertex,
                                                  grp.i_vertex),
                                     grp.s_vertex.fwd_link)

        # create backprop s to t link
        g.add_machine_edge_instance (MachineEdge (grp.s_vertex,
                                                  grp.t_vertex),
                                     grp.s_vertex.bkp_link)

        # create forward i to t link
        g.add_machine_edge_instance (MachineEdge (grp.i_vertex,
                                                  grp.t_vertex),
                                     grp.i_vertex.fwd_link)

        # create backprop t to i link
        g.add_machine_edge_instance (MachineEdge (grp.t_vertex,
                                                  grp.i_vertex),
                                     grp.t_vertex.bkp_link)

        # create link delta summation s to s links - all s cores
        # (except the first) send to the first s core
        if grp != first:
            print ("Creating lds s-s edge from group {} to group {}".
                   format (grp.label, first.label))
            g.add_machine_edge_instance (MachineEdge (grp.s_vertex,
                                                      first.s_vertex),
                                         grp.s_vertex.lds_link)

        # create stop links, if OUTPUT group
        if grp in self.output_chain:
            # if last OUTPUT group broadcast stop decision
            if grp == self.output_chain[-1]:
                for stpg in self.groups:
                    # create stop links to all w cores
                    for w in stpg.w_vertices:
                        g.add_machine_edge_instance\
                            (MachineEdge (grp.t_vertex, w),
                             grp.t_vertex.stp_link)

                    # create stop links to all s cores
                    g.add_machine_edge_instance\
                        (MachineEdge (grp.t_vertex, stpg.s_vertex),\
                         grp.t_vertex.stp_link)

                    # create stop links to all i cores
                    g.add_machine_edge_instance\
                        (MachineEdge (grp.t_vertex, stpg.i_vertex),\
                         grp.t_vertex.stp_link)

                    # create stop links to t cores (no link to itself!)
                    if stpg != grp:
                        g.add_machine_edge_instance\
                            (MachineEdge (grp.t_vertex, stpg.t_vertex),\
                             grp.t_vertex.stp_link)
            else:
                # create stop link to next OUTPUT group in chain
                _inx = self.output_chain.index (grp)
                _stpg = self.output_chain[_inx + 1]
                g.add_machine_edge_instance (MachineEdge (grp.t_vertex,
                                                          _stpg.t_vertex),
                                             grp.t_vertex.stp_link)
def __init__(self, network, group, subgroup): max_links = MLPConstants.MAX_S_CORE_LINKS # total number of Sum Vertices needed to build the tree num_vrt = ((network.subgroups - 2) // (max_links - 1)) + 1 # the root vertex is used as pre-vertex for outgoing links self._root = SumVertex(network, group, subgroup, 0) # add the root to the graph gfe.add_machine_vertex_instance(self.root) # and to the list of all tree vertices self._vertices = [self.root] # create the SumVertex tree free_links = max_links to_vrt = 0 for vrt in range(1, num_vrt): # create a SumVertex vt = SumVertex(network, group, subgroup, vrt) # add it to the list of vertices self._vertices.append(vt) # add it to the graph gfe.add_machine_vertex_instance(vt) # add all SumVertex links towards the tree root gfe.add_machine_edge_instance( MachineEdge(vt, self.vertices[to_vrt]), vt.fwd_link) gfe.add_machine_edge_instance( MachineEdge(vt, self.vertices[to_vrt]), vt.bkp_link) gfe.add_machine_edge_instance( MachineEdge(vt, self.vertices[to_vrt]), vt.lds_link) gfe.add_machine_edge_instance( MachineEdge(vt, self.vertices[to_vrt]), vt.fsg_link) # take away one free link from vertex to_vrt free_links -= 1 # if out of free links use next available vertex if free_links == 0: free_links = max_links to_vrt += 1 # finally, map every pre-vertex to an available tree vertex self._leaf_map = {} for grp in network.groups: for sgrp in range(grp.subgroups): # assign available leaf vertex self._leaf_map[(grp.id, sgrp)] = self.vertices[to_vrt] # take away one free link from vertex to_vrt free_links -= 1 # if out of free links use next available vertex if free_links == 0: free_links = max_links to_vrt += 1
# output vertex attached over the SpiNNaker link
# NOTE(review): fragment — `spinnaker_link_used`, `input_vertex`,
# `filter_list` and `n_particles` are defined before this chunk.
output_vertex = ICUBOutputVertex(spinnaker_link_id=spinnaker_link_used,
                                 board_address=None,
                                 label="Output Vertex")
front_end.add_machine_vertex_instance(output_vertex)

# create retina filters and edges from retina to filters,
# one filter (and one partition) per retina row
for y_row in range(0, constants.RETINA_Y_SIZE):
    partition_identifier = "retina_slice_row_{}".format(y_row)
    vertex = RetinaFilter(partition_identifier=partition_identifier,
                          filter=y_row, row_id=y_row)
    filter_list.append(vertex)
    front_end.add_machine_vertex_instance(vertex)
    front_end.add_machine_edge_instance(
        MachineEdge(pre_vertex=input_vertex, post_vertex=vertex,
                    label="Edge between retina and filter"),
        partition_identifier)

# create particles, all initialised at the retina centre
# NOTE(review): `/ 2` is true division (float) under Python 3 —
# confirm PfFullParticleVertex accepts non-integer coordinates.
main_particle = True
the_main_particle = None
for x in range(0, n_particles):
    # NOTE(review): fragment — this loop body continues beyond the
    # visible chunk.
    vertex = PfFullParticleVertex(x=constants.RETINA_X_SIZE / 2,
                                  y=constants.RETINA_Y_SIZE / 2,
                                  r=constants.INITIAL_R,
                                  batch_size=constants.MAX_BATCH_SIZE,
                                  n_particles=n_particles, part_id=x,
                                  label="Particle {}".format(x),
                                  main_particle=main_particle)