def setup(self):
    sim.setup(model_binary_module=common)
    vertex_1 = SdramTestVertex(2, fixed_sdram_value=200 * 1024 * 1024)
    vertex_1.splitter = SDRAMSplitterExternal(
        ConstantSDRAMMachinePartition)
    vertex_2 = SdramTestVertex(2, fixed_sdram_value=200 * 1024 * 1024)
    vertex_2.splitter = SDRAMSplitterExternal(
        ConstantSDRAMMachinePartition)
    sim.add_vertex_instance(vertex_1)
    sim.add_vertex_instance(vertex_2)
    sim.add_application_edge_instance(
        ApplicationEdge(vertex_1, vertex_2), "sdram")
    with self.assertRaises(PacmanValueError):
        sim.run(100)
def setup(self):
    sim.setup(model_binary_module=common)
    vertex_1 = SdramTestVertex(2, fixed_sdram_value=20)
    vertex_1.splitter = SDRAMSplitterExternal(
        ConstantSDRAMMachinePartition)
    vertex_2 = SdramTestVertex(2, fixed_sdram_value=20)
    vertex_2.splitter = SDRAMSplitterExternal(
        ConstantSDRAMMachinePartition)
    sim.add_vertex_instance(vertex_1)
    sim.add_vertex_instance(vertex_2)
    sim.add_application_edge_instance(
        ApplicationEdge(vertex_1, vertex_2), "sdram")
    sim.run(100)
    sim.stop()
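# Note (not part of the original tests): the two setups above are identical
# except for the SDRAM each vertex requests. At 200 * 1024 * 1024 bytes per
# vertex the request exceeds what a SpiNNaker chip's SDRAM can hold, so
# mapping is expected to fail with PacmanValueError; at 20 bytes the same
# graph maps and the run completes normally.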
def test_rte_during_run_forever():
    def start():
        sleep(3.0)
        s.stop_run()

    conn = DatabaseConnection(start, local_port=None)
    s.setup(model_binary_folder=os.path.dirname(__file__))
    s.add_machine_vertex_instance(
        RunVertex("test_rte_during_run.aplx",
                  ExecutableType.USES_SIMULATION_INTERFACE))
    s.add_socket_address(None, "localhost", conn.local_port)
    s.run(None)
    with pytest.raises(ExecutableFailedToStopException):
        s.stop()
    conn.close()
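# Flow of the test above, spelled out as a note (not part of the original
# file): run(None) starts the binary running "forever"; the DatabaseConnection
# invokes the start() callback once the run is up, which sleeps three seconds
# and then requests a stop. Because the binary is presumably designed to hit a
# run-time error during that window, stop() is expected to raise
# ExecutableFailedToStopException.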
def setup(n_cores):
    global partition_manager
    partition_manager = util.PartitionManager()
    front_end.setup(
        n_chips_required=n_cores // globals.cores_per_chip,
        model_binary_folder=util.absolute_path_from_home(),
        machine_time_step=globals.machine_time_step,
        time_scale_factor=globals.time_scale_factor,
    )
    __add_db_sock()
    available_cores = \
        front_end.get_number_of_available_cores_on_machine()
    if available_cores <= n_cores:
        raise KeyError(
            "SpiNNaker doesn't have enough cores to run Model")
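# A minimal usage sketch of the helper above (hypothetical value; the globals
# module and its cores_per_chip attribute are whatever the application
# defines):
#
#     setup(n_cores=64)   # requests 64 // cores_per_chip chips, then raises
#                         # if the machine cannot supply more than 64 cores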
def check_extra_monitor(self):
    mbs = _TRANSFER_SIZE_MEGABYTES

    # setup system
    globals_variables.unset_simulator()
    sim.setup(model_binary_folder=os.path.dirname(__file__),
              n_chips_required=2)

    # build verts
    writer_vertex = SDRAMWriter(mbs)

    # add verts to graph
    sim.add_machine_vertex_instance(writer_vertex)
    sim.run(12)

    writer_placement = sim.placements().get_placement_of_vertex(
        writer_vertex)

    # pylint: disable=protected-access
    outputs = sim.globals_variables.get_simulator()._last_run_outputs
    monitor_vertices = outputs[_MONITOR_VERTICES]

    receiver_plt = _get_monitor_placement(
        monitor_vertices, writer_placement)
    gatherers, gatherer = _get_gatherer_for_monitor(writer_vertex)

    start = float(time.time())
    data = _do_transfer(gatherer, gatherers, monitor_vertices,
                        receiver_plt, writer_placement, writer_vertex)
    end = float(time.time())

    print(
        "time taken to extract {} MB is {}. Transfer rate: {} Mb/s".format(
            mbs, end - start, (mbs * 8) / (end - start)))

    check_data(data)

    sim.stop()
def check_extra_monitor(self):
    mbs = _TRANSFER_SIZE_MEGABYTES

    # setup system
    sim.setup(model_binary_folder=os.path.dirname(__file__),
              n_chips_required=2)

    # build verts
    writer_vertex = SDRAMWriter(mbs)

    # add verts to graph
    sim.add_machine_vertex_instance(writer_vertex)
    sim.run(12)

    writer_placement = sim.placements().get_placement_of_vertex(
        writer_vertex)

    # pylint: disable=protected-access
    monitor_vertices = sim.globals_variables.get_simulator().\
        _extra_monitor_to_chip_mapping

    receiver_plt = _get_monitor_placement(
        monitor_vertices, writer_placement)
    gatherers, gatherer = _get_gatherer_for_monitor(writer_vertex)

    start = float(time.time())
    data = _do_transfer(gatherer, gatherers, monitor_vertices,
                        receiver_plt, writer_placement, writer_vertex)
    end = float(time.time())

    print(f"time taken to extract {mbs} MB is {end - start}. "
          f"Transfer rate: {(mbs * 8) / (end - start)} Mb/s")

    check_data(data)

    sim.stop()
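# The throughput arithmetic used by both checks above, pulled out as a small
# sketch for clarity (not part of the original tests): the payload size is in
# megabytes, so multiplying by 8 gives megabits, and dividing by the elapsed
# time in seconds gives the quoted rate in Mb/s.
def _transfer_rate_mbps(megabytes, seconds):
    return (megabytes * 8) / seconds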
import spinnaker_graph_front_end as sim
from pacman.model.constraints.placer_constraints import ChipAndCoreConstraint
from pacman.model.graphs.machine import MachineEdge
from spinnaker_graph_front_end.examples.speed_tracker.packt_gatherer import \
    PacketGatherer
from spinnaker_graph_front_end.examples.speed_tracker.\
    sdram_reader_and_transmitter import SDRAMReaderAndTransmitter
import time
from spinnaker_graph_front_end.examples import speed_tracker

# data to write
mbs = 20.0

# setup system
sim.setup(model_binary_module=speed_tracker)

# build verts
reader = SDRAMReaderAndTransmitter(mbs)
reader.add_constraint(ChipAndCoreConstraint(x=1, y=1))
receiver = PacketGatherer()

# add verts to graph
sim.add_machine_vertex_instance(reader)
sim.add_machine_vertex_instance(receiver)

# build and add edge to graph
sim.add_machine_edge_instance(MachineEdge(reader, receiver), "TRANSMIT")

# run forever (to allow better speed testing)
sim.run()
def run(self, mbs, number_of_repeats):
    # setup system
    sim.setup(
        model_binary_module=(
            test_extra_monitor_core_data_extraction_multiple_locations),
        n_chips_required=49 * 2)

    # build vertices
    locs = [(0, 0), (2, 2), (7, 7), (3, 0), (1, 0), (0, 1), (3, 3), (4, 4),
            (5, 5), (3, 5), (4, 0), (7, 4), (8, 4), (4, 8), (11, 11),
            (11, 0), (0, 11), (6, 3), (0, 6)]
    writers = list()

    for chip_x, chip_y in locs:
        writer = SDRAMWriter(
            mbs, constraint=ChipAndCoreConstraint(chip_x, chip_y))
        # add vertices to graph
        sim.add_machine_vertex_instance(writer)
        writers.append(writer)

    sim.run(12)

    # get placements for extraction
    placements = sim.placements()
    machine = sim.machine()

    extra_monitor_vertices = sim.globals_variables.get_simulator().\
        _last_run_outputs['MemoryExtraMonitorVertices']
    extra_monitor_gatherers = sim.globals_variables.get_simulator().\
        _last_run_outputs[
            'MemoryMCGatherVertexToEthernetConnectedChipMapping']

    time_out_setter = extra_monitor_gatherers[(0, 0)]

    time_out_setter.set_cores_for_data_extraction(
        sim.transceiver(), extra_monitor_vertices, placements)

    for _ in range(0, number_of_repeats):
        for writer in writers:
            writer_placement = placements.get_placement_of_vertex(writer)
            writer_chip = machine.get_chip_at(
                writer_placement.x, writer_placement.y)
            writer_nearest_ethernet = machine.get_chip_at(
                writer_chip.nearest_ethernet_x,
                writer_chip.nearest_ethernet_y)

            receiver = None
            gatherer = extra_monitor_gatherers[
                (writer_nearest_ethernet.x, writer_nearest_ethernet.y)]
            for vertex in extra_monitor_vertices:
                placement = placements.get_placement_of_vertex(vertex)
                if (placement.x == writer_placement.x and
                        placement.y == writer_placement.y):
                    receiver = vertex

            start = float(time.time())
            data = gatherer.get_data(
                sim.transceiver(),
                placements.get_placement_of_vertex(receiver),
                self._get_data_region_address(
                    sim.transceiver(), writer_placement),
                writer.mbs_in_bytes)
            end = float(time.time())

            print("time taken to extract {} MB is {}. MBS of {}".format(
                mbs, end - start, (mbs * 8) / (end - start)))

            self._check_data(data)

    time_out_setter.unset_cores_for_data_extraction(
        sim.transceiver(), extra_monitor_vertices, placements)
def run_broken():
    machine_time_step = 1000
    time_scale_factor = 1
    # machine_port = 11111
    machine_receive_port = 22222
    machine_host = "0.0.0.0"
    live_gatherer_label = "LiveHeatGatherer"
    notify_port = 19999
    database_listen_port = 19998

    # set up the front end and ask for the detected machines dimensions
    front_end.setup(
        graph_label="heat_demo_graph",
        model_binary_module=sys.modules[__name__],
        database_socket_addresses={SocketAddress(
            "127.0.0.1", notify_port, database_listen_port)})
    machine = front_end.machine()

    # create a live gatherer vertex for each board
    default_gatherer = None
    live_gatherers = dict()
    used_cores = set()
    for chip in machine.ethernet_connected_chips:

        # Try to use core 17 if one is available as it is outside the grid
        processor = chip.get_processor_with_id(17)
        if processor is None or processor.is_monitor:
            processor = chip.get_first_none_monitor_processor()
        if processor is not None:
            live_gatherer = front_end.add_machine_vertex(
                LivePacketGatherMachineVertex,
                {
                    'label': live_gatherer_label,
                    'ip_address': machine_host,
                    'port': machine_receive_port,
                    'payload_as_time_stamps': False,
                    'use_payload_prefix': False,
                    'strip_sdp': True,
                    'message_type': EIEIOType.KEY_PAYLOAD_32_BIT
                })
            live_gatherers[chip.x, chip.y] = live_gatherer
            used_cores.add((chip.x, chip.y, processor.processor_id))
            if default_gatherer is None:
                default_gatherer = live_gatherer

    # Create a list of lists of vertices (x * 4) by (y * 4)
    # (for 16 cores on a chip - missing cores will have missing vertices)
    max_x_element_id = (machine.max_chip_x + 1) * 4
    max_y_element_id = (machine.max_chip_y + 1) * 4
    vertices = [
        [None for _ in range(max_y_element_id)]
        for _ in range(max_x_element_id)
    ]

    receive_labels = list()
    for x in range(0, max_x_element_id):
        for y in range(0, max_y_element_id):
            chip_x = x // 4
            chip_y = y // 4
            core_x = x % 4
            core_y = y % 4
            core_p = ((core_x * 4) + core_y) + 1

            # Add an element if the chip and core exists
            chip = machine.get_chip_at(chip_x, chip_y)
            if chip is not None:
                core = chip.get_processor_with_id(core_p)
                if (core is not None and not core.is_monitor and
                        (chip_x, chip_y, core_p) not in used_cores):
                    element = front_end.add_machine_vertex(
                        HeatDemoVertex,
                        {
                            'machine_time_step': machine_time_step,
                            'time_scale_factor': time_scale_factor
                        },
                        label="Heat Element {}, {}".format(x, y))
                    vertices[x][y] = element
                    vertices[x][y].add_constraint(
                        ChipAndCoreConstraint(chip_x, chip_y, core_p))

                    # add a link from the heat element to the live packet
                    # gatherer
                    live_gatherer = live_gatherers.get(
                        (chip.nearest_ethernet_x, chip.nearest_ethernet_y),
                        default_gatherer)
                    front_end.add_machine_edge(
                        MachineEdge,
                        {
                            'pre_vertex': vertices[x][y],
                            'post_vertex': live_gatherer
                        },
                        label="Live output from {}, {}".format(x, y),
                        semantic_label="TRANSMISSION")
                    receive_labels.append(vertices[x][y].label)

    # build edges
    for x in range(0, max_x_element_id):
        for y in range(0, max_y_element_id):
            if vertices[x][y] is not None:

                # Add a north link if not at the top
                if (y + 1 < max_y_element_id and
                        vertices[x][y + 1] is not None):
                    front_end.add_machine_edge(
                        HeatDemoEdge,
                        {
                            'pre_vertex': vertices[x][y],
                            'post_vertex': vertices[x][y + 1],
                            'direction': HeatDemoEdge.DIRECTIONS.SOUTH
                        },
                        label="North Edge from {}, {} to {}, {}".format(
                            x, y, x, y + 1),
                        semantic_label="TRANSMISSION")

                # Add an east link if not at the right
                if (x + 1 < max_x_element_id and
                        vertices[x + 1][y] is not None):
                    front_end.add_machine_edge(
                        HeatDemoEdge,
                        {
                            'pre_vertex': vertices[x][y],
                            'post_vertex': vertices[x + 1][y],
                            'direction': HeatDemoEdge.DIRECTIONS.WEST
                        },
                        label="East Edge from {}, {} to {}, {}".format(
                            x, y, x + 1, y),
                        semantic_label="TRANSMISSION")

                # Add a south link if not at the bottom
                if (y - 1) >= 0 and vertices[x][y - 1] is not None:
                    front_end.add_machine_edge(
                        HeatDemoEdge,
                        {
                            'pre_vertex': vertices[x][y],
                            'post_vertex': vertices[x][y - 1],
                            'direction': HeatDemoEdge.DIRECTIONS.NORTH
                        },
                        label="South Edge from {}, {} to {}, {}".format(
                            x, y, x, y - 1),
                        semantic_label="TRANSMISSION")

                # Add a west link if not at the left
                if (x - 1) >= 0 and vertices[x - 1][y] is not None:
                    front_end.add_machine_edge(
                        HeatDemoEdge,
                        {
                            'pre_vertex': vertices[x][y],
                            'post_vertex': vertices[x - 1][y],
                            'direction': HeatDemoEdge.DIRECTIONS.EAST
                        },
                        label="West Edge from {}, {} to {}, {}".format(
                            x, y, x - 1, y),
                        semantic_label="TRANSMISSION")

    # Set up the live connection for receiving heat elements
    live_heat_connection = LiveEventConnection(
        live_gatherer_label, receive_labels=receive_labels,
        local_port=notify_port, machine_vertices=True)
    heat_values = defaultdict(list)
    condition = Condition()

    def receive_heat(label, atom, value):
        with condition:
            print("{}: {}".format(label, value / 65536.0))

    # Set up callbacks to occur when spikes are received
    for label in receive_labels:
        live_heat_connection.add_receive_callback(label, receive_heat)

    front_end.run(1000)
    front_end.stop()

    for label in receive_labels:
        print("{}: {}".format(
            label,
            ["{:05.2f}".format(value) for value in heat_values[label]]))
import struct
import traceback
import spinnaker_graph_front_end as sim
import time
from spinnaker_graph_front_end.examples import speed_test_solo
from spinnaker_graph_front_end.examples.speed_test_solo.\
    packet_gatherer_cheat import PacketGathererCheat

# data to write
mbs = 1.0

# setup system
sim.setup(model_binary_module=speed_test_solo)

# build verts
receiver = PacketGathererCheat(mbs, 1)

# add verts to graph
sim.add_machine_vertex_instance(receiver)

# run forever (to allow better speed testing)
sim.run()

# get placements for extraction
placements = sim.placements()

# try getting data via mc transmission
start = None
end = None
import spinnaker_graph_front_end as front_end
from pacman.model.constraints.placer_constraints import ChipAndCoreConstraint
from spinnaker_graph_front_end.examples.hello_world.hello_world_vertex \
    import HelloWorldVertex
import logging
import os
from spinnaker_graph_front_end.examples.test_fixed_router.\
    hello_world_vertex_clone import HelloWorldVertexClone

logger = logging.getLogger(__name__)

front_end.setup(n_chips_required=None, model_binary_folder=os.getcwd())
front_end.globals_variables.get_simulator().update_extra_mapping_inputs(
    {"FixedRouteDestinationClass": HelloWorldVertexClone})

# fill all cores with a HelloWorldVertex each
front_end.add_machine_vertex_instance(
    HelloWorldVertex(label="transmitter",
                     constraints=[ChipAndCoreConstraint(x=1, y=1)]))
front_end.add_machine_vertex_instance(
    HelloWorldVertexClone(label="the clone!",
                          constraints=[ChipAndCoreConstraint(x=0, y=0)]))

front_end.run(10)
front_end.stop()
def generate_machine_graph (self):
    """ generates a machine graph for the application graph
    """
    print ("generating machine graph")

    # path to binary files
    binaries_path = os.path.join(os.path.dirname(__file__),
                                 "..", "binaries")

    # estimate number of SpiNNaker boards required
    # number of subgroups
    for grp in self.groups:
        self.subgroups += grp.subgroups

    # number of required cores
    w_cores = self.subgroups * self.subgroups
    s_cores = self.subgroups * (((self.subgroups - 2) //
                                 (MLPConstants.MAX_S_CORE_LINKS - 1)) + 1)
    i_cores = self.subgroups
    t_cores = self.subgroups
    cores = w_cores + s_cores + i_cores + t_cores
    s = '' if cores == 1 else 's'
    print (f"need {cores} SpiNNaker core{s}")

    # number of required chips
    chips = ((cores - 1) // MLPConstants.DEF_SPINN_CORES_PER_CHIP) + 1
    s = '' if chips == 1 else 's'
    print (f"estimating {chips} SpiNNaker chip{s}")

    # number of required boards
    boards = ((chips - 1) // MLPConstants.DEF_SPINN_CHIPS_PER_BOARD) + 1
    s = '' if boards == 1 else 's'
    print (f"requesting {boards} SpiNNaker board{s}")

    # request a SpiNNaker machine and setup the machine graph
    try:
        gfe.setup (model_binary_folder = binaries_path,
                   n_boards_required = boards
                   )
    except Exception as err:
        print ("\n--------------------------------------------------")
        print (f"error: {err}")
        print ("--------------------------------------------------\n")
        return False

    # create weight, sum, input and threshold
    # machine vertices associated with every subgroup
    for grp in self.groups:
        for sgrp in range (grp.subgroups):
            # create one weight core for every
            # (from_group/from_subgroup, group/subgroup) pair
            #TODO: all-zero cores can be optimised out
            wvs = []
            for from_grp in self.groups:
                for from_sgrp in range (from_grp.subgroups):
                    wv = WeightVertex (self, grp, sgrp,
                                       from_grp, from_sgrp)
                    gfe.add_machine_vertex_instance (wv)
                    wvs.append (wv)
            grp.w_vertices.append (wvs)

            # create a sum core tree per subgroup
            #NOTE: sum vertices are added during tree building
            svt = SumVertexTree (self, grp, sgrp)
            grp.s_vertex.append (svt)

            # create one input core per subgroup
            iv = InputVertex (self, grp, sgrp)
            grp.i_vertex.append (iv)
            gfe.add_machine_vertex_instance (iv)

            # create one threshold core per subgroup
            tv = ThresholdVertex (self, grp, sgrp)
            grp.t_vertex.append (tv)
            gfe.add_machine_vertex_instance (tv)

    # groups and subgroups with special functions
    first_grp = self.groups[0]
    first_subgroup_svt = first_grp.s_vertex[0]
    last_out_grp = self.output_chain[-1]
    last_out_subgroup_t_vertex = (
        last_out_grp.t_vertex[last_out_grp.subgroups - 1]
        )

    # create associated forward, backprop, link delta summation,
    # criterion, stop and sync machine edges for every subgroup
    for grp in self.groups:
        for sgrp in range (grp.subgroups):
            svt = grp.s_vertex[sgrp]
            iv = grp.i_vertex[sgrp]
            tv = grp.t_vertex[sgrp]

            for wv in grp.w_vertices[sgrp]:
                from_grp = wv.from_group
                from_sgrp = wv.from_subgroup
                from_svt = from_grp.s_vertex[from_sgrp]
                from_tv = from_grp.t_vertex[from_sgrp]

                # sum tree leaf to connect to depends on group/subgroup
                svt_leaf = svt.leaf (from_grp, from_sgrp)
                from_svt_leaf = from_svt.leaf (grp, sgrp)

                # forward w to s link
                gfe.add_machine_edge_instance (
                    MachineEdge (wv, svt_leaf), wv.fwd_link)

                # forward t to w (multicast) link
                gfe.add_machine_edge_instance (
                    MachineEdge (from_tv, wv), from_tv.fwd_link)

                # backprop w to s link
                gfe.add_machine_edge_instance (
                    MachineEdge (wv, from_svt_leaf), wv.bkp_link)

                # backprop i to w (multicast) link
                gfe.add_machine_edge_instance (
                    MachineEdge (iv, wv), iv.bkp_link)

                # link delta summation w to s link
                gfe.add_machine_edge_instance (
                    MachineEdge (wv, svt_leaf), wv.lds_link)

                # link delta result (first group) s to w (multicast) link
                gfe.add_machine_edge_instance (
                    MachineEdge (first_subgroup_svt.root, wv),
                    first_subgroup_svt.root.lds_link)

                # stop (last output group/subgroup) t to w (multicast) link
                gfe.add_machine_edge_instance (
                    MachineEdge (last_out_subgroup_t_vertex, wv),
                    last_out_subgroup_t_vertex.stp_link)

                # forward sync generation w to s links
                gfe.add_machine_edge_instance (
                    MachineEdge (wv, svt_leaf), wv.fsg_link)

            # forward s to i link
            gfe.add_machine_edge_instance (
                MachineEdge (svt.root, iv), svt.root.fwd_link)

            # forward i to t link
            gfe.add_machine_edge_instance (
                MachineEdge (iv, tv), iv.fwd_link)

            # backprop t to i link
            gfe.add_machine_edge_instance (
                MachineEdge (tv, iv), tv.bkp_link)

            # backprop s to t link
            gfe.add_machine_edge_instance (
                MachineEdge (svt.root, tv), svt.root.bkp_link)

            # link delta summation s to s link
            if sgrp != 0:
                # first subgroup collects from all other subgroups
                gfe.add_machine_edge_instance (
                    MachineEdge (svt.root, grp.s_vertex[0].root),
                    svt.root.lds_link)
            elif grp != first_grp:
                # first group collects from all other groups
                gfe.add_machine_edge_instance (
                    MachineEdge (svt.root, first_subgroup_svt.root),
                    svt.root.lds_link)

            # t to t criterion link
            # intra-group criterion link to last subgroup t
            if sgrp < (grp.subgroups - 1):
                gfe.add_machine_edge_instance (
                    MachineEdge (tv, grp.t_vertex[grp.subgroups - 1]),
                    tv.stp_link)
            elif grp != last_out_grp:
                # inter-group criterion link to last output subgroup
                gfe.add_machine_edge_instance (
                    MachineEdge (tv, last_out_subgroup_t_vertex),
                    tv.stp_link)

            # stop (last output group/subgroup) t to s (multicast) link
            for s in svt.vertices:
                gfe.add_machine_edge_instance (
                    MachineEdge (last_out_subgroup_t_vertex, s),
                    last_out_subgroup_t_vertex.stp_link)

            # stop (last output group/subgroup) t to i (multicast) link
            gfe.add_machine_edge_instance (
                MachineEdge (last_out_subgroup_t_vertex, iv),
                last_out_subgroup_t_vertex.stp_link)

            # stop (last output group/subgroup) t to t (multicast) link
            if tv != last_out_subgroup_t_vertex:
                gfe.add_machine_edge_instance (
                    MachineEdge (last_out_subgroup_t_vertex, tv),
                    last_out_subgroup_t_vertex.stp_link)

            # forward sync generation s to s links
            #NOTE: s cores that are tree internal nodes not involved
            if sgrp != 0:
                # first subgroup collects from all other subgroups
                gfe.add_machine_edge_instance (
                    MachineEdge (svt.root, grp.s_vertex[0].root),
                    svt.root.fsg_link)
            elif grp != first_grp:
                # first group collects from all other groups
                gfe.add_machine_edge_instance (
                    MachineEdge (svt.root, first_subgroup_svt.root),
                    svt.root.fsg_link)

    # forward sync generation first s to last t link
    gfe.add_machine_edge_instance (
        MachineEdge (first_subgroup_svt.root, last_out_subgroup_t_vertex),
        first_subgroup_svt.root.fsg_link)

    self._graph_rdy = True

    return True
    sys.exit("error: no connections to test")

# throttle injectors to avoid dropped packets
#NOTE: [master_1.0.0] [46] 3802278 - no NAKs
#NOTE: [master_SA_1.0.1] [46] 3802278 - no NAKs
INSIDE_INJECTOR_THROTTLE = [8, 8, 8, 8, 8, 8, 8, 8]

#NOTE: [master_1.0.0] [54] 3300327 - no NAKs
#NOTE: [master_SA_1.0.1] [54] 3300327 - no NAKs
#NOTE: [master_1.0.0] [48] (3409224, 3409237) 2727363 NAKs/38 dropped
#NOTE: [master_SA_1.0.1] [48] (3348700, 3409284) 2697373 NAKs/10982 dropped
OUTSIDE_INJECTOR_THROTTLE = [8, 8, 8, 8, 8, 8, 8, 8]

# make sure to get two neighbouring boards across FPGA2
gfe.setup(machine_time_step=1000000,
          n_boards_required=3,
          model_binary_folder=os.path.dirname(__file__) + "/..")

# create connections along the border
for n in range(NUM_CONNECTIONS):
    # inside chip coordinates
    (xin, yin) = INSIDE_CHIPS[n]

    # outside chip coordinates
    (xout, yout) = OUTSIDE_CHIPS[n]

    for i in range(NUM_INJECTORS_PER_CONNECTION):
        if DO_INSIDE_OUT:
            # instantiate inside-out injector vertex
            iv = Pkt_Injector_Vertex(x_coord=xin, y_coord=yin,
def run_model(data, n_chips=None, n_ihcan=0, fs=44100, resample_factor=1):

    # Set up the simulation
    g.setup(n_chips_required=n_chips, model_binary_module=model_binaries)

    # Get the number of cores available for use
    n_cores = 0
    machine = g.machine()

    # Create a OME for each chip
    boards = dict()

    # changed to lists to ensure data is read back in the same order that
    # vertices are instantiated
    ihcans = list()

    cf_index = 0
    count = 0
    for chip in machine.chips:
        if count >= n_chips:
            break
        else:
            boards[chip.x, chip.y] = chip.ip_address

            for j in range(n_ihcan):
                ihcan = IHCANVertex(data[j][:], fs, resample_factor)
                g.add_machine_vertex_instance(ihcan)
                # constrain placement to local chip
                ihcan.add_constraint(ChipAndCoreConstraint(chip.x, chip.y))
                # ihcans[chip.x, chip.y, j] = ihcan
                ihcans.append(ihcan)

            count = count + 1

    # Run the simulation
    g.run(None)

    # Wait for the application to finish
    txrx = g.transceiver()
    app_id = globals_variables.get_simulator()._app_id
    # logger.info("Running {} worker cores".format(n_workers))
    logger.info("Waiting for application to finish...")
    running = txrx.get_core_state_count(app_id, CPUState.RUNNING)
    while running > 0:
        time.sleep(0.5)
        error = txrx.get_core_state_count(
            app_id, CPUState.RUN_TIME_EXCEPTION)
        watchdog = txrx.get_core_state_count(app_id, CPUState.WATCHDOG)
        if error > 0 or watchdog > 0:
            error_msg = "Some cores have failed ({} RTE, {} WDOG)".format(
                error, watchdog)
            raise Exception(error_msg)
        running = txrx.get_core_state_count(app_id, CPUState.RUNNING)

    # Get the data back
    samples = list()
    progress = ProgressBar(len(ihcans), "Reading results")
    for ihcan in ihcans:
        samples.append(ihcan.read_samples(g.buffer_manager()))
        progress.update()
    progress.end()
    samples = numpy.hstack(samples)

    # Close the machine
    g.stop()

    print("channels running: ", len(ihcans) / 5.0)
    print("output data: {} fibres with length {}".format(
        len(ihcans) * 2, len(samples)))

    # if(len(samples) != len(ihcans)*2*numpy.floor(
    #         len(data[0][0])/100)*100*(1.0/resample_factor)):
    if (len(samples) !=
            len(ihcans) * 2 * numpy.floor(len(data[0][0]) / 96) * 96):
        print("samples length {} isn't expected size {}".format(
            len(samples),
            len(ihcans) * 2 * numpy.floor(len(data[0][0]) / 96) * 96))

    return samples
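# Sketch of the size check at the end of run_model, pulled out for clarity
# (not part of the original script): each IHCAN vertex contributes two
# fibres, and samples come back in whole blocks of 96, so the expected total
# is len(ihcans) * 2 * floor(n_input_samples / 96) * 96.
def _expected_sample_count(n_ihcan_vertices, n_input_samples):
    return int(n_ihcan_vertices * 2 * numpy.floor(n_input_samples / 96) * 96)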
# Copyright (c) 2017-2019 The University of Manchester
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import spinnaker_graph_front_end as front_end
from spinnaker_graph_front_end.examples.Conways.no_edges_examples.\
    conways_basic_cell import ConwayBasicCell

# set up the front end and ask for the detected machines dimensions
front_end.setup()

for count in range(0, 60):
    front_end.add_machine_vertex_instance(
        ConwayBasicCell("cell{}".format(count)))

front_end.run(1)
front_end.stop()
def run_mcmc(
        model, data, n_samples, burn_in=2000, thinning=5,
        degrees_of_freedom=3.0, seed=None, n_chips=None, n_boards=None):
    """ Executes an MCMC model, returning the received samples

    :param model: The MCMCModel to be used
    :param data: The data to sample
    :param n_samples: The number of samples to generate
    :param burn_in:\
        no of MCMC transitions to reach apparent equilibrium before\
        generating inference samples
    :param thinning:\
        sampling rate i.e. 5 = 1 sample for 5 generated steps
    :param degrees_of_freedom:\
        The number of degrees of freedom to jump around with
    :param seed: The random seed to use
    :param n_chips: The number of chips to run the model on
    :param root_finder: Use the root finder by adding root finder vertices
    :param cholesky: Use the Cholesky algorithm by adding Cholesky vertices

    :return: The samples read
    :rtype: A numpy array with fields for each model state variable
    """

    # Set up the simulation
    g.setup(n_boards_required=n_boards, n_chips_required=n_chips,
            model_binary_module=model_binaries)

    # Get the number of cores available for use
    n_cores = 0
    machine = g.machine()

    # Create a coordinator for each board
    coordinators = dict()
    boards = dict()
    for chip in machine.ethernet_connected_chips:

        # Create a coordinator
        coordinator = MCMCCoordinatorVertex(
            model, data, n_samples, burn_in, thinning,
            degrees_of_freedom, seed)
        g.add_machine_vertex_instance(coordinator)

        # Put the coordinator on the Ethernet chip
        coordinator.add_constraint(ChipAndCoreConstraint(chip.x, chip.y))
        coordinators[chip.x, chip.y] = coordinator
        boards[chip.x, chip.y] = chip.ip_address

    # Go through all the chips and add the workhorses
    n_chips_on_machine = machine.n_chips
    n_workers = 0
    if (model.root_finder):
        n_root_finders = 0
    if (model.cholesky):
        n_cholesky = 0
    for chip in machine.chips:

        # Count the cores in the processor
        # (-1 if this chip also has a coordinator)
        n_cores = len([p for p in chip.processors if not p.is_monitor])
        if (chip.x, chip.y) in coordinators:
            n_cores -= 3  # coordinator and extra_monitor_support (2)
            if (model.root_finder):
                if (model.cholesky):
                    n_cores = n_cores // 3
                else:
                    n_cores = n_cores // 2
        else:
            n_cores -= 1  # just extra_monitor_support
            if (model.root_finder):
                if (model.cholesky):
                    n_cores = n_cores // 3
                else:
                    n_cores = n_cores // 2

        # Find the coordinator for the board (or 0, 0 if it is missing)
        eth_x = chip.nearest_ethernet_x
        eth_y = chip.nearest_ethernet_y
        coordinator = coordinators.get((eth_x, eth_y))
        if coordinator is None:
            print("Warning - couldn't find {}, {} for chip {}, {}".format(
                eth_x, eth_y, chip.x, chip.y))
            coordinator = coordinators[0, 0]
            print("Using coordinator ", coordinator)

        # hard-code remove some cores (chip power monitor etc.) just
        # to see what happens
        # n_cores -= non_worker_cores_per_chip
        # print 'n_cores: ', n_cores

        # Add a vertex for each core
        for _ in range(n_cores):

            # Create the vertex and add it to the graph
            vertex = MCMCVertex(coordinator, model)
            n_workers += 1
            g.add_machine_vertex_instance(vertex)

            # Put the vertex on the same board as the coordinator
            vertex.add_constraint(ChipAndCoreConstraint(chip.x, chip.y))

            # Add an edge from the coordinator to the vertex,
            # to send the data
            g.add_machine_edge_instance(
                MachineEdge(coordinator, vertex),
                coordinator.data_partition_name)

            # Add an edge from the vertex to the coordinator,
            # to send acknowledgement
            g.add_machine_edge_instance(
                MachineEdge(vertex, coordinator),
                coordinator.acknowledge_partition_name)

            if (model.root_finder):
                # Create a root finder vertex
                rf_vertex = MCMCRootFinderVertex(vertex, model)
                n_root_finders += 1
                g.add_machine_vertex_instance(rf_vertex)

                # put it on the same chip as the standard mcmc vertex?
                # no - put it on a "nearby" chip, however that works
                rf_vertex.add_constraint(
                    ChipAndCoreConstraint(chip.x, chip.y))

                # Add an edge from mcmc vertex to root finder vertex,
                # to "send" the data - need to work this out
                g.add_machine_edge_instance(
                    MachineEdge(vertex, rf_vertex),
                    vertex.parameter_partition_name)

                # Add edge from root finder vertex back to mcmc vertex
                # to send acknowledgement / result - need to work this out
                g.add_machine_edge_instance(
                    MachineEdge(rf_vertex, vertex),
                    vertex.result_partition_name)

            if (model.cholesky):
                # Create a Cholesky vertex
                cholesky_vertex = MCMCCholeskyVertex(vertex, model)
                n_cholesky += 1
                g.add_machine_vertex_instance(cholesky_vertex)

                # put it on the same chip as the standard mcmc vertex?
                # no - put it on a "nearby" chip, however that works
                cholesky_vertex.add_constraint(
                    ChipAndCoreConstraint(chip.x, chip.y))

                # Add an edge from mcmc vertex to Cholesky vertex,
                # to "send" the data - need to work this out
                g.add_machine_edge_instance(
                    MachineEdge(vertex, cholesky_vertex),
                    vertex.cholesky_partition_name)

                # Add edge from Cholesky vertex back to mcmc vertex
                # to send acknowledgement / result - need to work this out
                g.add_machine_edge_instance(
                    MachineEdge(cholesky_vertex, vertex),
                    vertex.cholesky_result_partition_name)

    start_computing_time = time.time()

    logger.info("n_chips_on_machine {}".format(n_chips_on_machine))
    logger.info("Running {} worker cores".format(n_workers))
    if (model.root_finder):
        logger.info("Running {} root finder cores".format(n_root_finders))
    if (model.cholesky):
        logger.info("Running {} Cholesky cores".format(n_cholesky))

    # Run the simulation
    g.run_until_complete()

    mid_computing_time = time.time()

    # Wait for the application to finish
    txrx = g.transceiver()
    app_id = globals_variables.get_simulator()._app_id
    logger.info("Running {} worker cores".format(n_workers))
    if (model.root_finder):
        logger.info("Running {} root finder cores".format(n_root_finders))
    if (model.cholesky):
        logger.info("Running {} Cholesky cores".format(n_cholesky))
    logger.info("Waiting for application to finish...")
    running = txrx.get_core_state_count(app_id, CPUState.RUNNING)
    # there are now cores doing extra_monitor etc.
    non_worker_cores = n_chips_on_machine + (2 * len(boards))
    while running > non_worker_cores:
        time.sleep(0.5)
        error = txrx.get_core_state_count(
            app_id, CPUState.RUN_TIME_EXCEPTION)
        watchdog = txrx.get_core_state_count(app_id, CPUState.WATCHDOG)
        if error > 0 or watchdog > 0:
            error_msg = "Some cores have failed ({} RTE, {} WDOG)".format(
                error, watchdog)
            raise Exception(error_msg)
        running = txrx.get_core_state_count(app_id, CPUState.RUNNING)
        print('running: ', running)

    finish_computing_time = time.time()

    # Get the data back
    samples = dict()
    for coord, coordinator in iteritems(coordinators):
        samples[coord[0], coord[1]] = coordinator.read_samples(
            g.buffer_manager())

    # Close the machine
    g.stop()

    finish_time = time.time()

    # Note: this timing appears to be incorrect now; needs looking at
    print("Overhead time is %s seconds"
          % (start_computing_time - start_time))
    print("Computing time is %s seconds"
          % (finish_computing_time - start_computing_time))
    print("run_until_complete takes %s seconds"
          % (mid_computing_time - start_computing_time))
    print("Data collecting time is %s seconds"
          % (finish_time - finish_computing_time))
    print("Overall running time is %s seconds" % (finish_time - start_time))

    return samples
import spinnaker_graph_front_end as sim
from pacman.model.constraints.placer_constraints import ChipAndCoreConstraint
from pacman.model.graphs.machine import MachineEdge
from spinnaker_graph_front_end.examples.speed_tracker_with_protocol.\
    packet_gatherer_with_protocol import PacketGathererWithProtocol
from spinnaker_graph_front_end.examples.speed_tracker_with_protocol.\
    sdram_reader_and_transmitter_with_protocol import \
    SDRAMReaderAndTransmitterWithProtocol
import time
from spinnaker_graph_front_end.examples import speed_tracker_with_protocol

# data to write
mbs = 20.0

# setup system
sim.setup(model_binary_module=speed_tracker_with_protocol)

# build verts
reader = SDRAMReaderAndTransmitterWithProtocol(mbs)
reader.add_constraint(ChipAndCoreConstraint(x=1, y=1))
receiver = PacketGathererWithProtocol()

# add verts to graph
sim.add_machine_vertex_instance(reader)
sim.add_machine_vertex_instance(receiver)

# build and add edge to graph
sim.add_machine_edge_instance(MachineEdge(reader, receiver), "TRANSMIT")

# run forever (to allow better speed testing)
sim.run()
def generate_machine_graph (self):
    """ generates a machine graph for the application graph
    """
    print ("generating machine graph")

    # setup the machine graph
    g.setup ()

    # set the number of write blocks before generating vertices
    self._num_write_blks = len (self.output_chain)

    # create associated weight, sum, input and threshold
    # machine vertices for every network group
    for grp in self.groups:
        # create one weight core per (from_group, group) pair
        # NOTE: all-zero cores can be optimised out
        for from_grp in self.groups:
            wv = WeightVertex (self, grp, from_grp)
            grp.w_vertices.append (wv)
            g.add_machine_vertex_instance (wv)
            self._num_vertices += 1

        # create one sum core per group
        sv = SumVertex (self, grp)
        grp.s_vertex = sv
        g.add_machine_vertex_instance (sv)
        self._num_vertices += 1

        # create one input core per group
        iv = InputVertex (self, grp)
        grp.i_vertex = iv
        g.add_machine_vertex_instance (iv)
        self._num_vertices += 1

        # create one threshold core per group
        tv = ThresholdVertex (self, grp)
        grp.t_vertex = tv
        g.add_machine_vertex_instance (tv)
        self._num_vertices += 1

    # create associated forward, backprop, synchronisation and
    # stop machine edges for every network group
    first = self.groups[0]
    for grp in self.groups:
        for w in grp.w_vertices:
            _frmg = w.from_group

            # create forward w to s links
            g.add_machine_edge_instance (MachineEdge (w, grp.s_vertex),
                                         w.fwd_link)

            # create forward t to w (multicast) links
            g.add_machine_edge_instance (MachineEdge (_frmg.t_vertex, w),
                                         _frmg.t_vertex.fwd_link)

            # create backprop w to s links
            g.add_machine_edge_instance (MachineEdge (w, _frmg.s_vertex),
                                         w.bkp_link)

            # create backprop i to w (multicast) links
            g.add_machine_edge_instance (MachineEdge (grp.i_vertex, w),
                                         grp.i_vertex.bkp_link)

            # create forward synchronisation w to t links
            g.add_machine_edge_instance (MachineEdge (w, _frmg.t_vertex),
                                         w.fds_link)

            # create link delta summation w to s links
            g.add_machine_edge_instance (MachineEdge (w, grp.s_vertex),
                                         w.lds_link)

            # create link delta summation result s (first) to w links
            g.add_machine_edge_instance (MachineEdge (first.s_vertex, w),
                                         first.s_vertex.lds_link)

        # create forward s to i link
        g.add_machine_edge_instance (MachineEdge (grp.s_vertex,
                                                  grp.i_vertex),
                                     grp.s_vertex.fwd_link)

        # create backprop s to t link
        g.add_machine_edge_instance (MachineEdge (grp.s_vertex,
                                                  grp.t_vertex),
                                     grp.s_vertex.bkp_link)

        # create forward i to t link
        g.add_machine_edge_instance (MachineEdge (grp.i_vertex,
                                                  grp.t_vertex),
                                     grp.i_vertex.fwd_link)

        # create backprop t to i link
        g.add_machine_edge_instance (MachineEdge (grp.t_vertex,
                                                  grp.i_vertex),
                                     grp.t_vertex.bkp_link)

        # create link delta summation s to s links - all s cores
        # (except the first) send to the first s core
        if grp != first:
            print ("Creating lds s-s edge from group {} to group {}".
                   format (grp.label, first.label))
            g.add_machine_edge_instance (MachineEdge (grp.s_vertex,
                                                      first.s_vertex),
                                         grp.s_vertex.lds_link)

        # create stop links, if OUTPUT group
        if grp in self.output_chain:
            # if last OUTPUT group broadcast stop decision
            if grp == self.output_chain[-1]:
                for stpg in self.groups:
                    # create stop links to all w cores
                    for w in stpg.w_vertices:
                        g.add_machine_edge_instance\
                            (MachineEdge (grp.t_vertex, w),
                             grp.t_vertex.stp_link)

                    # create stop links to all s cores
                    g.add_machine_edge_instance\
                        (MachineEdge (grp.t_vertex, stpg.s_vertex),
                         grp.t_vertex.stp_link)

                    # create stop links to all i cores
                    g.add_machine_edge_instance\
                        (MachineEdge (grp.t_vertex, stpg.i_vertex),
                         grp.t_vertex.stp_link)

                    # create stop links to t cores (no link to itself!)
                    if stpg != grp:
                        g.add_machine_edge_instance\
                            (MachineEdge (grp.t_vertex, stpg.t_vertex),
                             grp.t_vertex.stp_link)
            else:
                # create stop link to next OUTPUT group in chain
                _inx = self.output_chain.index (grp)
                _stpg = self.output_chain[_inx + 1]
                g.add_machine_edge_instance (MachineEdge (grp.t_vertex,
                                                          _stpg.t_vertex),
                                             grp.t_vertex.stp_link)
spinnaker_link_used = 0

print("Loading Dataset")
spike_train = []
video_sequence = []
if not use_spinn_link:
    spike_train, video_sequence, data_time_ms = \
        load_vbottle(filename=filename,
                     window_size=constants.US_PER_STEP / 1000,
                     tsscaler=0.000000320)
    if spike_train == -1:
        quit()
    print("Dataset goes for {} ms".format(data_time_ms))

front_end.setup(n_chips_required=n_chips_required,
                model_binary_module=cbin,
                machine_time_step=machine_time_step)

# VERTICES
particle_list = list()
filter_list = list()

# create "input"
# running with test data use this vertex
if use_spinn_link:
    # when running on the icub we'll need this vertex
    input_vertex = ICUBInputVertex(spinnaker_link_id=spinnaker_link_used,
                                   board_address=None,
                                   label="Input Vertex")
    front_end.add_machine_vertex_instance(input_vertex)
        },
        label="Data packet at x {}".format(core))
    vertices.append(current_vertex)

make_circle(vertices, len(vertices), front_end)

'''--------------------------------------------------------------------'''

# read the csv data with help from the parser class
getData = parser('../../resources/date.csv')
raw_data = getData.read_data()

logger = logging.getLogger(__name__)

front_end.setup(
    n_chips_required=None, model_binary_folder=os.path.dirname(__file__))

''' calculate total number of 'free' cores for the given board
(i.e. does not include those busy with SARK or reinjection)'''
total_number_of_cores = \
    front_end.get_number_of_available_cores_on_machine()

# param1: data
# param2: number of chips used
# param3: what columns to use
# param4: how many string columns exist?
# param5: function id
load_data_onto_vertices(raw_data, 1, [0], 1, 2)

front_end.run(10000)
import spinnaker_graph_front_end as sim
from pacman.model.constraints.placer_constraints import ChipAndCoreConstraint
from pacman.model.graphs.machine import MachineEdge
from spinnaker_graph_front_end.examples.\
    test_retransmission_phase_on_multi_cores.\
    packet_gatherer_with_protocol import PacketGathererWithProtocol
from spinnaker_graph_front_end.examples.\
    test_retransmission_phase_on_multi_cores.\
    sdram_reader_and_transmitter_with_protocol import \
    SDRAMReaderAndTransmitterWithProtocol
import time
from spinnaker_graph_front_end.examples import \
    test_retransmission_phase_on_multi_cores

# data to write
mbs = 1.0

# setup system
sim.setup(model_binary_module=test_retransmission_phase_on_multi_cores)

# build verts
reader = SDRAMReaderAndTransmitterWithProtocol(mbs)
reader.add_constraint(ChipAndCoreConstraint(x=1, y=1))
receiver = PacketGathererWithProtocol()

# add verts to graph
sim.add_machine_vertex_instance(reader)
sim.add_machine_vertex_instance(receiver)

# build and add edge to graph
sim.add_machine_edge_instance(MachineEdge(reader, receiver), "TRANSMIT")

# run forever (to allow better speed testing)
sim.run()
import os

import spinnaker_graph_front_end as gfe
from pacman.model.graphs.machine import MachineEdge

from pkt_injector_vertex import Pkt_Injector_Vertex
from pkt_extractor_vertex import Pkt_Extractor_Vertex

NUM_INJECTORS = 9

gfe.setup(machine_time_step=1000000,
          n_chips_required=1,
          model_binary_folder=os.path.dirname(__file__))

# instantiate injector vertices
injectors = []
for i in range(NUM_INJECTORS):
    iv = Pkt_Injector_Vertex(i)
    gfe.add_machine_vertex_instance(iv)
    injectors.append(iv)

# instantiate extractor vertices
ev = Pkt_Extractor_Vertex()
gfe.add_machine_vertex_instance(ev)

# create links from injectors to extractor
for iv in injectors:
    gfe.add_machine_edge_instance(MachineEdge(iv, ev), iv.inj_lnk)

gfe.run(10000)
# import math
import os

from pacman.model.graphs.machine import MachineEdge
import spinnaker_graph_front_end as front_end

from lattice_basic_cell import LatticeBasicCell
from lattice_edge import LatticeEdge
import global_settings

n_chips = (MAX_X_SIZE_OF_FABRIC * MAX_Y_SIZE_OF_FABRIC) // 10

# set up the front end and ask for the detected machines dimensions
front_end.setup(
    n_chips_required=n_chips,
    model_binary_folder=os.path.dirname(os.path.abspath("__file__")),
    machine_time_step=time_step,
    time_scale_factor=time_scale_factor)

# figure out if machine can handle simulation
cores = front_end.get_number_of_available_cores_on_machine()

# check if there are enough cores
if cores <= (MAX_X_SIZE_OF_FABRIC * MAX_Y_SIZE_OF_FABRIC):
    raise KeyError("Don't have enough cores to run simulation")

# contain the vertices for the connection aspect
vertices = [
    [None for _ in range(MAX_X_SIZE_OF_FABRIC)]
    for _ in range(MAX_Y_SIZE_OF_FABRIC)]

# build vertices