def run(self, mbs, x, y):
    """Build a reader/receiver vertex pair with the reader pinned to chip
    (x, y), wire them with a TRANSMIT edge, and run the transfer.

    Returns whatever ``self._do_run`` returns, or the sentinel tuple
    ``(None, False, False, "", 0)`` when chip (x, y) is not on the machine.
    """
    # Bring the simulator up with the protocol-search binary module.
    sim.setup(model_binary_module=(
        speed_tracker_with_protocol_search_c_code_version))

    # Construct both machine vertices; pin the reader to the requested chip.
    sdram_reader = SDRAMReaderAndTransmitterWithProtocol(mbs)
    sdram_reader.add_constraint(ChipAndCoreConstraint(x=x, y=y))
    gatherer = PacketGathererWithProtocol()

    # Register the vertices and the edge that carries the data between them.
    sim.add_machine_vertex_instance(sdram_reader)
    sim.add_machine_vertex_instance(gatherer)
    sim.add_machine_edge_instance(
        MachineEdge(sdram_reader, gatherer), "TRANSMIT")

    # Guard clause: the requested chip may not exist on this machine.
    if not sim.machine().is_chip_at(x, y):
        sim.stop()
        return None, False, False, "", 0
    return self._do_run(sdram_reader, gatherer, mbs)
def _get_gatherer_for_monitor(monitor):
    """Look up the gatherer serving *monitor*'s board.

    Returns a 2-tuple of (the full gatherer mapping, the gatherer keyed by
    the Ethernet chip nearest to the monitor's placement).
    """
    place = sim.placements().get_placement_of_vertex(monitor)
    home_chip = sim.machine().get_chip_at(place.x, place.y)
    # pylint: disable=protected-access
    mapping = sim.globals_variables.get_simulator(
        )._vertex_to_ethernet_connected_chip_mapping
    board_key = (home_chip.nearest_ethernet_x, home_chip.nearest_ethernet_y)
    return mapping, mapping[board_key]
def _get_gatherer_for_monitor(monitor):
    """Look up the gatherer serving *monitor*'s board.

    Reads the gatherer map out of the simulator's last-run outputs (under
    ``_GATHERER_MAP``) and returns (whole map, gatherer for the Ethernet
    chip nearest to the monitor's placement).
    """
    place = sim.placements().get_placement_of_vertex(monitor)
    home_chip = sim.machine().get_chip_at(place.x, place.y)
    # pylint: disable=protected-access
    mapping = sim.globals_variables.get_simulator(
        )._last_run_outputs[_GATHERER_MAP]
    board_key = (home_chip.nearest_ethernet_x, home_chip.nearest_ethernet_y)
    return mapping, mapping[board_key]
def run(self, mbs):
    """Write *mbs* megabytes into SDRAM via an SDRAMWriter vertex, then
    time how long the extra-monitor / gatherer path takes to stream the
    data back out, and verify the received data.

    :param mbs: number of megabytes for the SDRAMWriter to produce
    """
    # setup system
    sim.setup(model_binary_module=test_extra_monitor_core_data_extraction,
              n_chips_required=2)

    # build verts
    writer = SDRAMWriter(mbs)

    # add verts to graph
    sim.add_machine_vertex_instance(writer)

    sim.run(12)

    # get placements for extraction
    placements = sim.placements()
    machine = sim.machine()

    writer_placement = placements.get_placement_of_vertex(writer)
    writer_chip = \
        machine.get_chip_at(writer_placement.x, writer_placement.y)
    # Ethernet chip of the writer's board — used to pick the right gatherer.
    writer_nearest_ethernet = machine.get_chip_at(
        writer_chip.nearest_ethernet_x, writer_chip.nearest_ethernet_y)

    # Pull the extra-monitor vertices and the gatherer mapping out of the
    # simulator's last-run outputs.
    # pylint: disable=protected-access
    extra_monitor_vertices = sim.globals_variables.\
        get_simulator()._last_run_outputs['MemoryExtraMonitorVertices']
    extra_monitor_gatherers = sim.globals_variables.\
        get_simulator()._last_run_outputs[
            'MemoryMCGatherVertexToEthernetConnectedChipMapping']

    receiver = None
    gatherer = extra_monitor_gatherers[(writer_nearest_ethernet.x,
                                        writer_nearest_ethernet.y)]

    # Find the extra-monitor vertex placed on the same chip as the writer.
    # NOTE(review): if none is co-located, receiver stays None and the
    # get_data call below will fail on the placement lookup.
    for vertex in extra_monitor_vertices:
        placement = placements.get_placement_of_vertex(vertex)
        if (placement.x == writer_placement.x and
                placement.y == writer_placement.y):
            receiver = vertex

    start = float(time.time())
    # Switch the monitor cores into data-extraction mode, pull the data,
    # then restore normal operation; only the extraction itself is timed.
    gatherer.set_cores_for_data_extraction(sim.transceiver(),
                                           extra_monitor_vertices,
                                           placements)
    data = gatherer.get_data(
        sim.transceiver(),
        placements.get_placement_of_vertex(receiver),
        self._get_data_region_address(sim.transceiver(), writer_placement),
        writer.mbs_in_bytes)
    gatherer.unset_cores_for_data_extraction(sim.transceiver(),
                                             extra_monitor_vertices,
                                             placements)
    end = float(time.time())

    # (mbs * 8) / seconds = throughput in megabits per second
    print("time taken to extract {} MB is {}. MBS of {}".format(
        mbs, end - start, (mbs * 8) / (end - start)))

    self._check_data(data)
def run(self, mbs, number_of_repeats):
    """Place SDRAMWriter vertices at many fixed chip locations, then
    repeatedly extract each writer's data through the extra-monitor /
    gatherer path, timing and verifying every extraction.

    :param mbs: megabytes each SDRAMWriter produces
    :param number_of_repeats: how many times to sweep over all writers
    """
    # setup system
    sim.setup(model_binary_module=(
        test_extra_monitor_core_data_extraction_multiple_locations),
        n_chips_required=49 * 2)

    # build vertices — fixed spread of chip coordinates across the machine
    locs = [(0, 0), (2, 2), (7, 7), (3, 0), (1, 0), (0, 1), (3, 3), (4, 4),
            (5, 5), (3, 5), (4, 0), (7, 4), (8, 4), (4, 8), (11, 11),
            (11, 0), (0, 11), (6, 3), (0, 6)]
    writers = list()

    for chip_x, chip_y in locs:
        writer = SDRAMWriter(mbs, constraint=ChipAndCoreConstraint(
            chip_x, chip_y))
        # add vertices to graph
        sim.add_machine_vertex_instance(writer)
        writers.append(writer)

    sim.run(12)

    # get placements for extraction
    placements = sim.placements()
    machine = sim.machine()

    # Extra-monitor vertices and gatherer map from the last-run outputs.
    # pylint: disable=protected-access
    extra_monitor_vertices = sim.globals_variables. \
        get_simulator()._last_run_outputs[
            'MemoryExtraMonitorVertices']
    extra_monitor_gatherers = sim.globals_variables. \
        get_simulator()._last_run_outputs[
            'MemoryMCGatherVertexToEthernetConnectedChipMapping']

    # The (0, 0) gatherer is used once to switch ALL monitor cores into
    # extraction mode for the whole run, and to switch them back at the end.
    time_out_setter = extra_monitor_gatherers[(0, 0)]

    time_out_setter.set_cores_for_data_extraction(sim.transceiver(),
                                                  extra_monitor_vertices,
                                                  placements)

    for _ in range(0, number_of_repeats):
        for writer in writers:
            writer_placement = placements.get_placement_of_vertex(writer)
            writer_chip = \
                machine.get_chip_at(writer_placement.x, writer_placement.y)
            writer_nearest_ethernet = machine.get_chip_at(
                writer_chip.nearest_ethernet_x,
                writer_chip.nearest_ethernet_y)

            receiver = None
            # Extraction goes through the gatherer on the writer's board.
            gatherer = extra_monitor_gatherers[(writer_nearest_ethernet.x,
                                                writer_nearest_ethernet.y)]
            # Find the extra-monitor co-located with this writer.
            # NOTE(review): receiver stays None if none is on that chip.
            for vertex in extra_monitor_vertices:
                placement = placements.get_placement_of_vertex(vertex)
                if (placement.x == writer_placement.x and
                        placement.y == writer_placement.y):
                    receiver = vertex

            start = float(time.time())
            data = gatherer.get_data(
                sim.transceiver(),
                placements.get_placement_of_vertex(receiver),
                self._get_data_region_address(sim.transceiver(),
                                              writer_placement),
                writer.mbs_in_bytes)
            end = float(time.time())

            # (mbs * 8) / seconds = throughput in megabits per second
            print("time taken to extract {} MB is {}. MBS of {}".format(
                mbs, end - start, (mbs * 8) / (end - start)))

            self._check_data(data)

    time_out_setter.unset_cores_for_data_extraction(
        sim.transceiver(), extra_monitor_vertices, placements)
def run_mcmc(model, data, n_samples, burn_in=2000, thinning=5,
             degrees_of_freedom=3.0, seed=None, n_chips=None, n_boards=None):
    """ Executes an MCMC model, returning the received samples

    :param model: The MCMCModel to be used
    :param data: The data to sample
    :param n_samples: The number of samples to generate
    :param burn_in:\
        no of MCMC transitions to reach apparent equilibrium before\
        generating inference samples
    :param thinning:\
        sampling rate i.e. 5 = 1 sample for 5 generated steps
    :param degrees_of_freedom:\
        The number of degrees of freedom to jump around with
    :param seed: The random seed to use
    :param n_chips: The number of chips to run the model on
    :param n_boards: The number of boards to require from the machine

    .. note::
        Root-finder and Cholesky vertices are added when ``model.root_finder``
        / ``model.cholesky`` are set on the model, not via parameters here.

    :return: The samples read
    :rtype: A numpy array with fields for each model state variable
    """
    # Set up the simulation
    g.setup(n_boards_required=n_boards, n_chips_required=n_chips,
            model_binary_module=model_binaries)

    # Get the number of cores available for use
    n_cores = 0
    machine = g.machine()

    # Create a coordinator for each board
    coordinators = dict()
    boards = dict()
    for chip in machine.ethernet_connected_chips:

        # Create a coordinator
        coordinator = MCMCCoordinatorVertex(
            model, data, n_samples, burn_in, thinning, degrees_of_freedom,
            seed)
        g.add_machine_vertex_instance(coordinator)

        # Put the coordinator on the Ethernet chip
        coordinator.add_constraint(ChipAndCoreConstraint(chip.x, chip.y))
        coordinators[chip.x, chip.y] = coordinator
        boards[chip.x, chip.y] = chip.ip_address

    # Go through all the chips and add the workhorses
    n_chips_on_machine = machine.n_chips
    n_workers = 0
    # Counters only exist when the corresponding feature is enabled on the
    # model; the later increments are guarded by the same flags.
    if (model.root_finder):
        n_root_finders = 0
    if (model.cholesky):
        n_cholesky = 0
    for chip in machine.chips:

        # Count the cores in the processor
        # (-1 if this chip also has a coordinator)
        n_cores = len([p for p in chip.processors if not p.is_monitor])
        if (chip.x, chip.y) in coordinators:
            # Each MCMC worker needs a companion root-finder (and Cholesky)
            # core, so divide the remaining cores between them.
            n_cores -= 3  # coordinator and extra_monitor_support (2)
            if (model.root_finder):
                if (model.cholesky):
                    n_cores = n_cores // 3
                else:
                    n_cores = n_cores // 2
        else:
            n_cores -= 1  # just extra_monitor_support
            if (model.root_finder):
                if (model.cholesky):
                    n_cores = n_cores // 3
                else:
                    n_cores = n_cores // 2

        # Find the coordinator for the board (or 0, 0 if it is missing)
        eth_x = chip.nearest_ethernet_x
        eth_y = chip.nearest_ethernet_y
        coordinator = coordinators.get((eth_x, eth_y))
        if coordinator is None:
            print("Warning - couldn't find {}, {} for chip {}, {}".format(
                eth_x, eth_y, chip.x, chip.y))
            coordinator = coordinators[0, 0]
            print("Using coordinator ", coordinator)

        # hard-code remove some cores (chip power monitor etc.) just
        # to see what happens
        # n_cores -= non_worker_cores_per_chip
        # print 'n_cores: ', n_cores

        # Add a vertex for each core
        for _ in range(n_cores):

            # Create the vertex and add it to the graph
            vertex = MCMCVertex(coordinator, model)
            n_workers += 1
            g.add_machine_vertex_instance(vertex)

            # Put the vertex on the same board as the coordinator
            vertex.add_constraint(ChipAndCoreConstraint(chip.x, chip.y))

            # Add an edge from the coordinator to the vertex, to send the data
            g.add_machine_edge_instance(MachineEdge(coordinator, vertex),
                                        coordinator.data_partition_name)

            # Add an edge from the vertex to the coordinator,
            # to send acknowledgement
            g.add_machine_edge_instance(MachineEdge(vertex, coordinator),
                                        coordinator.acknowledge_partition_name)

            if (model.root_finder):

                # Create a root finder vertex
                rf_vertex = MCMCRootFinderVertex(vertex, model)
                n_root_finders += 1
                g.add_machine_vertex_instance(rf_vertex)

                # put it on the same chip as the standard mcmc vertex?
                # no - put it on a "nearby" chip, however that works
                rf_vertex.add_constraint(ChipAndCoreConstraint(chip.x, chip.y))

                # Add an edge from mcmc vertex to root finder vertex,
                # to "send" the data - need to work this out
                g.add_machine_edge_instance(MachineEdge(vertex, rf_vertex),
                                            vertex.parameter_partition_name)

                # Add edge from root finder vertex back to mcmc vertex
                # to send acknowledgement / result - need to work this out
                g.add_machine_edge_instance(MachineEdge(rf_vertex, vertex),
                                            vertex.result_partition_name)

            if (model.cholesky):

                # Create a Cholesky vertex
                cholesky_vertex = MCMCCholeskyVertex(vertex, model)
                n_cholesky += 1
                g.add_machine_vertex_instance(cholesky_vertex)

                # put it on the same chip as the standard mcmc vertex?
                # no - put it on a "nearby" chip, however that works
                cholesky_vertex.add_constraint(
                    ChipAndCoreConstraint(chip.x, chip.y))

                # Add an edge from mcmc vertex to Cholesky vertex,
                # to "send" the data - need to work this out
                g.add_machine_edge_instance(
                    MachineEdge(vertex, cholesky_vertex),
                    vertex.cholesky_partition_name)

                # Add edge from Cholesky vertex back to mcmc vertex
                # to send acknowledgement / result - need to work this out
                g.add_machine_edge_instance(
                    MachineEdge(cholesky_vertex, vertex),
                    vertex.cholesky_result_partition_name)

    start_computing_time = time.time()

    logger.info("n_chips_on_machine {}".format(n_chips_on_machine))
    logger.info("Running {} worker cores".format(n_workers))
    if (model.root_finder):
        logger.info("Running {} root finder cores".format(n_root_finders))
    if (model.cholesky):
        logger.info("Running {} Cholesky cores".format(n_cholesky))

    # Run the simulation
    g.run_until_complete()

    mid_computing_time = time.time()

    # Wait for the application to finish
    txrx = g.transceiver()
    # pylint: disable=protected-access
    app_id = globals_variables.get_simulator()._app_id
    # NOTE(review): the three logger.info calls below duplicate the ones
    # issued before run_until_complete.
    logger.info("Running {} worker cores".format(n_workers))
    if (model.root_finder):
        logger.info("Running {} root finder cores".format(n_root_finders))
    if (model.cholesky):
        logger.info("Running {} Cholesky cores".format(n_cholesky))
    logger.info("Waiting for application to finish...")
    running = txrx.get_core_state_count(app_id, CPUState.RUNNING)
    # there are now cores doing extra_monitor etc.
    # One monitor per chip plus two support cores per board stay RUNNING.
    non_worker_cores = n_chips_on_machine + (2 * len(boards))
    while running > non_worker_cores:
        time.sleep(0.5)
        error = txrx.get_core_state_count(app_id, CPUState.RUN_TIME_EXCEPTION)
        watchdog = txrx.get_core_state_count(app_id, CPUState.WATCHDOG)
        if error > 0 or watchdog > 0:
            error_msg = "Some cores have failed ({} RTE, {} WDOG)".format(
                error, watchdog)
            raise Exception(error_msg)
        running = txrx.get_core_state_count(app_id, CPUState.RUNNING)
        print('running: ', running)

    finish_computing_time = time.time()

    # Get the data back
    samples = dict()
    for coord, coordinator in iteritems(coordinators):
        samples[coord[0], coord[1]] = coordinator.read_samples(
            g.buffer_manager())

    # Close the machine
    g.stop()

    finish_time = time.time()

    # Note: this timing appears to be incorrect now; needs looking at
    # NOTE(review): start_time is not defined in this function — presumably
    # a module-level timestamp; verify it exists at import time.
    print("Overhead time is %s seconds" % (start_computing_time - start_time))
    print("Computing time is %s seconds" %
          (finish_computing_time - start_computing_time))
    print("run_until_complete takes %s seconds" %
          (mid_computing_time - start_computing_time))
    print("Data collecting time is %s seconds" %
          (finish_time - finish_computing_time))
    print("Overall running time is %s seconds" % (finish_time - start_time))

    return samples
def run_model(data, n_chips=None, n_ihcan=0, fs=44100, resample_factor=1):
    """Place IHCAN vertices on up to *n_chips* chips, run them to
    completion, and return the concatenated samples read back.

    :param data: per-vertex input data; data[j] feeds the j-th IHCAN vertex
        on each chip
    :param n_chips: number of chips to populate (also passed to g.setup)
    :param n_ihcan: IHCAN vertices to create per chip
    :param fs: sampling frequency passed to each IHCANVertex
    :param resample_factor: resample factor passed to each IHCANVertex
    :return: numpy array of all samples, hstacked in vertex-creation order
    """
    # Set up the simulation
    g.setup(n_chips_required=n_chips, model_binary_module=model_binaries)

    # Get the number of cores available for use
    n_cores = 0
    machine = g.machine()

    # Create a OME for each chip
    boards = dict()

    # changed to lists to ensure data is read back in the same order that
    # verticies are instantiated
    ihcans = list()

    cf_index = 0
    count = 0
    for chip in machine.chips:
        if count >= n_chips:
            break
        else:
            boards[chip.x, chip.y] = chip.ip_address
            for j in range(n_ihcan):
                ihcan = IHCANVertex(data[j][:], fs, resample_factor)
                g.add_machine_vertex_instance(ihcan)
                # constrain placement to local chip
                ihcan.add_constraint(ChipAndCoreConstraint(chip.x, chip.y))
                # ihcans[chip.x, chip.y,j] = ihcan
                ihcans.append(ihcan)
            count = count + 1

    # Run the simulation
    g.run(None)

    # Wait for the application to finish
    txrx = g.transceiver()
    # pylint: disable=protected-access
    app_id = globals_variables.get_simulator()._app_id
    # logger.info("Running {} worker cores".format(n_workers))
    logger.info("Waiting for application to finish...")
    # Poll core states until no core is RUNNING; abort on RTE/watchdog.
    running = txrx.get_core_state_count(app_id, CPUState.RUNNING)
    while running > 0:
        time.sleep(0.5)
        error = txrx.get_core_state_count(
            app_id, CPUState.RUN_TIME_EXCEPTION)
        watchdog = txrx.get_core_state_count(app_id, CPUState.WATCHDOG)
        if error > 0 or watchdog > 0:
            error_msg = "Some cores have failed ({} RTE, {} WDOG)".format(
                error, watchdog)
            raise Exception(error_msg)
        running = txrx.get_core_state_count(app_id, CPUState.RUNNING)

    # Get the data back
    samples = list()
    progress = ProgressBar(len(ihcans), "Reading results")
    for ihcan in ihcans:
        samples.append(ihcan.read_samples(g.buffer_manager()))
        progress.update()
    progress.end()
    samples = numpy.hstack(samples)

    # Close the machine
    g.stop()

    print "channels running: ", len(ihcans) / 5.0
    print "output data: {} fibres with length {}".format(
        len(ihcans) * 2, len(samples))
    # if(len(samples) != len(ihcans)*2*numpy.floor(len(data[0][0])/100)*100*(1.0/resample_factor)):
    # Sanity-check: expected length is 2 fibres per vertex times the input
    # length rounded down to a multiple of 96.
    if (len(samples) !=
            len(ihcans) * 2 * numpy.floor(len(data[0][0]) / 96) * 96):
        # print "samples length {} isn't expected size {}".format(len(samples),len(ihcans)*2*numpy.floor(len(data[0][0])/100)*100*(1.0/resample_factor))
        print "samples length {} isn't expected size {}".format(
            len(samples),
            len(ihcans) * 2 * numpy.floor(len(data[0][0]) / 96) * 96)

    return samples
def run_broken():
    """Heat-demo graph: one live-packet gatherer per board plus a 4x4 grid
    of HeatDemoVertex elements per chip, wired N/E/S/W, run for 1000 ms
    while streaming values back over a live connection.

    Known issues are flagged inline with NOTE(review) — the function name
    suggests they may be deliberate.
    """
    machine_time_step = 1000
    time_scale_factor = 1
    # machine_port = 11111
    machine_receive_port = 22222
    machine_host = "0.0.0.0"
    live_gatherer_label = "LiveHeatGatherer"
    notify_port = 19999
    database_listen_port = 19998

    # set up the front end and ask for the detected machines dimensions
    front_end.setup(
        graph_label="heat_demo_graph",
        model_binary_module=sys.modules[__name__],
        database_socket_addresses={SocketAddress(
            "127.0.0.1", notify_port, database_listen_port)})
    machine = front_end.machine()

    # create a live gatherer vertex for each board
    default_gatherer = None
    live_gatherers = dict()
    used_cores = set()
    for chip in machine.ethernet_connected_chips:

        # Try to use core 17 if one is available as it is outside the grid
        processor = chip.get_processor_with_id(17)
        if processor is None or processor.is_monitor:
            processor = chip.get_first_none_monitor_processor()
        if processor is not None:
            live_gatherer = front_end.add_machine_vertex(
                LivePacketGatherMachineVertex,
                {
                    'label': live_gatherer_label,
                    'ip_address': machine_host,
                    'port': machine_receive_port,
                    'payload_as_time_stamps': False,
                    'use_payload_prefix': False,
                    'strip_sdp': True,
                    'message_type': EIEIOType.KEY_PAYLOAD_32_BIT
                }
            )
            live_gatherers[chip.x, chip.y] = live_gatherer
            used_cores.add((chip.x, chip.y, processor.processor_id))
            if default_gatherer is None:
                default_gatherer = live_gatherer

    # Create a list of lists of vertices (x * 4) by (y * 4)
    # (for 16 cores on a chip - missing cores will have missing vertices)
    max_x_element_id = (machine.max_chip_x + 1) * 4
    max_y_element_id = (machine.max_chip_y + 1) * 4
    vertices = [
        [None for _ in range(max_y_element_id)]
        for _ in range(max_x_element_id)
    ]

    receive_labels = list()
    for x in range(0, max_x_element_id):
        for y in range(0, max_y_element_id):

            # Map the element coordinate to a chip and a core within its
            # 4x4 sub-grid (Python 2 integer division assumed here).
            chip_x = x / 4
            chip_y = y / 4
            core_x = x % 4
            core_y = y % 4
            core_p = ((core_x * 4) + core_y) + 1

            # Add an element if the chip and core exists
            chip = machine.get_chip_at(chip_x, chip_y)
            if chip is not None:
                core = chip.get_processor_with_id(core_p)
                if (core is not None and not core.is_monitor and
                        (chip_x, chip_y, core_p) not in used_cores):
                    element = front_end.add_machine_vertex(
                        HeatDemoVertex,
                        {
                            'machine_time_step': machine_time_step,
                            'time_scale_factor': time_scale_factor
                        },
                        label="Heat Element {}, {}".format(x, y))
                    vertices[x][y] = element
                    vertices[x][y].add_constraint(
                        ChipAndCoreConstraint(chip_x, chip_y, core_p))

                    # add a link from the heat element to the live packet
                    # gatherer
                    live_gatherer = live_gatherers.get(
                        (chip.nearest_ethernet_x, chip.nearest_ethernet_y),
                        default_gatherer)
                    front_end.add_machine_edge(
                        MachineEdge,
                        {
                            'pre_vertex': vertices[x][y],
                            'post_vertex': live_gatherer
                        },
                        label="Live output from {}, {}".format(x, y),
                        semantic_label="TRANSMISSION")
                    receive_labels.append(vertices[x][y].label)

    # build edges
    for x in range(0, max_x_element_id):
        for y in range(0, max_y_element_id):
            if vertices[x][y] is not None:

                # Add a north link if not at the top
                # NOTE(review): label formats (x + 1, y); the target is
                # actually (x, y + 1) — confirm intent.
                if y+1 < max_y_element_id and vertices[x][y+1] is not None:
                    front_end.add_machine_edge(
                        HeatDemoEdge,
                        {
                            'pre_vertex': vertices[x][y],
                            'post_vertex': vertices[x][y + 1],
                            'direction': HeatDemoEdge.DIRECTIONS.SOUTH
                        },
                        label="North Edge from {}, {} to {}, {}".format(
                            x, y, x + 1, y),
                        semantic_label="TRANSMISSION")

                # Add an east link if not at the right
                # NOTE(review): bound uses max_y_element_id; for an x
                # comparison this looks like it should be max_x_element_id.
                if x+1 < max_y_element_id and vertices[x+1][y] is not None:
                    front_end.add_machine_edge(
                        HeatDemoEdge,
                        {
                            'pre_vertex': vertices[x][y],
                            'post_vertex': vertices[x + 1][y],
                            'direction': HeatDemoEdge.DIRECTIONS.WEST
                        },
                        label="East Edge from {}, {} to {}, {}".format(
                            x, y, x + 1, y),
                        semantic_label="TRANSMISSION")

                # Add a south link if not at the bottom
                if (y - 1) >= 0 and vertices[x][y - 1] is not None:
                    front_end.add_machine_edge(
                        HeatDemoEdge,
                        {
                            'pre_vertex': vertices[x][y],
                            'post_vertex': vertices[x][y - 1],
                            'direction': HeatDemoEdge.DIRECTIONS.NORTH
                        },
                        label="South Edge from {}, {} to {}, {}".format(
                            x, y, x, y - 1),
                        semantic_label="TRANSMISSION")

                # check for the likely hood for a W link
                if (x - 1) >= 0 and vertices[x - 1][y] is not None:
                    front_end.add_machine_edge(
                        HeatDemoEdge,
                        {
                            'pre_vertex': vertices[x][y],
                            'post_vertex': vertices[x - 1][y],
                            'direction': HeatDemoEdge.DIRECTIONS.EAST
                        },
                        label="West Edge from {}, {} to {}, {}".format(
                            x, y, x - 1, y),
                        semantic_label="TRANSMISSION")

    # Set up the live connection for receiving heat elements
    live_heat_connection = LiveEventConnection(
        live_gatherer_label, receive_labels=receive_labels,
        local_port=notify_port, machine_vertices=True)
    heat_values = defaultdict(list)
    condition = Condition()

    def receive_heat(label, atom, value):
        # Callback invoked by the live connection; prints the fixed-point
        # value scaled to a float.
        # NOTE(review): never appends to heat_values, so the summary print
        # at the end always shows empty lists — confirm intent.
        with condition:
            print "{}: {}".format(label, value / 65536.0)

    # Set up callbacks to occur when spikes are received
    for label in receive_labels:
        live_heat_connection.add_receive_callback(label, receive_heat)

    front_end.run(1000)
    front_end.stop()

    for label in receive_labels:
        print "{}: {}".format(
            label,
            ["{:05.2f}".format(value) for value in heat_values[label]])