def get_gsyn(self, gather=True, compatible_output=False):
    """
    Return a 3-column numpy array containing cell ids and synaptic
    conductances for recorded cells.
    """
    if self._gsyn is None:
        if not self._vertex.record_gsyn:
            raise exceptions.ConfigurationException(
                "This population has not been set to record gsyn. "
                "Therefore gsyn cannot be retrieved. Please set this "
                "vertex to record gsyn before running this command.")
        if not self._spinnaker.has_ran:
            raise local_exceptions.SpynnakerException(
                "The simulation has not yet run, therefore gsyn cannot"
                " be retrieved. Please execute the simulation before"
                " running this command")
        timer = None
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer = Timer()
            timer.start_timing()
        self._gsyn = self._vertex.get_gsyn(
            has_ran=self._spinnaker.has_ran,
            txrx=self._spinnaker.transceiver,
            placements=self._spinnaker.placements,
            machine_time_step=self._spinnaker.machine_time_step,
            graph_mapper=self._spinnaker.graph_mapper,
            compatible_output=compatible_output,
            runtime=self._spinnaker._runtime)
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer.take_sample()
    return self._gsyn
def get_v(self, gather=True, compatible_output=False):
    """
    Return a 3-column numpy array containing cell ids, time, and Vm for
    recorded cells.

    :param gather: not used - inserted to match PyNN specs
    :type gather: bool
    :param compatible_output: not used - inserted to match PyNN specs
    :type compatible_output: bool
    """
    if self._v is None:
        timer = None
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer = Timer()
            timer.start_timing()
        self._v = self._vertex.get_v(
            has_ran=self._spinnaker.has_ran,
            txrx=self._spinnaker.transceiver,
            placements=self._spinnaker.placements,
            machine_time_step=self._spinnaker.machine_time_step,
            graph_mapper=self._spinnaker.graph_mapper,
            compatible_output=compatible_output)
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer.take_sample()
    return self._v
def getSpikes(self, compatible_output=False, gather=True):
    """
    Return a 2-column numpy array containing cell ids and spike times for
    recorded cells. This is read directly from the memory on the board.
    """
    if self._spikes is None:
        if not gather:
            logger.warn("Spynnaker only supports gather = true, will "
                        "execute as if gather was true anyhow")
        timer = None
        if not self._vertex.record:
            raise exceptions.ConfigurationException(
                "This population has not been set to record spikes. "
                "Therefore spikes cannot be retrieved. Please set this "
                "vertex to record spikes before running this command.")
        if not self._spinnaker.has_ran:
            raise local_exceptions.SpynnakerException(
                "The simulation has not yet run, therefore spikes cannot"
                " be retrieved. Please execute the simulation before"
                " running this command")
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer = Timer()
            timer.start_timing()
        self._spikes = self._vertex.get_spikes(
            txrx=self._spinnaker.transceiver,
            placements=self._spinnaker.placements,
            graph_mapper=self._spinnaker.graph_mapper,
            compatible_output=compatible_output)
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer.take_sample()
    return self._spikes
def getSpikes(self, compatible_output=False, gather=True):
    """
    Return a 2-column numpy array containing cell ids and spike times for
    recorded cells. This is read directly from the memory on the board.
    """
    if self._spikes is None:
        if not gather:
            logger.warn("Spynnaker only supports gather = true, will "
                        "execute as if gather was true anyhow")
        timer = None
        if not self._vertex.record:
            raise exceptions.ConfigurationException(
                "This population has not been set to record spikes. "
                "Therefore spikes cannot be retrieved. Please set this "
                "vertex to record spikes before running this command.")
        if not self._spinnaker.has_ran:
            raise local_exceptions.SpynnakerException(
                "The simulation has not yet run, therefore spikes cannot"
                " be retrieved. Please execute the simulation before"
                " running this command")
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer = Timer()
            timer.start_timing()
        self._spikes = self._vertex.get_spikes(
            txrx=self._spinnaker.transceiver,
            placements=self._spinnaker.placements,
            graph_mapper=self._spinnaker.graph_mapper,
            compatible_output=compatible_output)
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer.take_sample()
    return self._spikes
def test_timer(self):
    timer = Timer()
    timer.start_timing()
    sleep(0.1)
    end_time = timer.take_sample().total_seconds()
    self.assertAlmostEqual(end_time, 0.1, delta=0.02)
    sleep(0.2)
    self.assertAlmostEqual(timer.take_sample().total_seconds(),
                           end_time + 0.2, delta=0.02)
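# A minimal sketch of the timing pattern the surrounding functions use with
# this Timer: a section is only timed when "outputTimesForSections" is
# enabled in the [Reports] section of the configuration. "conf", "logger"
# and "expensive_call" stand in for the module-level config object, logger
# and the work being timed; they are assumptions for illustration, not part
# of the original code.
def timed_call(expensive_call):
    timer = None
    if conf.config.getboolean("Reports", "outputTimesForSections"):
        timer = Timer()
        timer.start_timing()
    result = expensive_call()
    if timer is not None:
        logger.info("Section took {}".format(timer.take_sample()))
    return result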
def get_synaptic_list_from_machine(self, graph_mapper, partitioned_graph,
                                   placements, transceiver, routing_infos):
    """
    Get synaptic data for all connections in this Projection from the
    machine.
    """
    if self._stored_synaptic_data_from_machine is None:
        timer = None
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer = Timer()
            timer.start_timing()

        logger.debug("Reading synapse data for edge between {} and {}"
                     .format(self._pre_vertex.label,
                             self._post_vertex.label))
        subedges = \
            graph_mapper.get_partitioned_edges_from_partitionable_edge(self)
        if subedges is None:
            subedges = list()

        synaptic_list = [SynapseRowInfo([], [], [], [])
                         for _ in range(self._pre_vertex.n_atoms)]

        progress_bar = ProgressBar(
            len(subedges), "progress on reading back synaptic matrix")
        for subedge in subedges:
            n_rows = subedge.get_n_rows(graph_mapper)
            pre_vertex_slice = \
                graph_mapper.get_subvertex_slice(subedge.pre_subvertex)
            post_vertex_slice = \
                graph_mapper.get_subvertex_slice(subedge.post_subvertex)
            sub_edge_post_vertex = \
                graph_mapper.get_vertex_from_subvertex(
                    subedge.post_subvertex)
            rows = sub_edge_post_vertex.get_synaptic_list_from_machine(
                placements, transceiver, subedge.pre_subvertex, n_rows,
                subedge.post_subvertex, self._synapse_row_io,
                partitioned_graph, routing_infos,
                subedge.weight_scales).get_rows()

            for i in range(len(rows)):
                synaptic_list[i + pre_vertex_slice.lo_atom].append(
                    rows[i], lo_atom=post_vertex_slice.lo_atom)
            progress_bar.update()
        progress_bar.end()

        self._stored_synaptic_data_from_machine = SynapticList(synaptic_list)
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer.take_sample()

    return self._stored_synaptic_data_from_machine
def get_synaptic_list_from_machine(self, graph_mapper, partitioned_graph,
                                   placements, transceiver, routing_infos):
    """
    Get synaptic data for all connections in this Projection from the
    machine.
    """
    if self._stored_synaptic_data_from_machine is None:
        timer = None
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer = Timer()
            timer.start_timing()

        logger.debug(
            "Reading synapse data for edge between {} and {}".format(
                self._pre_vertex.label, self._post_vertex.label))
        subedges = \
            graph_mapper.get_partitioned_edges_from_partitionable_edge(self)
        if subedges is None:
            subedges = list()

        synaptic_list = copy.copy(self._synapse_list)
        synaptic_list_rows = synaptic_list.get_rows()
        progress_bar = ProgressBar(
            len(subedges), "progress on reading back synaptic matrix")
        for subedge in subedges:
            n_rows = subedge.get_n_rows(graph_mapper)
            pre_vertex_slice = \
                graph_mapper.get_subvertex_slice(subedge.pre_subvertex)
            post_vertex_slice = \
                graph_mapper.get_subvertex_slice(subedge.post_subvertex)
            sub_edge_post_vertex = \
                graph_mapper.get_vertex_from_subvertex(
                    subedge.post_subvertex)
            rows = sub_edge_post_vertex.get_synaptic_list_from_machine(
                placements, transceiver, subedge.pre_subvertex, n_rows,
                subedge.post_subvertex, self._synapse_row_io,
                partitioned_graph, routing_infos,
                subedge.weight_scales).get_rows()

            for i in range(len(rows)):
                synaptic_list_rows[
                    i + pre_vertex_slice.lo_atom].set_slice_values(
                        rows[i], vertex_slice=post_vertex_slice)
            progress_bar.update()
        progress_bar.end()

        self._stored_synaptic_data_from_machine = synaptic_list
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer.take_sample()

    return self._stored_synaptic_data_from_machine
def get_v(self, gather=True, compatible_output=False):
    """
    Return a 3-column numpy array containing cell ids, time, and Vm for
    recorded cells.

    :param gather: not used - inserted to match PyNN specs
    :type gather: bool
    :param compatible_output: not used - inserted to match PyNN specs
    :type compatible_output: bool
    """
    if self._v is None:
        if not self._vertex.record_v:
            raise exceptions.ConfigurationException(
                "This population has not been set to record v. "
                "Therefore v cannot be retrieved. Please set this "
                "vertex to record v before running this command.")
        if not self._spinnaker.has_ran:
            raise local_exceptions.SpynnakerException(
                "The simulation has not yet run, therefore v cannot"
                " be retrieved. Please execute the simulation before"
                " running this command")
        timer = None
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer = Timer()
            timer.start_timing()
        self._v = self._vertex.get_v(
            has_ran=self._spinnaker.has_ran,
            txrx=self._spinnaker.transceiver,
            placements=self._spinnaker.placements,
            machine_time_step=self._spinnaker.machine_time_step,
            graph_mapper=self._spinnaker.graph_mapper,
            compatible_output=compatible_output,
            runtime=self._spinnaker._runtime)
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer.take_sample()
    return self._v
def get_gsyn(self, gather=True, compatible_output=False):
    """
    Return a 3-column numpy array containing cell ids and synaptic
    conductances for recorded cells.
    """
    if self._gsyn is None:
        timer = None
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer = Timer()
            timer.start_timing()
        self._gsyn = self._vertex.get_gsyn(
            has_ran=self._spinnaker.has_ran,
            txrx=self._spinnaker.transceiver,
            placements=self._spinnaker.placements,
            machine_time_step=self._spinnaker.machine_time_step,
            graph_mapper=self._spinnaker.graph_mapper,
            compatible_output=compatible_output)
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer.take_sample()
    return self._gsyn
def get_v(self, gather=True, compatible_output=False):
    """
    Return a 3-column numpy array containing cell ids, time, and Vm for
    recorded cells.

    :param gather: not used - inserted to match PyNN specs
    :type gather: bool
    :param compatible_output: not used - inserted to match PyNN specs
    :type compatible_output: bool
    """
    if self._v_cache_file is None:
        if not self._vertex.is_recording_v():
            raise exceptions.ConfigurationException(
                "This population has not been set to record v. "
                "Therefore v cannot be retrieved. Please set this "
                "vertex to record v before running this command.")
        if not self._spinnaker.has_ran:
            raise local_exceptions.SpynnakerException(
                "The simulation has not yet run, therefore v cannot"
                " be retrieved. Please execute the simulation before"
                " running this command")
        timer = None
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer = Timer()
            timer.start_timing()
        v = self._vertex.get_v(
            self._spinnaker.transceiver,
            self._spinnaker.no_machine_time_steps,
            self._spinnaker.placements,
            self._spinnaker.graph_mapper)
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            logger.info("Time to read v: {}".format(timer.take_sample()))
        self._v_cache_file = tempfile.NamedTemporaryFile()
        numpy.save(self._v_cache_file, v)
        return v

    # Reload the data
    self._v_cache_file.seek(0)
    return numpy.load(self._v_cache_file)
def getSpikes(self, compatible_output=False, gather=True):
    """
    Return a 2-column numpy array containing cell ids and spike times for
    recorded cells. This is read directly from the memory on the board.
    """
    if self._spikes_cache_file is None:
        if not gather:
            logger.warn("Spynnaker only supports gather = true, will "
                        "execute as if gather was true anyhow")
        timer = None
        if not self._vertex.is_recording_spikes():
            raise exceptions.ConfigurationException(
                "This population has not been set to record spikes. "
                "Therefore spikes cannot be retrieved. Please set this "
                "vertex to record spikes before running this command.")
        if not self._spinnaker.has_ran:
            raise local_exceptions.SpynnakerException(
                "The simulation has not yet run, therefore spikes cannot"
                " be retrieved. Please execute the simulation before"
                " running this command")
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer = Timer()
            timer.start_timing()
        spikes = self._vertex.get_spikes(
            self._spinnaker.transceiver,
            self._spinnaker.no_machine_time_steps,
            self._spinnaker.placements,
            self._spinnaker.graph_mapper)
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            logger.info("Time to get spikes: {}".format(
                timer.take_sample()))
        self._spikes_cache_file = tempfile.NamedTemporaryFile()
        numpy.save(self._spikes_cache_file, spikes)
        return spikes

    # Load from the file
    self._spikes_cache_file.seek(0)
    return numpy.load(self._spikes_cache_file)
def get_gsyn(self, gather=True, compatible_output=False):
    """
    Return a 3-column numpy array containing cell ids and synaptic
    conductances for recorded cells.
    """
    if self._gsyn_cache_file is None:
        if not self._vertex.is_recording_gsyn():
            raise exceptions.ConfigurationException(
                "This population has not been set to record gsyn. "
                "Therefore gsyn cannot be retrieved. Please set this "
                "vertex to record gsyn before running this command.")
        if not self._spinnaker.has_ran:
            raise local_exceptions.SpynnakerException(
                "The simulation has not yet run, therefore gsyn cannot"
                " be retrieved. Please execute the simulation before"
                " running this command")
        timer = None
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer = Timer()
            timer.start_timing()
        gsyn = self._vertex.get_gsyn(
            self._spinnaker.transceiver,
            self._spinnaker.no_machine_time_steps,
            self._spinnaker.placements,
            self._spinnaker.graph_mapper)
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            logger.info("Time to get gsyn: {}".format(timer.take_sample()))
        self._gsyn_cache_file = tempfile.NamedTemporaryFile()
        numpy.save(self._gsyn_cache_file, gsyn)
        return gsyn

    # Reload the data
    self._gsyn_cache_file.seek(0)
    return numpy.load(self._gsyn_cache_file)
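# For context, a minimal sketch of how the accessors above are typically
# reached from a PyNN 0.7-style script on sPyNNaker. This is an illustrative
# usage example only: the population size, cell type, parameters and runtime
# below are assumptions, not taken from the surrounding code.
import pyNN.spiNNaker as p

p.setup(timestep=1.0)
pop = p.Population(100, p.IF_curr_exp, {}, label="example")  # hypothetical
pop.record()        # enable spike recording so getSpikes() is allowed
pop.record_v()      # enable membrane-potential recording for get_v()
pop.record_gsyn()   # enable conductance recording for get_gsyn()
p.run(100)
spikes = pop.getSpikes(compatible_output=True)
v = pop.get_v(compatible_output=True)
gsyn = pop.get_gsyn(compatible_output=True)
p.end()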
def run(self, run_time):
    """
    :param run_time:
    :return:
    """
    self._setup_interfaces(
        hostname=self._hostname,
        virtual_x_dimension=config.getint("Machine",
                                          "virtual_board_x_dimension"),
        virtual_y_dimension=config.getint("Machine",
                                          "virtual_board_y_dimension"),
        downed_chips=config.get("Machine", "down_chips"),
        downed_cores=config.get("Machine", "down_cores"),
        requires_virtual_board=config.getboolean("Machine", "virtual_board"),
        requires_wrap_around=config.getboolean("Machine",
                                               "requires_wrap_arounds"),
        machine_version=config.getint("Machine", "version"))

    # add database generation if requested
    if self._create_database:
        wait_on_confirmation = \
            config.getboolean("Database", "wait_on_confirmation")
        self._database_interface = DataBaseInterface(
            self._app_data_runtime_folder, wait_on_confirmation,
            self._database_socket_addresses)

    # create network report if needed
    if self._reports_states is not None:
        reports.network_specification_partitionable_report(
            self._report_default_directory, self._partitionable_graph,
            self._hostname)

    # calculate number of machine time steps
    if run_time is not None:
        self._no_machine_time_steps = \
            int((run_time * 1000.0) / self._machine_time_step)
        ceiled_machine_time_steps = \
            math.ceil((run_time * 1000.0) / self._machine_time_step)
        if self._no_machine_time_steps != ceiled_machine_time_steps:
            raise common_exceptions.ConfigurationException(
                "The runtime and machine time step combination result in "
                "a fractional number of machine runnable time steps and "
                "therefore spinnaker cannot determine how many to run for")
        for vertex in self._partitionable_graph.vertices:
            if isinstance(vertex, AbstractDataSpecableVertex):
                vertex.set_no_machine_time_steps(self._no_machine_time_steps)
    else:
        self._no_machine_time_steps = None
        logger.warn("You have set a runtime that will never end, this may "
                    "cause the neural models to fail to partition "
                    "correctly")
        for vertex in self._partitionable_graph.vertices:
            if vertex.is_set_to_record_spikes():
                raise common_exceptions.ConfigurationException(
                    "recording a population when set to infinite runtime "
                    "is not currently supported in this tool chain. "
                    "Watch this space")

    do_timing = config.getboolean("Reports", "outputTimesForSections")
    if do_timing:
        timer = Timer()
    else:
        timer = None

    self.set_runtime(run_time)
    logger.info("*** Running Mapper *** ")
    if do_timing:
        timer.start_timing()
    self.map_model()
    if do_timing:
        timer.take_sample()

    # load database if needed
    if self._create_database:
        self._database_interface.add_system_params(
            self._time_scale_factor, self._machine_time_step, self._runtime)
        self._database_interface.add_machine_objects(self._machine)
        self._database_interface.add_partitionable_vertices(
            self._partitionable_graph)
        self._database_interface.add_partitioned_vertices(
            self._partitioned_graph, self._graph_mapper,
            self._partitionable_graph)
        self._database_interface.add_placements(self._placements,
                                                self._partitioned_graph)
        self._database_interface.add_routing_infos(
            self._routing_infos, self._partitioned_graph)
        self._database_interface.add_routing_tables(self._router_tables)
        self._database_interface.add_tags(self._partitioned_graph,
                                          self._tags)
        execute_mapping = config.getboolean(
            "Database", "create_routing_info_to_neuron_id_mapping")
        if execute_mapping:
            self._database_interface.create_neuron_to_key_mapping(
                graph_mapper=self._graph_mapper,
                partitionable_graph=self._partitionable_graph,
                partitioned_graph=self._partitioned_graph,
                routing_infos=self._routing_infos)
        self._database_interface.send_read_notification()

    # execute data spec generation
    if do_timing:
        timer.start_timing()
    logger.info("*** Generating Output *** ")
    logger.debug("")
    executable_targets = self.generate_data_specifications()
    if do_timing:
        timer.take_sample()

    # execute data spec execution
    if do_timing:
        timer.start_timing()
    processor_to_app_data_base_address = \
        self.execute_data_specification_execution(
            config.getboolean("SpecExecution", "specExecOnHost"),
            self._hostname, self._placements, self._graph_mapper,
            write_text_specs=config.getboolean("Reports", "writeTextSpecs"),
            runtime_application_data_folder=self._app_data_runtime_folder)

    if self._reports_states is not None:
        reports.write_memory_map_report(self._report_default_directory,
                                        processor_to_app_data_base_address)

    if do_timing:
        timer.take_sample()

    if (not isinstance(self._machine, VirtualMachine) and
            config.getboolean("Execute", "run_simulation")):
        if do_timing:
            timer.start_timing()

        logger.info("*** Loading tags ***")
        self._load_tags(self._tags)

        if self._do_load is True:
            logger.info("*** Loading data ***")
            self._load_application_data(
                self._placements, self._router_tables, self._graph_mapper,
                processor_to_app_data_base_address, self._hostname,
                self._app_id,
                machine_version=config.getint("Machine", "version"),
                app_data_folder=self._app_data_runtime_folder)
            logger.info("*** Loading executables ***")
            self._load_executable_images(
                executable_targets, self._app_id,
                app_data_folder=self._app_data_runtime_folder)
            logger.info("*** Loading buffers ***")
            self._set_up_send_buffering()

        # end of entire loading setup
        if do_timing:
            timer.take_sample()

        if self._do_run is True:
            logger.info("*** Running simulation... *** ")
            if self._reports_states.transciever_report:
                reports.re_load_script_running_aspects(
                    self._app_data_runtime_folder, executable_targets,
                    self._hostname, self._app_id, run_time)

            # everything is in sync0; load the initial buffers
            self._send_buffer_manager.load_initial_buffers()

            wait_on_confirmation = config.getboolean(
                "Database", "wait_on_confirmation")
            send_start_notification = config.getboolean(
                "Database", "send_start_notification")
            self._wait_for_cores_to_be_ready(executable_targets,
                                             self._app_id)

            # wait till external app is ready for us to start if required
            if (self._database_interface is not None and
                    wait_on_confirmation):
                logger.info(
                    "*** Awaiting a response from an external source "
                    "to state it is ready for the simulation to start ***")
                self._database_interface.wait_for_confirmation()

            self._start_all_cores(executable_targets, self._app_id)

            if (self._database_interface is not None and
                    send_start_notification):
                self._database_interface.send_start_notification()

            if self._runtime is None:
                logger.info("Application is set to run forever - exiting")
            else:
                self._wait_for_execution_to_complete(
                    executable_targets, self._app_id, self._runtime,
                    self._time_scale_factor)
            self._has_ran = True

            if self._retrieve_provance_data:
                # retrieve provenance data from central
                file_path = os.path.join(self._report_default_directory,
                                         "provance_data")

                # check the directory doesn't already exist
                if not os.path.exists(file_path):
                    os.mkdir(file_path)

                self._write_provanence_data_in_xml(file_path)

                # retrieve provenance data from any cores that provide data
                for placement in self._placements:
                    if isinstance(placement.subvertex,
                                  AbstractProvidesProvanenceData):
                        file_path = os.path.join(
                            self._report_default_directory,
                            "Provanence_data_for_core:{}:{}:{}".format(
                                placement.x, placement.y, placement.p))
    elif isinstance(self._machine, VirtualMachine):
        logger.info(
            "*** Using a Virtual Machine so no simulation will occur")
    else:
        logger.info("*** No simulation requested: Stopping. ***")
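# A small worked example of the machine-time-step check in run() above,
# using illustrative numbers that are not taken from the original code:
# with run_time = 100 ms and machine_time_step = 1000 us,
# int(100 * 1000.0 / 1000) = 100 and math.ceil(...) = 100, so the run is
# accepted with 100 machine time steps. With machine_time_step = 3000 us the
# same runtime gives int(...) = 33 but ceil(...) = 34, i.e. a fractional
# number of steps, and the ConfigurationException above is raised.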
def get_synaptic_list_from_machine(self, graph_mapper, partitioned_graph,
                                   placements, transceiver, routing_infos):
    """
    Get synaptic data for all connections in this Projection from the
    machine.

    :param graph_mapper:
    :param partitioned_graph:
    :param placements:
    :param transceiver:
    :param routing_infos:
    :return:
    """
    if self._stored_synaptic_data_from_machine is None:
        logger.debug("Reading synapse data for edge between {} and {}"
                     .format(self._pre_vertex.label,
                             self._post_vertex.label))
        timer = None
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer = Timer()
            timer.start_timing()

        subedges = \
            graph_mapper.get_partitioned_edges_from_partitionable_edge(self)
        if subedges is None:
            subedges = list()

        synaptic_list = copy.copy(self._synapse_list)
        synaptic_list_rows = synaptic_list.get_rows()
        progress_bar = ProgressBar(
            len(subedges), "progress on reading back synaptic matrix")
        for subedge in subedges:
            n_rows = subedge.get_n_rows(graph_mapper)
            pre_vertex_slice = \
                graph_mapper.get_subvertex_slice(subedge.pre_subvertex)
            post_vertex_slice = \
                graph_mapper.get_subvertex_slice(subedge.post_subvertex)
            sub_edge_post_vertex = \
                graph_mapper.get_vertex_from_subvertex(
                    subedge.post_subvertex)
            rows = sub_edge_post_vertex.get_synaptic_list_from_machine(
                placements, transceiver, subedge.pre_subvertex, n_rows,
                subedge.post_subvertex, self._synapse_row_io,
                partitioned_graph, routing_infos,
                subedge.weight_scales).get_rows()

            for i in range(len(rows)):
                delay_stage = math.floor(
                    float(i) / float(pre_vertex_slice.n_atoms)) + 1
                min_delay = (delay_stage *
                             self.pre_vertex.max_delay_per_neuron)
                max_delay = (min_delay +
                             self.pre_vertex.max_delay_per_neuron - 1)
                synaptic_list_rows[
                    (i % pre_vertex_slice.n_atoms) +
                    pre_vertex_slice.lo_atom].set_slice_values(
                        rows[i], post_vertex_slice, min_delay, max_delay)
            progress_bar.update()
        progress_bar.end()

        self._stored_synaptic_data_from_machine = synaptic_list
        if conf.config.getboolean("Reports", "outputTimesForSections"):
            timer.take_sample()

    return self._stored_synaptic_data_from_machine
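# A worked example of the delay-stage arithmetic in the loop above (hedged
# sketch with illustrative numbers, not part of the original code). Suppose
# the pre-vertex slice has n_atoms = 4 and max_delay_per_neuron = 16. Row
# i = 6 then belongs to delay_stage = floor(6 / 4) + 1 = 2, so it covers
# delays min_delay = 2 * 16 = 32 through max_delay = 32 + 16 - 1 = 47, and
# is merged back into source row (6 % 4) + lo_atom = 2 + lo_atom of the
# synaptic list.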
def run(self, run_time):
    """
    :param run_time:
    :return:
    """
    # sort out config params to be valid types
    width = config.get("Machine", "width")
    height = config.get("Machine", "height")
    if width == "None":
        width = None
    else:
        width = int(width)
    if height == "None":
        height = None
    else:
        height = int(height)

    number_of_boards = config.get("Machine", "number_of_boards")
    if number_of_boards == "None":
        number_of_boards = None

    self.setup_interfaces(
        hostname=self._hostname,
        bmp_details=config.get("Machine", "bmp_names"),
        downed_chips=config.get("Machine", "down_chips"),
        downed_cores=config.get("Machine", "down_cores"),
        board_version=config.getint("Machine", "version"),
        number_of_boards=number_of_boards, width=width, height=height,
        is_virtual=config.getboolean("Machine", "virtual_board"),
        virtual_has_wrap_arounds=config.getboolean(
            "Machine", "requires_wrap_arounds"),
        auto_detect_bmp=config.getboolean("Machine", "auto_detect_bmp"))

    # adds extra stuff needed by the reload script which cannot be given
    # directly.
    if self._reports_states.transciever_report:
        self._reload_script.runtime = run_time
        self._reload_script.time_scale_factor = self._time_scale_factor

    # create network report if needed
    if self._reports_states is not None:
        reports.network_specification_partitionable_report(
            self._report_default_directory, self._partitionable_graph,
            self._hostname)

    # calculate number of machine time steps
    if run_time is not None:
        self._no_machine_time_steps = \
            int((run_time * 1000.0) / self._machine_time_step)
        ceiled_machine_time_steps = \
            math.ceil((run_time * 1000.0) / self._machine_time_step)
        if self._no_machine_time_steps != ceiled_machine_time_steps:
            raise common_exceptions.ConfigurationException(
                "The runtime and machine time step combination result in "
                "a fractional number of machine runnable time steps and "
                "therefore spinnaker cannot determine how many to run for")
        for vertex in self._partitionable_graph.vertices:
            if isinstance(vertex, AbstractDataSpecableVertex):
                vertex.set_no_machine_time_steps(self._no_machine_time_steps)
    else:
        self._no_machine_time_steps = None
        logger.warn("You have set a runtime that will never end, this may "
                    "cause the neural models to fail to partition "
                    "correctly")
        for vertex in self._partitionable_graph.vertices:
            if (isinstance(vertex, AbstractPopulationRecordableVertex) and
                    vertex.record):
                raise common_exceptions.ConfigurationException(
                    "recording a population when set to infinite runtime "
                    "is not currently supported in this tool chain. "
                    "Watch this space")

    do_timing = config.getboolean("Reports", "outputTimesForSections")
    if do_timing:
        timer = Timer()
    else:
        timer = None

    self.set_runtime(run_time)
    logger.info("*** Running Mapper *** ")
    if do_timing:
        timer.start_timing()
    self.map_model()
    if do_timing:
        timer.take_sample()

    # add database generation if requested
    needs_database = self._auto_detect_database(self._partitioned_graph)
    user_create_database = config.get("Database", "create_database")
    if ((user_create_database == "None" and needs_database) or
            user_create_database == "True"):
        wait_on_confirmation = config.getboolean(
            "Database", "wait_on_confirmation")
        self._database_interface = SpynnakerDataBaseInterface(
            self._app_data_runtime_folder, wait_on_confirmation,
            self._database_socket_addresses)

        self._database_interface.add_system_params(
            self._time_scale_factor, self._machine_time_step, self._runtime)
        self._database_interface.add_machine_objects(self._machine)
        self._database_interface.add_partitionable_vertices(
            self._partitionable_graph)
        self._database_interface.add_partitioned_vertices(
            self._partitioned_graph, self._graph_mapper,
            self._partitionable_graph)
        self._database_interface.add_placements(self._placements,
                                                self._partitioned_graph)
        self._database_interface.add_routing_infos(
            self._routing_infos, self._partitioned_graph)
        self._database_interface.add_routing_tables(self._router_tables)
        self._database_interface.add_tags(self._partitioned_graph,
                                          self._tags)
        execute_mapping = config.getboolean(
            "Database", "create_routing_info_to_neuron_id_mapping")
        if execute_mapping:
            self._database_interface.create_neuron_to_key_mapping(
                graph_mapper=self._graph_mapper,
                partitionable_graph=self._partitionable_graph,
                partitioned_graph=self._partitioned_graph,
                routing_infos=self._routing_infos)

        # if using a reload script, add if that needs to wait for
        # confirmation
        if self._reports_states.transciever_report:
            self._reload_script.wait_on_confirmation = wait_on_confirmation
            for socket_address in self._database_socket_addresses:
                self._reload_script.add_socket_address(socket_address)

        self._database_interface.send_read_notification()

    # execute data spec generation
    if do_timing:
        timer.start_timing()
    logger.info("*** Generating Output *** ")
    logger.debug("")
    executable_targets = self.generate_data_specifications()
    if do_timing:
        timer.take_sample()

    # execute data spec execution
    if do_timing:
        timer.start_timing()
    processor_to_app_data_base_address = \
        self.execute_data_specification_execution(
            config.getboolean("SpecExecution", "specExecOnHost"),
            self._hostname, self._placements, self._graph_mapper,
            write_text_specs=config.getboolean("Reports", "writeTextSpecs"),
            runtime_application_data_folder=self._app_data_runtime_folder,
            machine=self._machine)

    if self._reports_states is not None:
        reports.write_memory_map_report(self._report_default_directory,
                                        processor_to_app_data_base_address)

    if do_timing:
        timer.take_sample()

    if (not isinstance(self._machine, VirtualMachine) and
            config.getboolean("Execute", "run_simulation")):
        if do_timing:
            timer.start_timing()

        logger.info("*** Loading tags ***")
        self.load_tags(self._tags)

        if self._do_load is True:
            logger.info("*** Loading data ***")
            self._load_application_data(
                self._placements, self._graph_mapper,
                processor_to_app_data_base_address, self._hostname,
                app_data_folder=self._app_data_runtime_folder,
                verify=config.getboolean("Mode", "verify_writes"))
            self.load_routing_tables(self._router_tables, self._app_id)
            logger.info("*** Loading executables ***")
            self.load_executable_images(executable_targets, self._app_id)
            logger.info("*** Loading buffers ***")
            self.set_up_send_buffering(self._partitioned_graph,
                                       self._placements, self._tags)

        # end of entire loading setup
        if do_timing:
            timer.take_sample()

        if self._do_run is True:
            logger.info("*** Running simulation... *** ")
            if do_timing:
                timer.start_timing()
            # everything is in sync0; load the initial buffers
            self._send_buffer_manager.load_initial_buffers()
            if do_timing:
                timer.take_sample()

            wait_on_confirmation = config.getboolean(
                "Database", "wait_on_confirmation")
            send_start_notification = config.getboolean(
                "Database", "send_start_notification")
            self.wait_for_cores_to_be_ready(executable_targets,
                                            self._app_id)

            # wait till external app is ready for us to start if required
            if (self._database_interface is not None and
                    wait_on_confirmation):
                self._database_interface.wait_for_confirmation()

            self.start_all_cores(executable_targets, self._app_id)

            if (self._database_interface is not None and
                    send_start_notification):
                self._database_interface.send_start_notification()

            if self._runtime is None:
                logger.info("Application is set to run forever - exiting")
            else:
                self.wait_for_execution_to_complete(
                    executable_targets, self._app_id, self._runtime,
                    self._time_scale_factor)
            self._has_ran = True

            if self._retrieve_provance_data:
                progress = ProgressBar(self._placements.n_placements + 1,
                                       "getting provenance data")

                # retrieve provenance data from central
                file_path = os.path.join(self._report_default_directory,
                                         "provance_data")

                # check the directory doesn't already exist
                if not os.path.exists(file_path):
                    os.mkdir(file_path)

                # write provenance data
                self.write_provenance_data_in_xml(file_path, self._txrx)
                progress.update()

                # retrieve provenance data from any cores that provide data
                for placement in self._placements.placements:
                    if isinstance(placement.subvertex,
                                  AbstractProvidesProvenanceData):
                        core_file_path = os.path.join(
                            file_path,
                            "Provanence_data_for_{}_{}_{}_{}.xml".format(
                                placement.subvertex.label, placement.x,
                                placement.y, placement.p))
                        placement.subvertex.write_provenance_data_in_xml(
                            core_file_path, self.transceiver, placement)
                    progress.update()
                progress.end()
    elif isinstance(self._machine, VirtualMachine):
        logger.info(
            "*** Using a Virtual Machine so no simulation will occur")
    else:
        logger.info("*** No simulation requested: Stopping. ***")