    def run(self, run_time, sync_time=0.0):
        """ Run the created model.

        :param run_time: the time (in milliseconds) to run the simulation for
        :type run_time: float or int
        :param float sync_time:
            If not 0, this specifies that the simulation should pause after
            this duration.  The continue_simulation() method must then be
            called for the simulation to continue.
        :rtype: None
        """
        # pylint: disable=protected-access

        # use the sPyNNaker data specification writer
        self._dsg_algorithm = "SpynnakerDataSpecificationWriter"
        for projection in self._projections:
            projection._clear_cache()

        if (get_config_bool("Reports", "reports_enabled") and get_config_bool(
                "Reports", "write_redundant_packet_count_report")
                and not self._use_virtual_board and run_time is not None
                and not self._has_ran
                and get_config_bool("Reports", "writeProvenanceData")):
            self.extend_extra_post_run_algorithms(
                ["RedundantPacketCountReport"])

        super().run(run_time, sync_time)
        for projection in self._projections:
            projection._clear_cache()
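
A hedged usage sketch of the contract documented above: run() with a
non-zero sync_time pauses after each sync_time slice, and
continue_simulation() resumes it. The driver function and its defaults
are illustrative; "simulator" stands in for any object exposing this
interface.

def drive_in_slices(simulator, run_time=1000.0, sync_time=250.0):
    # run() returns control at the first pause; each remaining slice
    # must be released explicitly with continue_simulation().
    simulator.run(run_time, sync_time)
    for _ in range(int(run_time / sync_time) - 1):
        simulator.continue_simulation()
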
Example 2
    def __call__(self, placements, app_graph, executable_finder, transceiver,
                 machine_graph, routing_infos):
        """ Loads and runs the bit field generator on chip.

        :param ~pacman.model.placements.Placements placements: placements
        :param ~pacman.model.graphs.application.ApplicationGraph app_graph:
            the app graph
        :param executable_finder: the executable finder
        :type executable_finder:
            ~spinn_front_end_common.utilities.utility_objs.ExecutableFinder
        :param ~spinnman.transceiver.Transceiver transceiver:
            the SpiNNMan instance
        :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
            the machine graph
        :param ~pacman.model.routing_info.RoutingInfo routing_infos:
            the key to edge map
        """
        self.__txrx = transceiver
        self.__placements = placements
        self.__aplx = executable_finder.get_executable_path(
            self._BIT_FIELD_EXPANDER_APLX)

        # progress bar
        progress = ProgressBar(
            app_graph.n_vertices + machine_graph.n_vertices + 1,
            "Running bitfield generation on chip")

        # get data
        expander_cores = self._calculate_core_data(app_graph, progress)

        # load data
        bit_field_app_id = transceiver.app_id_tracker.get_new_id()
        progress.update(1)

        # run app
        system_control_logic.run_system_application(
            expander_cores,
            bit_field_app_id,
            transceiver,
            executable_finder,
            get_config_bool("Reports", "write_bit_field_iobuf"),
            self.__check_for_success, [CPUState.FINISHED],
            False,
            "bit_field_expander_on_{}_{}_{}.txt",
            progress_bar=progress)
        # update progress bar
        progress.end()

        # read in bit fields for debugging purposes
        if get_config_bool("Reports", "generate_bit_field_report"):
            self._full_report_bit_fields(
                app_graph,
                os.path.join(report_default_directory(),
                             self._BIT_FIELD_REPORT_FILENAME))
            self._summary_report_bit_fields(
                app_graph,
                os.path.join(report_default_directory(),
                             self._BIT_FIELD_SUMMARY_REPORT_FILENAME))
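
The ProgressBar idiom used above (declare the total up front, consume it
with update(), close with end()) in isolation; a minimal sketch assuming
spinn_utilities' usual interface:

from spinn_utilities.progress_bar import ProgressBar

# Three stages of work, each accounting for one unit of the declared
# total, mirroring how the expander run above sizes its bar.
progress = ProgressBar(3, "Demonstrating the progress bar idiom")
for _ in range(3):
    progress.update(1)
progress.end()
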
Example 3
    def report_file(self):
        if get_config_bool("Java", "use_java"):
            style = "java_"
        else:
            style = "python_"
        if get_config_bool("Machine", "enable_advanced_monitor_support"):
            style += "advanced"
        else:
            style += "simple"
        return "{}_test_big_connection".format(style)
Example 4
    def report_file(self):
        if get_config_bool("Java", "use_java"):
            style = "java_"
        else:
            style = "python_"
        if get_config_bool("Machine", "enable_advanced_monitor_support"):
            style += "advanced"
        else:
            style += "simple"
        return "{}_n_boards={}_n_neurons={}_simtime={}".format(
            style, self.n_boards, self.n_neurons, self.simtime)
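
The two config flags above yield four possible file-name prefixes; a
standalone restatement of the naming scheme (pure illustration, not
taken from the source):

def style_prefix(use_java, advanced_monitors):
    # Mirrors the branching above: language first, monitor mode second.
    style = "java_" if use_java else "python_"
    style += "advanced" if advanced_monitors else "simple"
    return style

assert style_prefix(True, True) == "java_advanced"
assert style_prefix(False, False) == "python_simple"
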
Example 5
    def _execute_synapse_expander(self):
        with FecTimer(LOADING, "Synapse expander") as timer:
            if timer.skip_if_virtual_board():
                return
            synapse_expander(
                self.placements, self._txrx, self._executable_finder,
                get_config_bool("Reports", "write_expander_iobuf"))
Example 6
    def _fill_machine(self, machine):
        """
        :param ~spinn_machine.Machine machine:
        :param bool repair_machine:
        :param bool ignore_bad_ethernets:
        :rtype: ~spinn_machine.Machine
        """
        for chip_info in sorted(self._chip_info.values(),
                                key=lambda chip: (chip.x, chip.y)):
            if (chip_info.ethernet_ip_address is not None
                    and (chip_info.x != chip_info.nearest_ethernet_x
                         or chip_info.y != chip_info.nearest_ethernet_y)):
                if get_config_bool("Machine", "ignore_bad_ethernets"):
                    logger.warning(
                        "Chip {}:{} claimed it has ip address: {}. "
                        "This ip will not be used.", chip_info.x, chip_info.y,
                        chip_info.ethernet_ip_address)
                    chip_info._ethernet_ip_address = None
                else:
                    logger.warning(
                        "Not using chip {}:{} as it has an unexpected "
                        "ip address: {}", chip_info.x, chip_info.y,
                        chip_info.ethernet_ip_address)
                    continue

            # If the above has not continued, add the chip
            machine.add_chip(self._make_chip(chip_info, machine))

        machine.validate()
        return machine_repair(machine)
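
The ethernet sanity rule above, restated as a minimal standalone sketch:
an IP address is only trusted on a chip that is its own nearest ethernet
chip; otherwise the IP is stripped (ignore_bad_ethernets) or the chip is
skipped. The classify helper and SimpleNamespace chip record are
hypothetical stand-ins:

from types import SimpleNamespace

def classify(chip, ignore_bad_ethernets):
    """Return 'keep', 'strip_ip' or 'skip' for a chip record."""
    bad = (chip.ethernet_ip_address is not None
           and (chip.x != chip.nearest_ethernet_x
                or chip.y != chip.nearest_ethernet_y))
    if not bad:
        return "keep"
    return "strip_ip" if ignore_bad_ethernets else "skip"

chip = SimpleNamespace(x=1, y=1, ethernet_ip_address="10.0.0.9",
                       nearest_ethernet_x=0, nearest_ethernet_y=0)
assert classify(chip, True) == "strip_ip"
assert classify(chip, False) == "skip"
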
Example 7
    def get_projections_data(self, projection_to_attribute_map):
        """ Common data extractor for projection data. Allows full
            exploitation of the ????

        :param projection_to_attribute_map:
            the projection to attributes mapping
        :type projection_to_attribute_map:
            dict(~spynnaker.pyNN.models.projection.Projection,
            list(int) or tuple(int) or None)
        :return: an extracted data object with a get method for getting the
            data
        :rtype: ExtractedData
        """
        # pylint: disable=protected-access

        # build data structure for holding data
        mother_lode = ExtractedData()

        # if using extra monitor functionality, locate extra data items
        receivers = list()
        if get_config_bool("Machine", "enable_advanced_monitor_support"):
            receivers = self._locate_receivers_from_projections(
                projection_to_attribute_map.keys(),
                self.get_generated_output(
                    "VertexToEthernetConnectedChipMapping"),
                self.get_generated_output("ExtraMonitorToChipMapping"))

        # set up the router timeouts to stop packet loss
        for data_receiver, extra_monitor_cores in receivers:
            data_receiver.load_system_routing_tables(
                self._txrx, self.get_generated_output("ExtraMonitorVertices"),
                self._placements)
            data_receiver.set_cores_for_data_streaming(
                self._txrx, list(extra_monitor_cores), self._placements)

        # acquire the data
        for projection in projection_to_attribute_map:
            for attribute in projection_to_attribute_map[projection]:
                data = projection._get_synaptic_data(
                    as_list=True,
                    data_to_get=attribute,
                    fixed_values=None,
                    notify=None,
                    handle_time_out_configuration=False)
                mother_lode.set(projection, attribute, data)

        # reset time outs for the receivers
        for data_receiver, extra_monitor_cores in receivers:
            data_receiver.unset_cores_for_data_streaming(
                self._txrx, list(extra_monitor_cores), self._placements)
            data_receiver.load_application_routing_tables(
                self._txrx, self.get_generated_output("ExtraMonitorVertices"),
                self._placements)

        # return data items
        return mother_lode
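
A hypothetical usage sketch of the extractor above: one pass over the
machine fills the ExtractedData container, after which individual
attributes are read back without further I/O. The projection names are
illustrative, and ExtractedData.get is assumed to mirror the set()
calls made above:

def extract_weights(interface, proj_exc, proj_inh):
    # Single extraction pass covering both projections.
    data = interface.get_projections_data({
        proj_exc: ["weight", "delay"],
        proj_inh: ["weight"],
    })
    return data.get(proj_exc, "weight"), data.get(proj_inh, "weight")
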
Example 8
    def _execute_spynnaker_pair_compressor(self):
        with FecTimer(
                LOADING, "Spynnaker machine bitfield pair router compressor") \
                as timer:
            if timer.skip_if_virtual_board():
                return
            spynnaker_machine_bitField_pair_router_compressor(
                self._router_tables, self._txrx, self._machine, self._app_id,
                self._machine_graph, self._placements,
                self._executable_finder, self._routing_infos,
                self._executable_targets,
                get_config_bool("Reports", "write_expander_iobuf"))
            self._multicast_routes_loaded = True
Example 9
def minimise(routing_table,
             use_timer_cut_off=False,
             time_to_run_for_before_raising_exception=None):
    """Reduce the size of a routing table by merging together entries where \
    possible and by removing any remaining default routes.

    .. warning::

        The input routing table *must* also include entries which could be
        removed and replaced by default routing.

    .. warning::

        It is assumed that the input routing table is not in any particular
        order and may be reordered into ascending order of generality (number
        of don't cares/Xs in the key-mask) without affecting routing
        correctness.  It is also assumed that if this table is unordered it is
        at least orthogonal (i.e., there are no two entries which would match
        the same key) and reorderable.

    :param list(Entry) routing_table:
        Routing entries to be merged.
    :param bool use_timer_cut_off: whether to use the timer cut-off
    :param time_to_run_for_before_raising_exception:
        The time to run for in seconds before raising an exception
    :type time_to_run_for_before_raising_exception: int or None
    :return: The compressed table entries
    :rtype: list(Entry)
    :raises MinimisationFailedError:
        If the smallest table that can be produced is larger than
        ``target_length``.
    """
    if get_config_bool("Mapping", "router_table_compress_as_far_as_possible"):
        # Compress as much as possible
        target_length = None
    else:
        target_length = Machine.ROUTER_ENTRIES

    # Keep target_length as None, as that flags "compress as far as possible"
    table, _ = ordered_covering(
        routing_table=routing_table,
        target_length=target_length,
        no_raise=True,
        use_timer_cut_off=use_timer_cut_off,
        time_to_run_for=time_to_run_for_before_raising_exception)
    return remove_default_routes(table, target_length)
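
The "generality" ordering the docstring refers to counts don't-care (X)
bits in a key-mask pair; a minimal self-contained sketch of that metric
(the helper name is an assumption, not from the source):

def generality(key, mask, width=32):
    # A bit is an X when the mask does not constrain it (mask bit == 0).
    return sum(1 for b in range(width) if not (mask >> b) & 1)

# A fully specified entry has generality 0; a catch-all entry has 32.
assert generality(0x0, 0xFFFFFFFF) == 0
assert generality(0x0, 0x00000000) == 32
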
Example 10
    def compress_tables(self, router_tables, progress):
        """ Compress all the unordered routing tables

        Tables that start off smaller than target_length are not compressed.

        :param MulticastRoutingTables router_tables: Routing tables
        :param ~spinn_utilities.progress_bar.ProgressBar progress:
            Progress bar to show while working
        :return: The compressed but still unordered routing tables
        :rtype: MulticastRoutingTables
        :raises MinimisationFailedError: on failure
        """
        compressed_tables = MulticastRoutingTables()
        self._problems = ""
        if get_config_bool("Mapping",
                           "router_table_compress_as_far_as_possible"):
            # Compress as much as possible
            target_length = 0
        else:
            target_length = Machine.ROUTER_ENTRIES
        for table in progress.over(router_tables.routing_tables):
            if table.number_of_entries < target_length:
                new_table = table
            else:
                compressed_table = self.compress_table(table)

                new_table = CompressedMulticastRoutingTable(table.x, table.y)

                for entry in compressed_table:
                    new_table.add_multicast_routing_entry(
                        entry.to_MulticastRoutingEntry())
                if new_table.number_of_entries > Machine.ROUTER_ENTRIES:
                    self._problems += "(x:{},y:{})={} ".format(
                        new_table.x, new_table.y, new_table.number_of_entries)

            compressed_tables.add_routing_table(new_table)

        if len(self._problems) > 0:
            if self._ordered and not self._accept_overflow:
                raise MinimisationFailedError(
                    "The routing table after compression will still not fit"
                    " within the machines router: {}".format(self._problems))
            else:
                logger.warning(self._problems)
        return compressed_tables
Example 11
    def compress_table(self, uncompressed):
        """ Compresses all the entries for a single table.

        Compresses the entries for this unordered table,
        returning a new table with possibly fewer entries.

        :param UnCompressedMulticastRoutingTable uncompressed:
            Original Routing table for a single chip
        :return: Compressed routing table for the same chip
        :rtype: CompressedMulticastRoutingTable or
            UnCompressedMulticastRoutingTable
        """
        # Check whether compression is needed at all
        if not get_config_bool("Mapping",
                               "router_table_compress_as_far_as_possible"):
            if uncompressed.number_of_entries < Machine.ROUTER_ENTRIES:
                return uncompressed

        # Step 1 get the entries and make sure they are sorted by key
        self._entries = uncompressed.multicast_routing_entries
        self._entries.sort(key=lambda x: x.routing_entry_key)
        if not self._validate():
            return uncompressed

        # Step 2 Create the results Table
        self._compressed = CompressedMulticastRoutingTable(
            uncompressed.x, uncompressed.y)

        # Step 3 Find ranges of entries with the same route and merge them
        # Start the first range
        route = self._entries[0].spinnaker_route
        first = 0
        for i, entry in enumerate(self._entries):
            # Keep building the range until the route changes
            if entry.spinnaker_route != route:
                # Merge all the entries in the range
                self._merge_range(first, i - 1)
                # Start the next range with this entry
                first = i
                route = entry.spinnaker_route
        # Merge the last range
        self._merge_range(first, i)

        # return the results as a list
        return self._compressed
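
The range-merging loop in step 3 groups consecutive entries that share a
SpiNNaker route before merging each group; the same idea on plain data,
as a minimal sketch (the merge_runs helper is illustrative only):

def merge_runs(routes):
    """Return (first, last) index pairs for each run of equal routes."""
    if not routes:
        return []
    runs, first = [], 0
    for i, route in enumerate(routes):
        if route != routes[first]:
            # The run ends just before the route changes.
            runs.append((first, i - 1))
            first = i
    runs.append((first, len(routes) - 1))
    return runs

assert merge_runs([5, 5, 7, 7, 7, 5]) == [(0, 1), (2, 4), (5, 5)]
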
Example 12
    def __init__(self,
                 n_neurons,
                 delay_per_stage,
                 n_delay_stages,
                 source_vertex,
                 constraints=None,
                 label="DelayExtension"):
        """
        :param int n_neurons: the number of neurons
        :param int delay_per_stage: the delay per stage
        :param int n_delay_stages: the (initial) number of delay stages needed
        :param ~pacman.model.graphs.application.ApplicationVertex \
                source_vertex:
            where messages are coming from
        :param iterable(~pacman.model.constraints.AbstractConstraint) \
                constraints:
            the vertex constraints
        :param str label: the vertex label
        """
        # pylint: disable=too-many-arguments
        super().__init__(label,
                         constraints,
                         POP_TABLE_MAX_ROW_LENGTH,
                         splitter=None)

        self.__source_vertex = source_vertex
        self.__n_delay_stages = n_delay_stages
        self.__delay_per_stage = delay_per_stage
        self.__delay_generator_data = defaultdict(list)

        # atom store
        self.__n_atoms = self.round_n_atoms(n_neurons, "n_neurons")

        # Dictionary of vertex_slice -> delay block for data specification
        self.__delay_blocks = dict()

        self.__drop_late_spikes = get_config_bool("Simulation",
                                                  "drop_late_spikes")
Example 13
    def debug(self, run_zero):
        # pylint: disable=protected-access
        reports = [
            # write_energy_report
            # EnergyReport._DETAILED_FILENAME,
            # EnergyReport._SUMMARY_FILENAME,
            # write_text_specs = False
            "data_spec_text_files",
            # write_router_reports
            reports_names._ROUTING_FILENAME,
            # write_partitioner_reports
            reports_names._PARTITIONING_FILENAME,
            # write_application_graph_placer_report
            reports_names._PLACEMENT_VTX_GRAPH_FILENAME,
            reports_names._PLACEMENT_CORE_GRAPH_FILENAME,
            reports_names._SDRAM_FILENAME,
            # write_machine_graph_placer_report
            reports_names._PLACEMENT_VTX_SIMPLE_FILENAME,
            reports_names._PLACEMENT_CORE_SIMPLE_FILENAME,
            # repeats reports_names._SDRAM_FILENAME,
            # write_router_info_report
            reports_names._VIRTKEY_FILENAME,
            # write_routing_table_reports
            reports_names._ROUTING_TABLE_DIR,
            reports_names._C_ROUTING_TABLE_DIR,
            reports_names._COMPARED_FILENAME,
            # write_routing_tables_from_machine_report
            routing_tables_from_machine_report,
            # write_memory_map_report
            memory_map_on_host_report,
            # write_network_specification_report
            network_specification_file_name,
            # write_provenance_data
            "provenance_data",
            # write_tag_allocation_reports
            reports_names._TAGS_FILENAME,
            # write_algorithm_timings
            # "provenance_data/pacman.xml"  = different test
            # write_board_chip_report
            AREA_CODE_REPORT_NAME,
            _GRAPH_NAME,
            _GRAPH_NAME + "." + _GRAPH_FORMAT,
        ]

        sim.setup(1.0)
        if (get_config_bool("Machine", "enable_advanced_monitor_support")
                and not get_config_bool("Java", "use_java")):
            # write_data_speed_up_report
            reports.append(
                DataSpeedUpPacketGatherMachineVertex.OUT_REPORT_NAME)
            reports.append(DataSpeedUpPacketGatherMachineVertex.IN_REPORT_NAME)
        pop = sim.Population(100, sim.IF_curr_exp, {}, label="pop")
        pop.record("v")
        inp = sim.Population(1,
                             sim.SpikeSourceArray(spike_times=[0]),
                             label="input")
        sim.Projection(inp,
                       pop,
                       sim.AllToAllConnector(),
                       synapse_type=sim.StaticSynapse(weight=5))
        if run_zero:
            sim.run(0)
        sim.run(1000)
        pop.get_data("v")
        sim.end()

        report_directory = globals_variables.report_default_directory()
        found = os.listdir(report_directory)
        for report in reports:
            self.assertIn(report, found)
Example 14
    def _set_up_timings(self, timestep, min_delay, time_scale_factor):
        """
        :param timestep: machine_time_step in milliseconds
        :type timestep: float or None
        :type min_delay: int or None
        :type time_scale_factor: int or None
        """

        # Get the standard values
        if timestep is None:
            self.set_up_timings(timestep, time_scale_factor)
        else:
            self.set_up_timings(
                math.ceil(timestep * MICRO_TO_MILLISECOND_CONVERSION),
                time_scale_factor)

        # Sort out the minimum delay
        if (min_delay is not None and
                min_delay < self.machine_time_step_ms):
            raise ConfigurationException(
                f"Pacman does not support min delays below "
                f"{self.machine_time_step_ms} ms with the current machine "
                f"time step")
        if min_delay is not None:
            self.__min_delay = min_delay
        else:
            self.__min_delay = self.machine_time_step_ms

        # Sort out the time scale factor if not user specified
        # (including config)
        if self.time_scale_factor is None:
            self.time_scale_factor = max(
                1.0, math.ceil(
                    MICRO_TO_MILLISECOND_CONVERSION / self.machine_time_step))
            if self.time_scale_factor > 1:
                logger.warning(
                    "A timestep was entered that has forced sPyNNaker to "
                    "automatically slow the simulation down from real time "
                    "by a factor of {}. To remove this automatic behaviour, "
                    "please enter a timescaleFactor value in your .{}",
                    self.time_scale_factor, CONFIG_FILE_NAME)

        # Check the combination of machine time step and time scale factor
        if (self.machine_time_step_ms * self.time_scale_factor < 1):
            if not get_config_bool(
                    "Mode", "violate_1ms_wall_clock_restriction"):
                raise ConfigurationException(
                    "The combination of simulation time step and the machine "
                    "time scale factor results in a wall clock timer tick "
                    "that is currently not reliably supported by the"
                    "SpiNNaker machine.  If you would like to override this"
                    "behaviour (at your own risk), please add "
                    "violate_1ms_wall_clock_restriction = True to the [Mode] "
                    "section of your .{} file".format(CONFIG_FILE_NAME))
            logger.warning(
                "****************************************************")
            logger.warning(
                "*** The combination of simulation time step and  ***")
            logger.warning(
                "*** the machine time scale factor results in a   ***")
            logger.warning(
                "*** wall clock timer tick that is currently not  ***")
            logger.warning(
                "*** reliably supported by the SpiNNaker machine. ***")
            logger.warning(
                "****************************************************")
Example 15
    def __init__(self,
                 graph_label,
                 database_socket_addresses,
                 n_chips_required,
                 n_boards_required,
                 timestep,
                 min_delay,
                 hostname,
                 user_extra_algorithm_xml_path=None,
                 user_extra_mapping_inputs=None,
                 user_extra_algorithms_pre_run=None,
                 time_scale_factor=None,
                 extra_post_run_algorithms=None,
                 extra_mapping_algorithms=None,
                 extra_load_algorithms=None,
                 front_end_versions=None):
        """
        :param str graph_label:
        :param database_socket_addresses:
        :type database_socket_addresses:
            iterable(~spinn_utilities.socket_address.SocketAddress)
        :param n_chips_required:
        :type n_chips_required: int or None
        :param n_boards_required:
        :type n_boards_required: int or None
        :param timestep:
            machine_time_step but in milliseconds. If None, uses the cfg value
        :type timestep: float or None
        :param float min_delay:
        :param str hostname:
        :param user_extra_algorithm_xml_path:
        :type user_extra_algorithm_xml_path: str or None
        :param user_extra_mapping_inputs:
        :type user_extra_mapping_inputs: dict(str, Any) or None
        :param user_extra_algorithms_pre_run:
        :type user_extra_algorithms_pre_run: list(str) or None
        :param time_scale_factor:
        :type time_scale_factor: float or None
        :param extra_post_run_algorithms:
        :type extra_post_run_algorithms: list(str) or None
        :param extra_mapping_algorithms:
        :type extra_mapping_algorithms: list(str) or None
        :param extra_load_algorithms:
        :type extra_load_algorithms: list(str) or None
        :param front_end_versions:
        :type front_end_versions: list(tuple(str,str)) or None
        """
        # pylint: disable=too-many-arguments, too-many-locals

        setup_configs()

        # add model binaries
        self.__EXECUTABLE_FINDER.add_path(
            os.path.dirname(model_binaries.__file__))

        # pynn population objects
        self._populations = []
        self._projections = []
        self.__edge_count = 0
        self.__id_counter = 0

        # the number of edges that are associated with commands being sent to
        # a vertex
        self.__command_edge_count = 0
        self.__live_spike_recorder = dict()

        # create XML path for where to locate sPyNNaker related functions when
        # using auto pause and resume
        extra_algorithm_xml_path = list()
        extra_algorithm_xml_path.append(
            os.path.join(os.path.dirname(extra_algorithms.__file__),
                         "algorithms_metadata.xml"))
        if user_extra_algorithm_xml_path is not None:
            extra_algorithm_xml_path.extend(user_extra_algorithm_xml_path)

        # timing parameters
        self.__min_delay = None

        self.__neurons_per_core_set = set()

        versions = [("sPyNNaker", version)]
        if front_end_versions is not None:
            versions.extend(front_end_versions)

        super().__init__(executable_finder=self.__EXECUTABLE_FINDER,
                         graph_label=graph_label,
                         database_socket_addresses=database_socket_addresses,
                         extra_algorithm_xml_paths=extra_algorithm_xml_path,
                         n_chips_required=n_chips_required,
                         n_boards_required=n_boards_required,
                         front_end_versions=versions)

        # update inputs needed by the machine level calls.

        extra_mapping_inputs = dict()
        extra_mapping_inputs["SynapticExpanderReadIOBuf"] = \
            get_config_bool("Reports", "write_expander_iobuf")
        if user_extra_mapping_inputs is not None:
            extra_mapping_inputs.update(user_extra_mapping_inputs)

        if extra_mapping_algorithms is None:
            extra_mapping_algorithms = []
        if extra_load_algorithms is None:
            extra_load_algorithms = []
        if extra_post_run_algorithms is None:
            extra_post_run_algorithms = []
        extra_load_algorithms.append("SynapseExpander")
        extra_load_algorithms.append("OnChipBitFieldGenerator")
        extra_load_algorithms.append("FinishConnectionHolders")
        extra_algorithms_pre_run = []

        if get_config_bool("Reports", "write_network_graph"):
            extra_mapping_algorithms.append(
                "SpYNNakerNeuronGraphNetworkSpecificationReport")

        if get_config_bool("Reports", "reports_enabled"):
            if get_config_bool("Reports", "write_synaptic_report"):
                logger.warning(
                    "write_synaptic_report ignored due to https://github.com/"
                    "SpiNNakerManchester/sPyNNaker/issues/1081")
                # extra_algorithms_pre_run.append("SynapticMatrixReport")
        if user_extra_algorithms_pre_run is not None:
            extra_algorithms_pre_run.extend(user_extra_algorithms_pre_run)

        self.update_extra_mapping_inputs(extra_mapping_inputs)
        self.extend_extra_mapping_algorithms(extra_mapping_algorithms)
        self.prepend_extra_pre_run_algorithms(extra_algorithms_pre_run)
        self.extend_extra_post_run_algorithms(extra_post_run_algorithms)
        self.extend_extra_load_algorithms(extra_load_algorithms)

        # set up machine targeted data
        self._set_up_timings(timestep, min_delay, time_scale_factor)
        self.set_up_machine_specifics(hostname)

        logger.info(f'Setting time scale factor to '
                    f'{self.time_scale_factor}.')

        # get the machine time step
        logger.info(f'Setting machine time step to '
                    f'{self.machine_time_step} '
                    f'microseconds.')