def test_stop_init(self):
    """Check that stop() closes the machine allocation controller, and
    that calling stop() a second time is safe.
    """
    # Run from the directory containing this test module so relative
    # config paths resolve correctly.
    module_file = sys.modules[self.__module__].__file__
    os.chdir(os.path.dirname(os.path.abspath(module_file)))
    interface = AbstractSpinnakerBase(base.CONFIG_FILE, ExecutableFinder())
    mock_controller = Close_Once()
    interface._machine_allocation_controller = mock_controller
    self.assertFalse(mock_controller.closed)
    interface.stop(
        turn_off_machine=False, clear_routing_tables=False,
        clear_tags=False)
    self.assertTrue(mock_controller.closed)
    # A second stop must not fail even though the controller is closed.
    interface.stop(
        turn_off_machine=False, clear_routing_tables=False,
        clear_tags=False)
 def test_min_init(self):
     """Smoke test: the base interface can be constructed with only the
     config file and an executable finder.
     """
     module_file = sys.modules[self.__module__].__file__
     path = os.path.dirname(os.path.abspath(module_file))
     os.chdir(path)
     print(path)
     AbstractSpinnakerBase(CONFIG_FILE, ExecutableFinder())
 def __init__(self, machine_time_step=None, time_scale_factor=None):
     """Build the base interface with the shared config file, then apply
     the requested timing configuration.

     :param machine_time_step: simulation time step, or None for default
     :param time_scale_factor: slow-down factor, or None for default
     """
     super(MainInterfaceTimingImpl, self).__init__(base.CONFIG_FILE,
                                                   ExecutableFinder())
     self.set_up_timings(machine_time_step, time_scale_factor)
# ---- Example 4 ----
def setup(hostname=None,
          graph_label=None,
          model_binary_module=None,
          model_binary_folder=None,
          database_socket_addresses=(),
          user_dsg_algorithm=None,
          n_chips_required=None,
          n_boards_required=None,
          extra_pre_run_algorithms=(),
          extra_post_run_algorithms=(),
          time_scale_factor=None,
          machine_time_step=None):
    """ Set up a graph, ready to have vertices and edges added to it, and
    the simulator engine that will execute the graph.

    .. note::
        This must be called *before* the other functions in this API.

    :param str hostname:
        hostname of the SpiNNaker machine to operate on
        (overrides the ``machine_name`` from the cfg file)
    :param str graph_label:
        a human-readable label for the graph (used mainly in reports)
    :param ~types.ModuleType model_binary_module:
        the Python module where the binary files (``.aplx``) can be found
        for the compiled C code being used in this application; mutually
        exclusive with ``model_binary_folder``
    :param str model_binary_folder:
        the folder where the binary files can be found for the C code being
        used in this application; mutually exclusive with
        ``model_binary_module``
    :param database_socket_addresses:
        set of SocketAddresses to be added for the database notification
        system. These are over and above the ones used by the
        :py:class:`~spinn_front_end_common.utilities.connections.LiveEventConnection`
    :type database_socket_addresses:
        ~collections.abc.Iterable(~spinn_utilities.socket_address.SocketAddress)
    :param str user_dsg_algorithm:
        an algorithm used for generating the application data loaded onto
        the machine. If not set, the data specification language algorithm
        required for the type of graph being used is applied.
    :param n_chips_required:
        Deprecated! Use ``n_boards_required`` instead.
        Must be ``None`` if ``n_boards_required`` is specified.
    :type n_chips_required: int or None
    :param n_boards_required:
        if you need to be allocated a machine (for spalloc) before building
        your graph, fill this in with a general idea of the number of
        boards you need so a large enough machine can be allocated
    :type n_boards_required: int or None
    :param ~collections.abc.Iterable(str) extra_pre_run_algorithms:
        algorithms to run after mapping and loading have occurred but
        before the system runs; plugged directly into the workflow
        management
    :param ~collections.abc.Iterable(str) extra_post_run_algorithms:
        algorithms to run after the simulation has run, e.g. post
        processing of data generated on the machine
    :param time_scale_factor:
        factor by which to slow the simulation relative to real time,
        or None to use the configured value
    :type time_scale_factor: int or None
    :param machine_time_step:
        the simulation time step, or None to use the configured value
    :type machine_time_step: int or None
    :raise ~spinn_front_end_common.utilities.exceptions.ConfigurationException:
        if mutually exclusive options are given.
    """
    # pylint: disable=redefined-outer-name
    logger.info("SpiNNaker graph front end (c) {}, University of Manchester",
                __version_year__)
    parent_dir = os.path.split(os.path.split(gfe_file.__file__)[0])[0]
    logger.info("Release version {}({}) - {} {}. Installed in folder {}",
                __version__, __version_name__, __version_month__,
                __version_year__, parent_dir)

    # Work out where the compiled binaries live: an explicit module, an
    # explicit folder, or (by default) next to the running script.
    finder = ExecutableFinder()
    if model_binary_module is not None:
        binary_path = os.path.dirname(model_binary_module.__file__)
    elif model_binary_folder is not None:
        binary_path = model_binary_folder
    else:
        binary_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    finder.add_path(binary_path)

    # set up the spinnaker object; after this, _sim() returns this object
    SpiNNaker(host_name=hostname,
              graph_label=graph_label,
              executable_finder=finder,
              database_socket_addresses=database_socket_addresses,
              dsg_algorithm=user_dsg_algorithm,
              n_chips_required=n_chips_required,
              n_boards_required=n_boards_required,
              extra_pre_run_algorithms=extra_pre_run_algorithms,
              extra_post_run_algorithms=extra_post_run_algorithms,
              machine_time_step=machine_time_step,
              time_scale_factor=time_scale_factor)
class AbstractSpiNNakerCommon(
        with_metaclass(AbstractBase, AbstractSpinnakerBase,
                       SpynnakerSimulatorInterface)):
    """ Main interface for neural code.
    """
    __slots__ = [
        "__command_edge_count", "__edge_count", "__id_counter",
        "__live_spike_recorder", "__max_delay", "__min_delay",
        "__neurons_per_core_set", "_populations", "_projections"
    ]

    # Name of the sPyNNaker-specific configuration file.
    CONFIG_FILE_NAME = "spynnaker.cfg"

    # Shared by all instances so that paths registered via
    # register_binary_search_path() are visible to every simulator.
    __EXECUTABLE_FINDER = ExecutableFinder()

    def __init__(self,
                 graph_label,
                 database_socket_addresses,
                 n_chips_required,
                 n_boards_required,
                 timestep,
                 max_delay,
                 min_delay,
                 hostname,
                 user_extra_algorithm_xml_path=None,
                 user_extra_mapping_inputs=None,
                 user_extra_algorithms_pre_run=None,
                 time_scale_factor=None,
                 extra_post_run_algorithms=None,
                 extra_mapping_algorithms=None,
                 extra_load_algorithms=None,
                 front_end_versions=None):
        """ Set up the simulator: model binary paths, algorithm lists,
        XML descriptor paths, timing parameters and machine specifics.

        :param graph_label: human-readable label for the graph
        :param database_socket_addresses: sockets for the database
            notification system
        :param n_chips_required: number of chips needed, or None
        :param n_boards_required: number of boards needed, or None
        :param timestep: simulation time step, or None for the configured
            default; multiplied by MICRO_TO_MILLISECOND_CONVERSION below,
            so presumably given in milliseconds -- TODO confirm callers
        :param max_delay: requested maximum synaptic delay, or None
        :param min_delay: requested minimum synaptic delay, or None
        :param hostname: hostname of the SpiNNaker machine, or None
        :param user_extra_algorithm_xml_path: extra algorithm XML paths
        :param user_extra_mapping_inputs: extra mapping inputs (dict)
        :param user_extra_algorithms_pre_run: extra pre-run algorithms
        :param time_scale_factor: slow-down factor, or None for automatic
        :param extra_post_run_algorithms: extra post-run algorithms
        :param extra_mapping_algorithms: extra mapping algorithms
        :param extra_load_algorithms: extra load-phase algorithms
        :param front_end_versions: extra (name, version) pairs to report
        """
        # pylint: disable=too-many-arguments, too-many-locals

        # add model binaries
        self.__EXECUTABLE_FINDER.add_path(
            os.path.dirname(model_binaries.__file__))

        # pynn population objects
        self._populations = []
        self._projections = []
        self.__edge_count = 0
        self.__id_counter = 0

        # the number of edges that are associated with commands being sent to
        # a vertex
        self.__command_edge_count = 0
        self.__live_spike_recorder = dict()

        # create XML path for where to locate sPyNNaker related functions when
        # using auto pause and resume
        extra_algorithm_xml_path = list()
        extra_algorithm_xml_path.append(
            os.path.join(os.path.dirname(overridden_pacman_functions.__file__),
                         "algorithms_metadata.xml"))
        extra_algorithm_xml_path.append(
            os.path.join(os.path.dirname(synapse_expander.__file__),
                         "synapse_expander.xml"))
        if user_extra_algorithm_xml_path is not None:
            extra_algorithm_xml_path.extend(user_extra_algorithm_xml_path)

        # timing parameters (filled in by _set_up_timings below)
        self.__min_delay = None
        self.__max_delay = None

        self.__neurons_per_core_set = set()

        versions = [("sPyNNaker", version)]
        if front_end_versions is not None:
            versions.extend(front_end_versions)

        super(AbstractSpiNNakerCommon, self).__init__(
            configfile=self.CONFIG_FILE_NAME,
            executable_finder=self.__EXECUTABLE_FINDER,
            graph_label=graph_label,
            database_socket_addresses=database_socket_addresses,
            extra_algorithm_xml_paths=extra_algorithm_xml_path,
            n_chips_required=n_chips_required,
            n_boards_required=n_boards_required,
            default_config_paths=[
                os.path.join(os.path.dirname(__file__), self.CONFIG_FILE_NAME)
            ],
            front_end_versions=versions)

        # self.config is only available after the super().__init__ call.
        extra_mapping_inputs = dict()
        extra_mapping_inputs['CreateAtomToEventIdMapping'] = \
            self.config.getboolean(
                "Database", "create_routing_info_to_neuron_id_mapping")
        if user_extra_mapping_inputs is not None:
            extra_mapping_inputs.update(user_extra_mapping_inputs)

        if extra_mapping_algorithms is None:
            extra_mapping_algorithms = []
        if extra_load_algorithms is None:
            extra_load_algorithms = []
        if extra_post_run_algorithms is None:
            extra_post_run_algorithms = []
        extra_load_algorithms.append("SynapseExpander")
        extra_algorithms_pre_run = []

        if self.config.getboolean("Reports", "draw_network_graph"):
            extra_mapping_algorithms.append(
                "SpYNNakerConnectionHolderGenerator")
            extra_load_algorithms.append(
                "SpYNNakerNeuronGraphNetworkSpecificationReport")

        if self.config.getboolean("Reports", "reports_enabled"):
            if self.config.getboolean("Reports", "write_synaptic_report"):
                extra_algorithms_pre_run.append("SynapticMatrixReport")
        if user_extra_algorithms_pre_run is not None:
            extra_algorithms_pre_run.extend(user_extra_algorithms_pre_run)

        self.update_extra_mapping_inputs(extra_mapping_inputs)
        self.extend_extra_mapping_algorithms(extra_mapping_algorithms)
        self.prepend_extra_pre_run_algorithms(extra_algorithms_pre_run)
        self.extend_extra_post_run_algorithms(extra_post_run_algorithms)
        self.extend_extra_load_algorithms(extra_load_algorithms)

        # set up machine targeted data
        self._set_up_timings(timestep, min_delay, max_delay, self.config,
                             time_scale_factor)
        self.set_up_machine_specifics(hostname)

        logger.info("Setting time scale factor to {}.", self.time_scale_factor)

        # get the machine time step
        logger.info("Setting machine time step to {} micro-seconds.",
                    self.machine_time_step)

    def _set_up_timings(self, timestep, min_delay, max_delay, config,
                        time_scale_factor):
        """ Configure machine time step, delay bounds and time scale factor.

        :param timestep: time step, or None to use the configured default;
            converted via MICRO_TO_MILLISECOND_CONVERSION before being
            passed on, so presumably in milliseconds -- TODO confirm
        :param min_delay: requested minimum delay, or None
        :param max_delay: requested maximum delay, or None
        :param config: the parsed configuration object
        :param time_scale_factor: slow-down factor, or None for automatic
        :raise ConfigurationException: if the delay bounds or the resulting
            wall clock timer tick cannot be supported
        """
        # pylint: disable=too-many-arguments

        # Get the standard values
        if timestep is None:
            self.set_up_timings(timestep, time_scale_factor)
        else:
            self.set_up_timings(
                math.ceil(timestep * MICRO_TO_MILLISECOND_CONVERSION),
                time_scale_factor)

        # Sort out the minimum delay
        if (min_delay is not None
                and (min_delay * MICRO_TO_MILLISECOND_CONVERSION) <
                self.machine_time_step):
            raise ConfigurationException(
                "Pacman does not support min delays below {} ms with the "
                "current machine time step".format(
                    constants.MIN_SUPPORTED_DELAY * self.machine_time_step))
        if min_delay is not None:
            self.__min_delay = min_delay
        else:
            self.__min_delay = (self.machine_time_step /
                                MICRO_TO_MILLISECOND_CONVERSION)

        # Sort out the maximum delay: models support a number of delay
        # ticks natively; the delay extension adds more on top.
        natively_supported_delay_for_models = \
            constants.MAX_SUPPORTED_DELAY_TICS
        delay_extension_max_supported_delay = (
            constants.MAX_DELAY_BLOCKS *
            constants.MAX_TIMER_TICS_SUPPORTED_PER_BLOCK)
        max_delay_tics_supported = \
            natively_supported_delay_for_models + \
            delay_extension_max_supported_delay
        if (max_delay is not None
                and max_delay * MICRO_TO_MILLISECOND_CONVERSION >
                max_delay_tics_supported * self.machine_time_step):
            # NOTE(review): 0.144 looks like a hard-coded value of
            # max_delay_tics_supported / MICRO_TO_MILLISECOND_CONVERSION;
            # confirm and consider deriving it instead of a magic number.
            raise ConfigurationException(
                "Pacman does not support max delays above {} ms with the "
                "current machine time step".format(0.144 *
                                                   self.machine_time_step))
        if max_delay is not None:
            self.__max_delay = max_delay
        else:
            self.__max_delay = (
                max_delay_tics_supported *
                (self.machine_time_step / MICRO_TO_MILLISECOND_CONVERSION))

        # Sort out the time scale factor if not user specified
        # (including config)
        if self.time_scale_factor is None:
            self.time_scale_factor = max(
                1.0,
                math.ceil(MICRO_TO_MILLISECOND_CONVERSION /
                          self.machine_time_step))
            if self.time_scale_factor > 1:
                logger.warning(
                    "A timestep was entered that has forced sPyNNaker to "
                    "automatically slow the simulation down from real time "
                    "by a factor of {}. To remove this automatic behaviour, "
                    "please enter a timescaleFactor value in your .{}",
                    self.time_scale_factor, self.CONFIG_FILE_NAME)

        # Check the combination of machine time step and time scale factor
        if (self.machine_time_step * self.time_scale_factor <
                MICRO_TO_MILLISECOND_CONVERSION):
            if not config.getboolean("Mode",
                                     "violate_1ms_wall_clock_restriction"):
                raise ConfigurationException(
                    "The combination of simulation time step and the machine "
                    "time scale factor results in a wall clock timer tick "
                    "that is currently not reliably supported by the"
                    "SpiNNaker machine.  If you would like to override this"
                    "behaviour (at your own risk), please add "
                    "violate_1ms_wall_clock_restriction = True to the [Mode] "
                    "section of your .{} file".format(self.CONFIG_FILE_NAME))
            logger.warning(
                "****************************************************")
            logger.warning(
                "*** The combination of simulation time step and  ***")
            logger.warning(
                "*** the machine time scale factor results in a   ***")
            logger.warning(
                "*** wall clock timer tick that is currently not  ***")
            logger.warning(
                "*** reliably supported by the SpiNNaker machine. ***")
            logger.warning(
                "****************************************************")

    def _detect_if_graph_has_changed(self, reset_flags=True):
        """ Iterate though the graph and look for changes.

        Extends the base implementation by also checking the populations
        and projections tracked by this class.
        """
        changed, data_changed = super(AbstractSpiNNakerCommon, self).\
            _detect_if_graph_has_changed(reset_flags)

        # Additionally check populations for changes
        for population in self._populations:
            if population.requires_mapping:
                changed = True
            if reset_flags:
                population.mark_no_changes()

        # Additionally check projections for changes
        for projection in self._projections:
            if projection.requires_mapping:
                changed = True
            if reset_flags:
                projection.mark_no_changes()

        return changed, data_changed

    @property
    def min_delay(self):
        """ The minimum supported delay, in milliseconds.
        """
        return self.__min_delay

    @property
    def max_delay(self):
        """ The maximum supported delay, in milliseconds.
        """
        return self.__max_delay

    def add_application_vertex(self, vertex):
        """ Add a vertex to the graph, remembering the command sender if
        the vertex is one.
        """
        if isinstance(vertex, CommandSender):
            self._command_sender = vertex

        AbstractSpinnakerBase.add_application_vertex(self, vertex)

    @staticmethod
    def _count_unique_keys(commands):
        """ Return the number of distinct keys used by the commands. """
        unique_keys = {command.key for command in commands}
        return len(unique_keys)

    def add_population(self, population):
        """ Called by each population to add itself to the list.
        """
        self._populations.append(population)

    def add_projection(self, projection):
        """ Called by each projection to add itself to the list.
        """
        self._projections.append(projection)

    def stop(self,
             turn_off_machine=None,
             clear_routing_tables=None,
             clear_tags=None):
        """
        :param turn_off_machine: decides if the machine should be powered down\
            after running the execution. Note that this powers down all boards\
            connected to the BMP connections given to the transceiver
        :type turn_off_machine: bool
        :param clear_routing_tables: informs the tool chain if it\
            should turn off the clearing of the routing tables
        :type clear_routing_tables: bool
        :param clear_tags: informs the tool chain if it should clear the tags\
            off the machine at stop
        :type clear_tags: boolean
        :rtype: None
        """
        # pylint: disable=protected-access
        # End every population before shutting down the machine.
        for population in self._populations:
            population._end()

        super(AbstractSpiNNakerCommon,
              self).stop(turn_off_machine, clear_routing_tables, clear_tags)
        self.reset_number_of_neurons_per_core()
        globals_variables.unset_simulator()

    def run(self, run_time):
        """ Run the model created.

        :param run_time: the time (in milliseconds) to run the simulation for
        """
        # pylint: disable=protected-access

        # extra post run algorithms
        self._dsg_algorithm = "SpynnakerDataSpecificationWriter"
        # Drop cached projection data before the run invalidates it.
        for projection in self._projections:
            projection._clear_cache()
        super(AbstractSpiNNakerCommon, self).run(run_time)

    @staticmethod
    def register_binary_search_path(search_path):
        """ Register an additional binary search path for executables.

        :param search_path: absolute search path for binaries
        """
        # pylint: disable=protected-access
        AbstractSpiNNakerCommon.__EXECUTABLE_FINDER.add_path(search_path)

    def set_number_of_neurons_per_core(self, neuron_type, max_permitted):
        """ Set the maximum number of atoms per core for the given vertex
        type, recording the type so it can be reset later.

        :param neuron_type: vertex type with set_model_max_atoms_per_core
        :param max_permitted: the new per-core maximum
        :raise Exception: if neuron_type is not a vertex type
        """
        if not hasattr(neuron_type, "set_model_max_atoms_per_core"):
            raise Exception("{} is not a Vertex type".format(neuron_type))

        if hasattr(neuron_type, "get_max_atoms_per_core"):
            previous = neuron_type.get_max_atoms_per_core()
            # Only allow the per-core maximum to be lowered.
            if previous < max_permitted:
                logger.warning(
                    "Attempt to increase number_of_neurons_per_core "
                    "from {} to {} ignored", previous, max_permitted)
                return
        neuron_type.set_model_max_atoms_per_core(max_permitted)
        self.__neurons_per_core_set.add(neuron_type)

    def reset_number_of_neurons_per_core(self):
        """ Restore the default atoms-per-core for every vertex type that
        was changed via set_number_of_neurons_per_core().
        """
        for neuron_type in self.__neurons_per_core_set:
            neuron_type.set_model_max_atoms_per_core()

    def get_projections_data(self, projection_to_attribute_map):
        """ Common data extractor for projection data. Allows fully \
            exploitation of the ????

        :param projection_to_attribute_map: \
            the projection to attributes mapping
        :type projection_to_attribute_map: \
            dict of projection with set of attributes
        :return: a extracted data object with get method for getting the data
        :rtype: \
            :py:class:`spynnaker.pyNN.utilities.extracted_data.ExtractedData`
        """
        # pylint: disable=protected-access

        # build data structure for holding data
        mother_lode = ExtractedData()

        # acquire data objects from front end
        using_monitors = self._last_run_outputs["UsingAdvancedMonitorSupport"]

        # if using extra monitor functionality, locate extra data items
        receivers = list()
        if using_monitors:
            receivers = self._locate_receivers_from_projections(
                projection_to_attribute_map.keys(),
                self.get_generated_output(
                    "MemoryMCGatherVertexToEthernetConnectedChipMapping"),
                self.get_generated_output("MemoryExtraMonitorToChipMapping"))

        # set up the router timeouts to stop packet loss
        for data_receiver, extra_monitor_cores in receivers:
            data_receiver.load_system_routing_tables(
                self._txrx,
                self.get_generated_output("MemoryExtraMonitorVertices"),
                self._placements)
            data_receiver.set_cores_for_data_streaming(
                self._txrx, list(extra_monitor_cores), self._placements)

        # acquire the data
        for projection in projection_to_attribute_map:
            for attribute in projection_to_attribute_map[projection]:
                data = projection._get_synaptic_data(
                    as_list=True,
                    data_to_get=attribute,
                    fixed_values=None,
                    notify=None,
                    handle_time_out_configuration=False)
                mother_lode.set(projection, attribute, data)

        # reset time outs for the receivers
        for data_receiver, extra_monitor_cores in receivers:
            data_receiver.unset_cores_for_data_streaming(
                self._txrx, list(extra_monitor_cores), self._placements)
            data_receiver.load_application_routing_tables(
                self._txrx,
                self.get_generated_output("MemoryExtraMonitorVertices"),
                self._placements)

        # return data items
        return mother_lode

    def _locate_receivers_from_projections(self, projections, gatherers,
                                           extra_monitors_per_chip):
        """ Locate receivers and their corresponding monitor cores for\
            setting router time-outs.

        :param projections: the projections going to be read
        :param gatherers: the gatherers per Ethernet chip
        :param extra_monitors_per_chip: the extra monitor cores per chip
        :return: list of tuples with gatherer and its extra monitor cores
        """
        # pylint: disable=protected-access
        important_gathers = set()

        # iterate though projections
        for projection in projections:

            # iteration though the projections machine edges to locate chips
            edges = self._graph_mapper.get_machine_edges(
                projection._projection_edge)

            for edge in edges:
                placement = self._placements.get_placement_of_vertex(
                    edge.post_vertex)
                chip = self._machine.get_chip_at(placement.x, placement.y)

                # locate extra monitor cores on the board of this chip
                extra_monitor_cores_on_board = set(
                    extra_monitors_per_chip[xy]
                    for xy in self._machine.get_existing_xys_on_board(chip))

                # map gatherer to extra monitor cores for board
                # (frozenset so the pair is hashable for the set)
                important_gathers.add(
                    (gatherers[(chip.nearest_ethernet_x,
                                chip.nearest_ethernet_y)],
                     frozenset(extra_monitor_cores_on_board)))
        return list(important_gathers)

    @property
    def id_counter(self):
        """ Getter for id_counter, currently used by the populations.

        .. note::
            Maybe it could live in the pop class???

        :rtype: int
        """
        return self.__id_counter

    @id_counter.setter
    def id_counter(self, new_value):
        """ Setter for id_counter, currently used by the populations.

        .. note::
            Maybe it could live in the pop class???

        :param new_value: new value for id_counter
        :type new_value: int
        """
        self.__id_counter = new_value
# ---- Example 6 ----
class AbstractSpiNNakerCommon(AbstractSpinnakerBase):
    """ Main interface for neural code.
    """
    __slots__ = [
        "__command_edge_count",
        "__edge_count",
        "__id_counter",
        "__live_spike_recorder",
        "__min_delay",
        "__neurons_per_core_set",
        "_populations",
        "_projections"]

    __EXECUTABLE_FINDER = ExecutableFinder()

    def __init__(
            self, graph_label, database_socket_addresses, n_chips_required,
            n_boards_required, timestep, min_delay,
            time_scale_factor=None):
        """
        :param str graph_label:
        :param database_socket_addresses:
        :type database_socket_addresses:
            iterable(~spinn_utilities.socket_address.SocketAddress)
        :param n_chips_required:
        :type n_chips_required: int or None
        :param n_boards_required:
        :type n_boards_required: int or None
        :param timestep:
            machine_time_step but in milli seconds. If None uses the cfg value
        :type timestep: float or None
        :param float min_delay:
        :param str hostname:
        :param time_scale_factor:
        :type time_scale_factor: float or None
        """
        # pylint: disable=too-many-arguments, too-many-locals

        setup_configs()

        # add model binaries
        self.__EXECUTABLE_FINDER.add_path(
            os.path.dirname(model_binaries.__file__))

        # pynn population objects
        self._populations = []
        self._projections = []
        self.__edge_count = 0
        self.__id_counter = 0

        # the number of edges that are associated with commands being sent to
        # a vertex
        self.__command_edge_count = 0
        self.__live_spike_recorder = dict()

        # timing parameters
        self.__min_delay = None

        self.__neurons_per_core_set = set()

        super().__init__(
            executable_finder=self.__EXECUTABLE_FINDER,
            graph_label=graph_label,
            database_socket_addresses=database_socket_addresses,
            n_chips_required=n_chips_required,
            n_boards_required=n_boards_required)

        # set up machine targeted data
        self._set_up_timings(timestep, min_delay, time_scale_factor)
        self.check_machine_specifics()

        logger.info(f'Setting time scale factor to '
                    f'{self.time_scale_factor}.')

        # get the machine time step
        logger.info(f'Setting machine time step to '
                    f'{self.machine_time_step} '
                    f'micro-seconds.')

    def _set_up_timings(self, timestep, min_delay, time_scale_factor):
        """
        :param timestep: machine_time_Step in milli seconds
        :type timestep: float or None
        :tpye min_delay: int or None
        :type time_scale_factor: int or None
        """

        # Get the standard values
        if timestep is None:
            self.set_up_timings(timestep, time_scale_factor)
        else:
            self.set_up_timings(
                math.ceil(timestep * MICRO_TO_MILLISECOND_CONVERSION),
                time_scale_factor)

        # Sort out the minimum delay
        if (min_delay is not None and
                min_delay < self.machine_time_step_ms):
            raise ConfigurationException(
                f"Pacman does not support min delays below "
                f"{constants.MIN_SUPPORTED_DELAY * self.machine_time_step} "
                f"ms with the current machine time step")
        if min_delay is not None:
            self.__min_delay = min_delay
        else:
            self.__min_delay = self.machine_time_step_ms

        # Sort out the time scale factor if not user specified
        # (including config)
        if self.time_scale_factor is None:
            self.time_scale_factor = max(
                1.0, math.ceil(
                    MICRO_TO_MILLISECOND_CONVERSION / self.machine_time_step))
            if self.time_scale_factor > 1:
                logger.warning(
                    "A timestep was entered that has forced sPyNNaker to "
                    "automatically slow the simulation down from real time "
                    "by a factor of {}. To remove this automatic behaviour, "
                    "please enter a timescaleFactor value in your .{}",
                    self.time_scale_factor, CONFIG_FILE_NAME)

        # Check the combination of machine time step and time scale factor
        if (self.machine_time_step_ms * self.time_scale_factor < 1):
            if not get_config_bool(
                    "Mode", "violate_1ms_wall_clock_restriction"):
                raise ConfigurationException(
                    "The combination of simulation time step and the machine "
                    "time scale factor results in a wall clock timer tick "
                    "that is currently not reliably supported by the"
                    "SpiNNaker machine.  If you would like to override this"
                    "behaviour (at your own risk), please add "
                    "violate_1ms_wall_clock_restriction = True to the [Mode] "
                    "section of your .{} file".format(CONFIG_FILE_NAME))
            logger.warning(
                "****************************************************")
            logger.warning(
                "*** The combination of simulation time step and  ***")
            logger.warning(
                "*** the machine time scale factor results in a   ***")
            logger.warning(
                "*** wall clock timer tick that is currently not  ***")
            logger.warning(
                "*** reliably supported by the SpiNNaker machine. ***")
            logger.warning(
                "****************************************************")

    def _detect_if_graph_has_changed(self, reset_flags=True):
        """ Iterate though the graph and look for changes.

        :param bool reset_flags:
        """
        changed, data_changed = super()._detect_if_graph_has_changed(
            reset_flags)

        # Additionally check populations for changes
        for population in self._populations:
            if population.requires_mapping:
                changed = True
            if reset_flags:
                population.mark_no_changes()

        # Additionally check projections for changes
        for projection in self._projections:
            if projection.requires_mapping:
                changed = True
            if reset_flags:
                projection.mark_no_changes()

        return changed, data_changed

    @property
    def min_delay(self):
        """ The minimum supported delay, in milliseconds.
        """
        return self.__min_delay

    def add_application_vertex(self, vertex):
        if isinstance(vertex, CommandSender):
            raise NotImplementedError(
                "Please contact spinnker team as adding a CommandSender "
                "currently disabled")
        super().add_application_vertex(vertex)

    @staticmethod
    def _count_unique_keys(commands):
        unique_keys = {command.key for command in commands}
        return len(unique_keys)

    def add_population(self, population):
        """ Called by each population so the simulator can track it.

        :param population: the population registering itself
        """
        # Registration order is preserved; other methods iterate this list.
        self._populations += [population]

    def add_projection(self, projection):
        """ Called by each projection so the simulator can track it.

        :param projection: the projection registering itself
        """
        # Registration order is preserved; other methods iterate this list.
        self._projections += [projection]

    def stop(self, turn_off_machine=None, clear_routing_tables=None,
             clear_tags=None):
        """ Shut the simulation down, finalising every population first.

        :param turn_off_machine: decides if the machine should be powered
            down after running the execution. Note that this powers down all
            boards connected to the BMP connections given to the transceiver
        :type turn_off_machine: bool or None
        :param clear_routing_tables: informs the tool chain if it
            should turn off the clearing of the routing tables
        :type clear_routing_tables: bool or None
        :param clear_tags: informs the tool chain if it should clear the tags
            off the machine at stop
        :type clear_tags: bool or None
        :rtype: None
        """
        # pylint: disable=protected-access
        # Let every population write out / close down its own data first.
        for pop in self._populations:
            pop._end()

        super().stop(turn_off_machine, clear_routing_tables, clear_tags)
        # Restore any per-core neuron limits that were overridden.
        self.reset_number_of_neurons_per_core()

    def run(self, run_time, sync_time=0.0):
        """ Run the model created.

        :param run_time: the time (in milliseconds) to run the simulation for
        :type run_time: float or int
        :param float sync_time:
            If not 0, the simulation pauses after this duration; the
            continue_simulation() method must then be called for the
            simulation to continue.
        :rtype: None
        """
        # pylint: disable=protected-access

        # Drop any cached synaptic data before the run so stale values are
        # never served ...
        for proj in self._projections:
            proj._clear_cache()

        super().run(run_time, sync_time)

        # ... and again afterwards, for the same reason.
        for proj in self._projections:
            proj._clear_cache()

    @staticmethod
    def register_binary_search_path(search_path):
        """ Register an additional binary search path for executables.

        :param str search_path: absolute search path for binaries
        :rtype: None
        """
        # pylint: disable=protected-access
        # Class-level finder: the new path is visible to every instance.
        AbstractSpiNNakerCommon.__EXECUTABLE_FINDER.add_path(search_path)

    def set_number_of_neurons_per_core(self, neuron_type, max_permitted):
        """ Set the maximum number of neurons per core for a model type.

        The limit is only ever lowered: a request above the current
        maximum is ignored with a warning.

        :param neuron_type: a model class supporting
            set_model_max_atoms_per_core
        :param int max_permitted: the new maximum number of neurons per core
        :raises TypeError: if neuron_type is not a supported vertex type
        """
        if not hasattr(neuron_type, "set_model_max_atoms_per_core"):
            # TypeError (still an Exception subclass, so existing handlers
            # keep working) describes a wrong-type argument better than a
            # bare Exception.
            raise TypeError("{} is not a Vertex type".format(neuron_type))

        if hasattr(neuron_type, "get_max_atoms_per_core"):
            previous = neuron_type.get_max_atoms_per_core()
            if previous < max_permitted:
                logger.warning(
                    "Attempt to increase number_of_neurons_per_core "
                    "from {} to {} ignored", previous, max_permitted)
                return
        neuron_type.set_model_max_atoms_per_core(max_permitted)
        # Remember the type so reset_number_of_neurons_per_core can undo it.
        self.__neurons_per_core_set.add(neuron_type)

    def reset_number_of_neurons_per_core(self):
        """ Restore the default per-core neuron limit for every model type
            whose limit was previously overridden.
        """
        for overridden_type in self.__neurons_per_core_set:
            overridden_type.set_model_max_atoms_per_core()

    def _locate_receivers_from_projections(
            self, projections, gatherers, extra_monitors_per_chip):
        """ Locate receivers and their corresponding monitor cores for\
            setting router time-outs.

        :param list projections: the projections going to be read
        :param gatherers: the gatherers per Ethernet chip, keyed by the
            (x, y) coordinates of that Ethernet chip
        :param extra_monitors_per_chip: the extra monitor cores per chip,
            keyed by chip (x, y)
        :return: list of tuples with gatherer and its extra monitor cores
        :rtype: list
        """
        # pylint: disable=protected-access
        # Use a set so each (gatherer, monitors) pair is recorded only once.
        important_gathers = set()

        # iterate though projections
        for projection in projections:
            # iteration though the projections machine edges to locate chips
            for edge in projection._projection_edge.machine_edges:
                # The post-vertex placement gives the chip data is read from.
                placement = self._placements.get_placement_of_vertex(
                    edge.post_vertex)
                chip = self._machine.get_chip_at(placement.x, placement.y)

                # locate extra monitor cores on the board of this chip
                extra_monitor_cores_on_board = set(
                    extra_monitors_per_chip[xy]
                    for xy in self._machine.get_existing_xys_on_board(chip))

                # map gatherer to extra monitor cores for board
                # (frozenset so the pair is hashable for the outer set)
                important_gathers.add((
                    gatherers[(chip.nearest_ethernet_x,
                               chip.nearest_ethernet_y)],
                    frozenset(extra_monitor_cores_on_board)))
        return list(important_gathers)

    @property
    def id_counter(self):
        """ The current value of the ID counter, which is currently used by
            the populations.

        .. note::
            Maybe it could live in the pop class???

        :rtype: int
        """
        return self.__id_counter

    @id_counter.setter
    def id_counter(self, new_value):
        """ Setter for id_counter, currently used by the populations.

        .. note::
            Maybe it could live in the pop class???

        :param int new_value: new value for id_counter
        """
        self.__id_counter = new_value

    @overrides(AbstractSpinnakerBase._execute_graph_data_specification_writer)
    def _execute_graph_data_specification_writer(self):
        """ Run, and time, the sPyNNaker-specific data specification writer,
            storing its result as the DSG targets.
        """
        with FecTimer(DATA_GENERATION, "Spynnaker data specification writer"):
            self._dsg_targets = spynnaker_data_specification_writer(
                self._placements, self._ipaddress, self._machine,
                self._app_id, self._max_run_time_steps)

    def _execute_spynnaker_ordered_covering_compressor(self):
        """ Run, and time, the machine bitfield ordered covering compressor,
            then record that the multicast routes have been loaded.

        Does nothing on a virtual board.
        """
        label = "Spynnaker machine bitfield ordered covering compressor"
        with FecTimer(LOADING, label) as timer:
            if timer.skip_if_virtual_board():
                return None
            spynnaker_machine_bitfield_ordered_covering_compressor(
                self._router_tables, self._txrx, self._machine, self._app_id,
                self._machine_graph, self._placements, self._executable_finder,
                self._routing_infos, self._executable_targets,
                get_config_bool("Reports", "write_expander_iobuf"))
            self._multicast_routes_loaded = True
            return None

    def _execute_spynnaker_pair_compressor(self):
        """ Run, and time, the machine bitfield pair router compressor,
            then record that the multicast routes have been loaded.

        Does nothing on a virtual board.
        """
        label = "Spynnaker machine bitfield pair router compressor"
        with FecTimer(LOADING, label) as timer:
            if timer.skip_if_virtual_board():
                return None
            spynnaker_machine_bitField_pair_router_compressor(
                self._router_tables, self._txrx, self._machine, self._app_id,
                self._machine_graph, self._placements, self._executable_finder,
                self._routing_infos, self._executable_targets,
                get_config_bool("Reports", "write_expander_iobuf"))
            self._multicast_routes_loaded = True
            return None

    @overrides(AbstractSpinnakerBase._do_delayed_compression)
    def _do_delayed_compression(self, name, compressed):
        """ Dispatch the sPyNNaker-specific compressors by name, deferring
            to the base class for anything unrecognised.

        :param str name: name of the compressor to run
        :param compressed: the compression state passed to the base class
        """
        dispatch = {
            "SpynnakerMachineBitFieldOrderedCoveringCompressor":
                self._execute_spynnaker_ordered_covering_compressor,
            "SpynnakerMachineBitFieldPairRouterCompressor":
                self._execute_spynnaker_pair_compressor,
        }
        handler = dispatch.get(name)
        if handler is not None:
            return handler()

        return AbstractSpinnakerBase._do_delayed_compression(
            self, name, compressed)

    def _execute_synapse_expander(self):
        """ Run, and time, the synapse expander; skipped on a virtual board.
        """
        with FecTimer(LOADING, "Synapse expander") as timer:
            if timer.skip_if_virtual_board():
                return
            synapse_expander(
                self.placements, self._txrx, self._executable_finder,
                get_config_bool("Reports", "write_expander_iobuf"))

    def _execute_on_chip_bit_field_generator(self):
        """ Run, and time, the on-chip bitfield generator; skipped on a
            virtual board.
        """
        with FecTimer(LOADING, "Execute on chip bitfield generator") as timer:
            if timer.skip_if_virtual_board():
                return
            on_chip_bitfield_generator(
                self.placements, self.application_graph,
                self._executable_finder,  self._txrx, self._machine_graph,
                self._routing_infos)

    def _execute_finish_connection_holders(self):
        """ Run, and time, the finishing of the connection holders over the
            application graph.
        """
        with FecTimer(LOADING, "Finish connection holders"):
            finish_connection_holders(self.application_graph)

    @overrides(AbstractSpinnakerBase._do_extra_load_algorithms)
    def _do_extra_load_algorithms(self):
        """ Run the sPyNNaker load-time steps, in order: synapse expansion,
            bitfield generation, then finishing the connection holders.
        """
        self._execute_synapse_expander()
        self._execute_on_chip_bit_field_generator()
        self._execute_finish_connection_holders()

    def _execute_write_network_graph(self):
        """ Run, and time, the neuron-graph network specification report;
            skipped unless the "write_network_graph" report cfg is set.
        """
        with FecTimer(
                MAPPING,
                "SpYNNakerNeuronGraphNetworkSpecificationReport") as timer:
            if timer.skip_if_cfg_false("Reports", "write_network_graph"):
                return
            spynnaker_neuron_graph_network_specification_report(
                self._application_graph)

    @overrides(AbstractSpinnakerBase._do_extra_mapping_algorithms,
               extend_doc=False)
    def _do_extra_mapping_algorithms(self):
        """ Run the extra sPyNNaker mapping steps (currently only the
            network-graph report).
        """
        self._execute_write_network_graph()

    @overrides(AbstractSpinnakerBase._do_provenance_reports)
    def _do_provenance_reports(self):
        """ Write the base provenance reports, plus the redundant packet
            count report.
        """
        AbstractSpinnakerBase._do_provenance_reports(self)
        self._report_redundant_packet_count()

    def _report_redundant_packet_count(self):
        """ Run, and time, the redundant packet count report; skipped unless
            the "write_redundant_packet_count_report" report cfg is set.
        """
        with FecTimer(RUN_LOOP, "Redundant packet count report") as timer:
            if timer.skip_if_cfg_false(
                    "Reports", "write_redundant_packet_count_report"):
                return
            redundant_packet_count_report()

    @overrides(AbstractSpinnakerBase._execute_splitter_selector)
    def _execute_splitter_selector(self):
        """ Run, and time, the sPyNNaker splitter selector over the
            application graph.
        """
        with FecTimer(MAPPING, "Spynnaker splitter selector"):
            spynnaker_splitter_selector(self._application_graph)

    @overrides(AbstractSpinnakerBase._execute_delay_support_adder,
               extend_doc=False)
    def _execute_delay_support_adder(self):
        """
        Runs, times and logs the DelaySupportAdder if required.

        :raises ConfigurationException:
            if the cfg names an unrecognised adder
        """
        name = get_config_str("Mapping", "delay_support_adder")
        if name is None:
            # Nothing configured, so nothing to add.
            return
        with FecTimer(MAPPING, "DelaySupportAdder"):
            if name != "DelaySupportAdder":
                raise ConfigurationException(
                    f"Unexpected cfg setting delay_support_adder: {name}")
            delay_support_adder(self._application_graph)

    @overrides(AbstractSpinnakerBase._execute_splitter_partitioner)
    def _execute_splitter_partitioner(self, pre_allocated_resources):
        """ Run, and time, the sPyNNaker splitter partitioner, storing the
            resulting machine graph and the number of chips needed.

        Does nothing when the application graph has no vertices.

        :param pre_allocated_resources: resources already reserved
        """
        if not self._application_graph.n_vertices:
            return
        with FecTimer(MAPPING,  "SpynnakerSplitterPartitioner"):
            # Partition against the real machine if we have one, otherwise
            # against the maximum machine we could be allocated.
            if self._machine:
                machine = self._machine
            else:
                machine = self._max_machine
            self._machine_graph, self._n_chips_needed = \
                spynnaker_splitter_partitioner(
                    self._application_graph, machine, self._plan_n_timesteps,
                    pre_allocated_resources)
# --- 示例#7 (example-separator artifact from source extraction) ---
def setup(hostname=None,
          graph_label=None,
          model_binary_module=None,
          model_binary_folder=None,
          database_socket_addresses=None,
          user_dsg_algorithm=None,
          n_chips_required=None,
          extra_pre_run_algorithms=None,
          extra_post_run_algorithms=None,
          time_scale_factor=None,
          machine_time_step=None):
    """ Set up a graph, ready to have vertices and edges added to it, and
        the simulator engine that will execute the graph.

    .. note::
        This must be called *before* the other functions in this API.

    :param hostname:
        the hostname of the SpiNNaker machine to operate on
        (overrides the machine_name from the cfg file).
    :type hostname: str
    :param graph_label:
        a human readable label for the graph (used mainly in reports)
    :type graph_label: str
    :param model_binary_module:
        the module where the binary files can be found for the c code that
        is being used in this application; mutually exclusive with the
        model_binary_folder.
    :type model_binary_module: python module
    :param model_binary_folder:
        the folder where the binary files can be found for the c code that
        is being used in this application; mutually exclusive with the
        model_binary_module.
    :type model_binary_folder: str
    :param database_socket_addresses:
        set of SocketAddresses that need to be added for the database
        notification functionality. These are over and above the ones used
        by the LiveEventConnection
    :type database_socket_addresses: list of SocketAddresses
    :param user_dsg_algorithm:
        an algorithm used for generating the application data which is
        loaded onto the machine. If not set, will use the data
        specification language algorithm required for the type of graph
        being used.
    :type user_dsg_algorithm: str
    :param n_chips_required:
        if you need to be allocated a machine (for spalloc) before building
        your graph, then fill this in with a general idea of the number of
        chips you need so that the spalloc system can allocate you a
        machine big enough for your needs.
    :type n_chips_required: int or None
    :param extra_pre_run_algorithms:
        algorithms which need to be run after mapping and loading has
        occurred but before the system has run. These are plugged directly
        into the work flow management.
    :type extra_pre_run_algorithms: list of str
    :param extra_post_run_algorithms:
        algorithms which need to be run after the simulation has run. These
        could be post processing of generated data on the machine for
        example.
    :type extra_post_run_algorithms: list of str
    :param time_scale_factor: the time scale factor to use
    :type time_scale_factor: int or None
    :param machine_time_step: the machine time step to use
    :type machine_time_step: int or None
    """
    global _none_labelled_vertex_count
    global _none_labelled_edge_count

    logger.info("SpiNNaker graph front end (c) {}, "
                "University of Manchester".format(__version_year__))
    parent_dir = os.path.split(os.path.split(gfe_file.__file__)[0])[0]
    logger.info(
        "Release version {}({}) - {} {}. Installed in folder {}".format(
            __version__, __version_name__, __version_month__, __version_year__,
            parent_dir))

    # add the directories where the binaries are located
    executable_finder = ExecutableFinder()
    if model_binary_module is not None:
        executable_finder.add_path(
            os.path.dirname(model_binary_module.__file__))
    elif model_binary_folder is not None:
        executable_finder.add_path(model_binary_folder)
    else:
        # default: look next to the script being run
        file_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
        executable_finder.add_path(file_dir)

    # set up the spinnaker object
    SpiNNaker(host_name=hostname,
              graph_label=graph_label,
              executable_finder=executable_finder,
              database_socket_addresses=database_socket_addresses,
              dsg_algorithm=user_dsg_algorithm,
              n_chips_required=n_chips_required,
              extra_pre_run_algorithms=extra_pre_run_algorithms,
              extra_post_run_algorithms=extra_post_run_algorithms,
              machine_time_step=machine_time_step,
              time_scale_factor=time_scale_factor)
class AbstractSpiNNakerCommon(AbstractSpinnakerBase):
    """ Main interface for neural code.
    """
    # Restrict instances to these attributes (saves memory, catches typos).
    __slots__ = [
        "__command_edge_count", "__edge_count", "__id_counter",
        "__live_spike_recorder", "__min_delay", "__neurons_per_core_set",
        "_populations", "_projections"
    ]

    # Class-level executable finder shared by every simulator instance.
    __EXECUTABLE_FINDER = ExecutableFinder()

    def __init__(self,
                 graph_label,
                 database_socket_addresses,
                 n_chips_required,
                 n_boards_required,
                 timestep,
                 min_delay,
                 hostname,
                 user_extra_algorithm_xml_path=None,
                 user_extra_mapping_inputs=None,
                 user_extra_algorithms_pre_run=None,
                 time_scale_factor=None,
                 extra_post_run_algorithms=None,
                 extra_mapping_algorithms=None,
                 extra_load_algorithms=None,
                 front_end_versions=None):
        """
        :param str graph_label: human-readable label for the graph
        :param database_socket_addresses:
        :type database_socket_addresses:
            iterable(~spinn_utilities.socket_address.SocketAddress)
        :param n_chips_required:
        :type n_chips_required: int or None
        :param n_boards_required:
        :type n_boards_required: int or None
        :param timestep:
            machine_time_step but in milli seconds. If None uses the cfg value
        :type timestep: float or None
        :param float min_delay: the minimum supported delay in milliseconds
        :param str hostname:
        :param user_extra_algorithm_xml_path:
        :type user_extra_algorithm_xml_path: str or None
        :param user_extra_mapping_inputs:
        :type user_extra_mapping_inputs: dict(str, Any) or None
        :param user_extra_algorithms_pre_run:
        :type user_extra_algorithms_pre_run: list(str) or None
        :param time_scale_factor:
        :type time_scale_factor: float or None
        :param extra_post_run_algorithms:
        :type extra_post_run_algorithms: list(str) or None
        :param extra_mapping_algorithms:
        :type extra_mapping_algorithms: list(str) or None
        :param extra_load_algorithms:
        :type extra_load_algorithms: list(str) or None
        :param front_end_versions:
        :type front_end_versions: list(tuple(str,str)) or None
        """
        # pylint: disable=too-many-arguments, too-many-locals

        setup_configs()

        # add model binaries
        # (the finder is class-level, so paths accumulate across instances)
        self.__EXECUTABLE_FINDER.add_path(
            os.path.dirname(model_binaries.__file__))

        # pynn population objects
        self._populations = []
        self._projections = []
        self.__edge_count = 0
        self.__id_counter = 0

        # the number of edges that are associated with commands being sent to
        # a vertex
        self.__command_edge_count = 0
        self.__live_spike_recorder = dict()

        # create XML path for where to locate sPyNNaker related functions when
        # using auto pause and resume
        extra_algorithm_xml_path = list()
        extra_algorithm_xml_path.append(
            os.path.join(os.path.dirname(extra_algorithms.__file__),
                         "algorithms_metadata.xml"))
        if user_extra_algorithm_xml_path is not None:
            extra_algorithm_xml_path.extend(user_extra_algorithm_xml_path)

        # timing parameters; filled in properly by _set_up_timings below
        self.__min_delay = None

        # model types whose per-core neuron limit was overridden; consumed
        # by reset_number_of_neurons_per_core
        self.__neurons_per_core_set = set()

        versions = [("sPyNNaker", version)]
        if front_end_versions is not None:
            versions.extend(front_end_versions)

        super().__init__(executable_finder=self.__EXECUTABLE_FINDER,
                         graph_label=graph_label,
                         database_socket_addresses=database_socket_addresses,
                         extra_algorithm_xml_paths=extra_algorithm_xml_path,
                         n_chips_required=n_chips_required,
                         n_boards_required=n_boards_required,
                         front_end_versions=versions)

        # update inputs needed by the machine level calls.

        extra_mapping_inputs = dict()
        extra_mapping_inputs["SynapticExpanderReadIOBuf"] = \
            get_config_bool("Reports", "write_expander_iobuf")
        if user_extra_mapping_inputs is not None:
            extra_mapping_inputs.update(user_extra_mapping_inputs)

        if extra_mapping_algorithms is None:
            extra_mapping_algorithms = []
        if extra_load_algorithms is None:
            extra_load_algorithms = []
        if extra_post_run_algorithms is None:
            extra_post_run_algorithms = []
        extra_load_algorithms.append("SynapseExpander")
        extra_load_algorithms.append("OnChipBitFieldGenerator")
        extra_load_algorithms.append("FinishConnectionHolders")
        extra_algorithms_pre_run = []

        if get_config_bool("Reports", "write_network_graph"):
            extra_mapping_algorithms.append(
                "SpYNNakerNeuronGraphNetworkSpecificationReport")

        if get_config_bool("Reports", "reports_enabled"):
            if get_config_bool("Reports", "write_synaptic_report"):
                # NOTE(review): logged at exception level although nothing
                # was raised; the report stays disabled pending issue #1081.
                logger.exception(
                    "write_synaptic_report ignored due to https://github.com/"
                    "SpiNNakerManchester/sPyNNaker/issues/1081")
                # extra_algorithms_pre_run.append("SynapticMatrixReport")
        if user_extra_algorithms_pre_run is not None:
            extra_algorithms_pre_run.extend(user_extra_algorithms_pre_run)

        self.update_extra_mapping_inputs(extra_mapping_inputs)
        self.extend_extra_mapping_algorithms(extra_mapping_algorithms)
        self.prepend_extra_pre_run_algorithms(extra_algorithms_pre_run)
        self.extend_extra_post_run_algorithms(extra_post_run_algorithms)
        self.extend_extra_load_algorithms(extra_load_algorithms)

        # set up machine targeted data
        self._set_up_timings(timestep, min_delay, time_scale_factor)
        self.set_up_machine_specifics(hostname)

        logger.info(f'Setting time scale factor to '
                    f'{self.time_scale_factor}.')

        # get the machine time step
        logger.info(f'Setting machine time step to '
                    f'{self.machine_time_step} '
                    f'micro-seconds.')

    def _set_up_timings(self, timestep, min_delay, time_scale_factor):
        """ Configure simulation timings, the minimum delay and the time
            scale factor.

        :param timestep: machine_time_step in milli seconds; if None the
            cfg value is used
        :type timestep: float or None
        :param min_delay: the minimum supported delay, in milliseconds
        :type min_delay: int or None
        :param time_scale_factor: factor by which to slow the simulation
            down relative to real time
        :type time_scale_factor: int or None
        :raises ConfigurationException:
            if min_delay is below the machine time step, or if the
            resulting wall clock tick is under 1ms and overriding that
            restriction is not enabled in the cfg
        """

        # Get the standard values
        if timestep is None:
            self.set_up_timings(timestep, time_scale_factor)
        else:
            # scale the user's millisecond timestep into whole machine
            # units (presumably microseconds, given the constant's name)
            self.set_up_timings(
                math.ceil(timestep * MICRO_TO_MILLISECOND_CONVERSION),
                time_scale_factor)

        # Sort out the minimum delay
        if (min_delay is not None and min_delay < self.machine_time_step_ms):
            raise ConfigurationException(
                f"Pacman does not support min delays below "
                f"{constants.MIN_SUPPORTED_DELAY * self.machine_time_step} "
                f"ms with the current machine time step")
        if min_delay is not None:
            self.__min_delay = min_delay
        else:
            # default: one machine time step
            self.__min_delay = self.machine_time_step_ms

        # Sort out the time scale factor if not user specified
        # (including config)
        if self.time_scale_factor is None:
            self.time_scale_factor = max(
                1.0,
                math.ceil(MICRO_TO_MILLISECOND_CONVERSION /
                          self.machine_time_step))
            if self.time_scale_factor > 1:
                logger.warning(
                    "A timestep was entered that has forced sPyNNaker to "
                    "automatically slow the simulation down from real time "
                    "by a factor of {}. To remove this automatic behaviour, "
                    "please enter a timescaleFactor value in your .{}",
                    self.time_scale_factor, CONFIG_FILE_NAME)

        # Check the combination of machine time step and time scale factor
        if (self.machine_time_step_ms * self.time_scale_factor < 1):
            if not get_config_bool("Mode",
                                   "violate_1ms_wall_clock_restriction"):
                raise ConfigurationException(
                    "The combination of simulation time step and the machine "
                    "time scale factor results in a wall clock timer tick "
                    "that is currently not reliably supported by the"
                    "SpiNNaker machine.  If you would like to override this"
                    "behaviour (at your own risk), please add "
                    "violate_1ms_wall_clock_restriction = True to the [Mode] "
                    "section of your .{} file".format(CONFIG_FILE_NAME))
            logger.warning(
                "****************************************************")
            logger.warning(
                "*** The combination of simulation time step and  ***")
            logger.warning(
                "*** the machine time scale factor results in a   ***")
            logger.warning(
                "*** wall clock timer tick that is currently not  ***")
            logger.warning(
                "*** reliably supported by the SpiNNaker machine. ***")
            logger.warning(
                "****************************************************")

    def _detect_if_graph_has_changed(self, reset_flags=True):
        """ Iterate though the graph and look for changes, including in the
            PyNN-level populations and projections.

        :param bool reset_flags: when True, clear change markers as they
            are inspected
        :return: tuple of (mapping changed, data changed)
        """
        changed, data_changed = super()._detect_if_graph_has_changed(
            reset_flags)

        def _needs_remap(items):
            # True if any item requires mapping; optionally clears markers.
            result = False
            for item in items:
                if item.requires_mapping:
                    result = True
                if reset_flags:
                    item.mark_no_changes()
            return result

        if _needs_remap(self._populations):
            changed = True
        if _needs_remap(self._projections):
            changed = True

        return changed, data_changed

    @property
    def min_delay(self):
        """ The minimum supported delay, in milliseconds.

        Set by _set_up_timings: the user's value, or the machine time step
        in milliseconds by default.
        """
        return self.__min_delay

    def add_application_vertex(self, vertex):
        """ Add a vertex to the application graph, remembering it as the
            command sender when it is one.

        :param vertex: the vertex to add
        """
        if isinstance(vertex, CommandSender):
            self._command_sender = vertex
        super().add_application_vertex(vertex)

    @staticmethod
    def _count_unique_keys(commands):
        """ Count the number of distinct key values across the commands.

        :param commands: objects each exposing a ``key`` attribute
        :rtype: int
        """
        return len(set(command.key for command in commands))

    def add_population(self, population):
        """ Called by each population so the simulator can track it.

        :param population: the population registering itself
        """
        registry = self._populations
        registry.append(population)

    def add_projection(self, projection):
        """ Called by each projection so the simulator can track it.

        :param projection: the projection registering itself
        """
        registry = self._projections
        registry.append(projection)

    def stop(self,
             turn_off_machine=None,
             clear_routing_tables=None,
             clear_tags=None):
        """ Shut the simulation down, finalising every population first.

        :param turn_off_machine: decides if the machine should be powered
            down after running the execution. Note that this powers down all
            boards connected to the BMP connections given to the transceiver
        :type turn_off_machine: bool or None
        :param clear_routing_tables: informs the tool chain if it
            should turn off the clearing of the routing tables
        :type clear_routing_tables: bool or None
        :param clear_tags: informs the tool chain if it should clear the tags
            off the machine at stop
        :type clear_tags: bool or None
        :rtype: None
        """
        # pylint: disable=protected-access
        # Let every population write out / close down its own data first.
        for pop in self._populations:
            pop._end()

        super().stop(turn_off_machine, clear_routing_tables, clear_tags)
        # Restore any per-core neuron limits that were overridden.
        self.reset_number_of_neurons_per_core()

    def run(self, run_time, sync_time=0.0):
        """ Run the model created.

        :param run_time: the time (in milliseconds) to run the simulation for
        :type run_time: float or int
        :param float sync_time:
            If not 0, this specifies that the simulation should pause after
            this duration.  The continue_simulation() method must then be
            called for the simulation to continue.
        :rtype: None
        """
        # pylint: disable=protected-access

        # use the sPyNNaker data specification writer, and drop stale
        # cached synaptic data before the run
        self._dsg_algorithm = "SpynnakerDataSpecificationWriter"
        for projection in self._projections:
            projection._clear_cache()

        # Only schedule the redundant-packet report on a first, real,
        # timed run with the relevant reporting options all enabled.
        if (get_config_bool("Reports", "reports_enabled") and get_config_bool(
                "Reports", "write_redundant_packet_count_report")
                and not self._use_virtual_board and run_time is not None
                and not self._has_ran
                and get_config_bool("Reports", "writeProvenanceData")):
            self.extend_extra_post_run_algorithms(
                ["RedundantPacketCountReport"])

        super().run(run_time, sync_time)
        # drop the caches again so fresh data is fetched after the run
        for projection in self._projections:
            projection._clear_cache()

    @staticmethod
    def register_binary_search_path(search_path):
        """ Register an additional binary search path for executables.

        :param str search_path: absolute search path for binaries
        :rtype: None
        """
        # pylint: disable=protected-access
        # Class-level finder: the new path is visible to every instance.
        AbstractSpiNNakerCommon.__EXECUTABLE_FINDER.add_path(search_path)

    def set_number_of_neurons_per_core(self, neuron_type, max_permitted):
        """ Set the maximum number of neurons per core for a model type.

        The limit is only ever lowered: a request above the current
        maximum is ignored with a warning.

        :param neuron_type: a model class supporting
            set_model_max_atoms_per_core
        :param int max_permitted: the new maximum number of neurons per core
        :raises TypeError: if neuron_type is not a supported vertex type
        """
        if not hasattr(neuron_type, "set_model_max_atoms_per_core"):
            # TypeError (still an Exception subclass, so existing handlers
            # keep working) describes a wrong-type argument better than a
            # bare Exception.
            raise TypeError("{} is not a Vertex type".format(neuron_type))

        if hasattr(neuron_type, "get_max_atoms_per_core"):
            previous = neuron_type.get_max_atoms_per_core()
            if previous < max_permitted:
                logger.warning(
                    "Attempt to increase number_of_neurons_per_core "
                    "from {} to {} ignored", previous, max_permitted)
                return
        neuron_type.set_model_max_atoms_per_core(max_permitted)
        # Remember the type so reset_number_of_neurons_per_core can undo it.
        self.__neurons_per_core_set.add(neuron_type)

    def reset_number_of_neurons_per_core(self):
        """ Restore the default per-core neuron limit for every model type
            whose limit was previously overridden.
        """
        for overridden_type in self.__neurons_per_core_set:
            overridden_type.set_model_max_atoms_per_core()

    def get_projections_data(self, projection_to_attribute_map):
        """ Common data extractor for projection data. Reads the requested
            synaptic attributes of several projections in one pass, using
            the extra-monitor fast data extraction support when enabled.

        :param projection_to_attribute_map:
            the projection to attributes mapping
        :type projection_to_attribute_map:
            dict(~spynnaker.pyNN.models.projection.Projection,
            list(int) or tuple(int) or None)
        :return: a extracted data object with get method for getting the data
        :rtype: ExtractedData
        """
        # pylint: disable=protected-access

        # build data structure for holding data
        mother_lode = ExtractedData()

        # if using extra monitor functionality, locate extra data items
        receivers = list()
        if get_config_bool("Machine", "enable_advanced_monitor_support"):
            receivers = self._locate_receivers_from_projections(
                projection_to_attribute_map.keys(),
                self.get_generated_output(
                    "VertexToEthernetConnectedChipMapping"),
                self.get_generated_output("ExtraMonitorToChipMapping"))

        # set up the router timeouts to stop packet loss
        for data_receiver, extra_monitor_cores in receivers:
            data_receiver.load_system_routing_tables(
                self._txrx, self.get_generated_output("ExtraMonitorVertices"),
                self._placements)
            data_receiver.set_cores_for_data_streaming(
                self._txrx, list(extra_monitor_cores), self._placements)

        # acquire the data
        # (handle_time_out_configuration=False because the router timeouts
        # were already configured above)
        for projection in projection_to_attribute_map:
            for attribute in projection_to_attribute_map[projection]:
                data = projection._get_synaptic_data(
                    as_list=True,
                    data_to_get=attribute,
                    fixed_values=None,
                    notify=None,
                    handle_time_out_configuration=False)
                mother_lode.set(projection, attribute, data)

        # reset time outs for the receivers
        for data_receiver, extra_monitor_cores in receivers:
            data_receiver.unset_cores_for_data_streaming(
                self._txrx, list(extra_monitor_cores), self._placements)
            data_receiver.load_application_routing_tables(
                self._txrx, self.get_generated_output("ExtraMonitorVertices"),
                self._placements)

        # return data items
        return mother_lode

    def _locate_receivers_from_projections(self, projections, gatherers,
                                           extra_monitors_per_chip):
        """ Work out which gatherers will stream data back for the given\
            projections, pairing each gatherer with the extra monitor cores\
            on its board (needed for setting router time-outs).

        :param list projections: the projections going to be read
        :param gatherers: the gatherers per Ethernet chip
        :param extra_monitors_per_chip: the extra monitor cores per chip
        :return: list of tuples with gatherer and its extra monitor cores
        :rtype: list
        """
        # pylint: disable=protected-access
        gatherer_to_monitors = set()

        # walk every projection's machine edges to find the target chips
        for projection in projections:
            for machine_edge in projection._projection_edge.machine_edges:
                target_placement = self._placements.get_placement_of_vertex(
                    machine_edge.post_vertex)
                target_chip = self._machine.get_chip_at(
                    target_placement.x, target_placement.y)

                # collect the extra monitor cores on this chip's board
                board_monitors = frozenset(
                    extra_monitors_per_chip[xy]
                    for xy in self._machine.get_existing_xys_on_board(
                        target_chip))

                # pair the board's gatherer with its monitor cores;
                # the set de-duplicates boards hit by multiple edges
                ethernet_xy = (target_chip.nearest_ethernet_x,
                               target_chip.nearest_ethernet_y)
                gatherer_to_monitors.add(
                    (gatherers[ethernet_xy], board_monitors))
        return list(gatherer_to_monitors)

    @property
    def id_counter(self):
        """ The current ID counter value; read by the populations when\
            allocating identifiers.

        .. note::
            Maybe it could live in the pop class???

        :rtype: int
        """
        return self.__id_counter

    @id_counter.setter
    def id_counter(self, new_value):
        """ Updates the ID counter; written by the populations when\
            allocating identifiers.

        .. note::
            Maybe it could live in the pop class???

        :param int new_value: new value for id_counter
        """
        self.__id_counter = new_value
# 示例#9 (Example #9 — marker left over from source aggregation)
# 0 (stray score/count from the original paste)
    def __init__(
            self, network, dt=constants.DEFAULT_DT,
            time_scale=constants.DEFAULT_TIME_SCALE,
            host_name=None, graph_label=None,
            database_socket_addresses=None, dsg_algorithm=None,
            n_chips_required=None, extra_pre_run_algorithms=None,
            extra_post_run_algorithms=None, decoder_cache=None,
            function_of_time_nodes=None,
            function_of_time_nodes_time_period=None):
        """Create a new Simulator with the given network.

        :param network: the Nengo network to simulate
        :param dt: The length of a simulator timestep, in seconds.
        :type dt: float
        :param time_scale: Scaling factor to apply to the simulation, e.g.,\
            a value of `0.5` will cause the simulation to run at twice\
            real-time.
        :type time_scale: float
        :param host_name: Hostname of the SpiNNaker machine to use; if None\
            then the machine specified in the config file will be used.
        :type host_name: basestring or None
        :param graph_label: human readable graph label
        :type graph_label: basestring
        :param database_socket_addresses: socket addresses to be notified of\
            the database (presumably; forwarded verbatim to SpiNNaker.__init__)
        :param dsg_algorithm: the data specification generation algorithm\
            (forwarded verbatim to SpiNNaker.__init__)
        :param n_chips_required: number of chips the job needs, if known
        :param extra_pre_run_algorithms: extra algorithms to run before a run
        :param extra_post_run_algorithms: extra algorithms to run after a run
        :param decoder_cache: cache used for decoders; None (the default)\
            means a fresh NoDecoderCache per simulator (avoids sharing one\
            default-argument instance between all simulators)
        :param function_of_time_nodes: nodes treated as functions of time
        :param function_of_time_nodes_time_period: the time periods of the\
            function-of-time nodes
        :rtype: None
        """
        # avoid the shared-mutable-default pitfall: build the default
        # decoder cache per call instead of once at function definition
        if decoder_cache is None:
            decoder_cache = NoDecoderCache()

        self._nengo_object_to_data_map = dict()
        self._profiled_nengo_object_to_data_map = dict()
        self._nengo_to_app_graph_map = None
        self._app_graph_to_nengo_operator_map = None
        self._nengo_app_machine_graph_mapper = None

        executable_finder = ExecutableFinder()
        executable_finder.add_path(os.path.dirname(binaries.__file__))

        # Calculate the machine timestep, this is measured in microseconds
        # (hence the 1e6 scaling factor).
        machine_time_step = (
            int((dt / time_scale) *
                constants.SECONDS_TO_MICRO_SECONDS_CONVERTER))

        xml_paths = list()
        xml_paths.append(os.path.join(os.path.dirname(
            overridden_mapping_algorithms.__file__),
            self.NENGO_ALGORITHM_XML_FILE_NAME))

        SpiNNaker.__init__(
            self, executable_finder, host_name=host_name,
            graph_label=graph_label,
            database_socket_addresses=database_socket_addresses,
            dsg_algorithm=dsg_algorithm,
            n_chips_required=n_chips_required,
            extra_pre_run_algorithms=extra_pre_run_algorithms,
            extra_post_run_algorithms=extra_post_run_algorithms,
            time_scale_factor=time_scale,
            default_config_paths=[(
                os.path.join(os.path.dirname(__file__),
                             self.CONFIG_FILE_NAME))],
            machine_time_step=machine_time_step,
            extra_xml_paths=xml_paths,
            chip_id_allocator="NengoMallocBasedChipIDAllocator")

        # only add the sdram edge allocator if not using a virtual board
        extra_mapping_algorithms = list()
        if not helpful_functions.read_config_boolean(
                self.config, "Machine", "virtual_board"):
            extra_mapping_algorithms.append(
                "NengoSDRAMOutgoingPartitionAllocator")

        if function_of_time_nodes is None:
            function_of_time_nodes = list()
        if function_of_time_nodes_time_period is None:
            function_of_time_nodes_time_period = list()

        # update the main flow with new algorithms and params
        self.extend_extra_mapping_algorithms(extra_mapping_algorithms)
        self.update_extra_inputs(
            {"UserCreateDatabaseFlag": True,
             'DefaultNotifyHostName': self.config.get_str(
                "Database", "notify_hostname"),
             'NengoNodesAsFunctionOfTime': function_of_time_nodes,
             'NengoNodesAsFunctionOfTimeTimePeriod':
                 function_of_time_nodes_time_period,
             'NengoModel': network,
             'NengoDecoderCache': decoder_cache,
             "NengoNodeIOSetting": self.config.get("Simulator", "node_io"),
             "NengoEnsembleProfile":
                 self.config.getboolean("Ensemble", "profile"),
             "NengoEnsembleProfileNumSamples":
                 helpful_functions.read_config_int(
                     self.config, "Ensemble", "profile_num_samples"),
             "NengoRandomNumberGeneratorSeed":
                helpful_functions.read_config_int(
                    self.config, "Simulator", "global_seed"),
             "NengoUtiliseExtraCoreForProbes":
                self.config.getboolean(
                    "Node", "utilise_extra_core_for_probes"),
             "MachineTimeStepInSeconds": dt,
             "ReceiveBufferPort": helpful_functions.read_config_int(
                self.config, "Buffers", "receive_buffer_port"),
             "ReceiveBufferHost": self.config.get(
                 "Buffers", "receive_buffer_host"),
             "MinBufferSize": self.config.getint(
                 "Buffers", "minimum_buffer_sdram"),
             "MaxSinkBuffingSize": self.config.getint(
                 "Buffers", "sink_vertex_max_sdram_for_buffing"),
             "UsingAutoPauseAndResume": self.config.getboolean(
                 "Buffers", "use_auto_pause_and_resume"),
             "TimeBetweenRequests": self.config.getint(
                 "Buffers", "time_between_requests"),
             "BufferSizeBeforeReceive": self.config.getint(
                 "Buffers", "buffer_size_before_receive"),
             "SpikeBufferMaxSize": self.config.getint(
                "Buffers", "spike_buffer_size"),
             "VariableBufferMaxSize": self.config.getint(
                "Buffers", "variable_buffer_size")})

        # build app graph, machine graph, as the main tools expect an
        # application / machine graph level, and cannot go from random to app
        #  graph.
        nengo_app_graph_generator = NengoApplicationGraphGenerator()

        (self._nengo_operator_graph, host_network,
         self._nengo_to_app_graph_map, self._app_graph_to_nengo_operator_map,
         random_number_generator) = \
            nengo_app_graph_generator(
            self._extra_inputs["NengoModel"], self.machine_time_step,
            self._extra_inputs["NengoRandomNumberGeneratorSeed"],
            self._extra_inputs["NengoDecoderCache"],
            self._extra_inputs["NengoUtiliseExtraCoreForProbes"],
            self._extra_inputs["NengoNodesAsFunctionOfTime"],
            self._extra_inputs["NengoNodesAsFunctionOfTimeTimePeriod"],
            self.config.getboolean("Node", "optimise_utilise_interposers"),
            self._print_timings, self._do_timings, self._xml_paths,
            self._pacman_executor_provenance_path,
            self._extra_inputs["NengoEnsembleProfile"],
            self._extra_inputs["NengoEnsembleProfileNumSamples"],
            self._extra_inputs["ReceiveBufferPort"],
            self._extra_inputs["ReceiveBufferHost"],
            self._extra_inputs["MinBufferSize"],
            self._extra_inputs["MaxSinkBuffingSize"],
            self._extra_inputs["UsingAutoPauseAndResume"],
            self._extra_inputs["TimeBetweenRequests"],
            self._extra_inputs["BufferSizeBeforeReceive"],
            self._extra_inputs["SpikeBufferMaxSize"],
            self._extra_inputs["VariableBufferMaxSize"],
            self._extra_inputs["MachineTimeStepInSeconds"])

        # add the extra outputs as new inputs
        self.update_extra_inputs(
            {"NengoHostGraph": host_network,
             "NengoGraphToAppGraphMap": self._nengo_to_app_graph_map,
             "AppGraphToNengoOperatorMap":
                 self._app_graph_to_nengo_operator_map,
             "NengoRandomNumberGenerator": random_number_generator,
             "NengoOperatorGraph": self._nengo_operator_graph})
 def __init__(self, machine_time_step=None, time_scale_factor=None):
     """ Build the test interface: initialise the simulator base with the
         test configuration file, then configure the timings.

     :param machine_time_step: the machine time step, or None for default
     :param time_scale_factor: the time scale factor, or None for default
     """
     # NOTE(review): explicit base-class call (not super()) — presumably
     # deliberate; confirm against the class's MRO before changing.
     AbstractSpinnakerBase.__init__(
         self, base.CONFIG_FILE, ExecutableFinder())
     self.set_up_timings(machine_time_step, time_scale_factor)